Mirror of https://github.com/julia-actions/julia-runtest.git (synced 2026-02-11 18:46:55 +08:00)
Add test failure annotations on v1.8+ (#58)
Co-authored-by: Dilum Aluthge <dilum@aluthge.com>
.github/workflows/test_logger_ci.yml (new file, 53 lines, vendored)
@@ -0,0 +1,53 @@
name: TestLogger CI

on:
  push:
    branches:
      - "main"
      - "master"
  pull_request:

jobs:
  test:
    name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        version:
          - "1.6"
          - "1" # automatically expands to the latest stable 1.x release of Julia
          - nightly
        os:
          - ubuntu-latest
          - macOS-latest
          - windows-latest
        arch:
          - x64
          - x86
        # 32-bit Julia binaries are not available on macOS
        exclude:
          - os: macOS-latest
            arch: x86

    steps:
      - name: Checkout julia-runtest
        uses: actions/checkout@v2

      - uses: julia-actions/setup-julia@v1
        with:
          version: ${{ matrix.version }}
          arch: ${{ matrix.arch }}

      - uses: actions/cache@v1
        env:
          cache-name: cache-artifacts
        with:
          path: ~/.julia/artifacts
          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
          restore-keys: |
            ${{ runner.os }}-test-${{ env.cache-name }}-
            ${{ runner.os }}-test-
            ${{ runner.os }}-

      - run: julia --color=yes --check-bounds=yes test_logger_tests.jl
@@ -33,10 +33,14 @@ jobs:
          arch: ${{ matrix.julia-arch }}
      - uses: julia-actions/julia-buildpkg@v1
      - uses: julia-actions/julia-runtest@v1
        with:
          annotate: true
```

You can add this workflow to your repository by placing it in a file called `test.yml` in the folder `.github/workflows/`. [More info here](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions).

Here, setting `annotate: true` causes GitHub "annotations" to appear when reviewing the PR, pointing to any failing tests. By default, `annotate` is set to `false`, but that may change in future releases of this action.

### Prefixing the Julia command

In some packages, you may want to prefix the `julia` command with another command, e.g. to run the tests of certain graphical libraries under `xvfb-run`.
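A minimal sketch of such a step, using the action's `prefix` input (visible in the `action.yml` change below) to wrap the test run in `xvfb-run`; the step list and surrounding workflow are assumed to match the example above:

```yaml
# Sketch only: run the package's tests under a virtual framebuffer on a Linux runner.
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
  with:
    prefix: xvfb-run
```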
action.yml (19 lines changed)
@@ -29,6 +29,9 @@ inputs:
  project:
    description: 'Value passed to the --project flag. The default value is the repository root: "@."'
    default: '@.'
  annotate:
    description: 'Whether or not to attempt to create GitHub annotations to show test failures inline. Only effective on Julia 1.8+.'
    default: 'false'

runs:
  using: 'composite'
@@ -49,9 +52,18 @@ runs:
        # the request metadata to pkg.julialang.org when installing
        # packages via `Pkg.test`.
        JULIA_PKG_SERVER: ""
    - name: Install dependencies in their own (shared) environment
      run: |
        if VERSION > v"1.8pre"
            using Pkg
            Pkg.activate("tests-logger-env"; shared=true)
            Pkg.add(Pkg.PackageSpec(name="GitHubActions", version="0.1"))
        end
      shell: julia --color=yes {0}
      if: inputs.annotate == 'true'
    - run: |
        # The Julia command that will be executed
-       julia_cmd=( julia --color=yes --depwarn=${{ inputs.depwarn }} --inline=${{ inputs.inline }} --project=${{ inputs.project }} -e 'import Pkg;include(joinpath(ENV["GITHUB_ACTION_PATH"], "kwargs.jl"));kwargs = Kwargs.kwargs(;coverage = :(${{ inputs.coverage }}),force_latest_compatible_version = :(${{ inputs.force_latest_compatible_version }}), julia_args = ["--check-bounds=${{ inputs.check_bounds }}"]);Pkg.test(; kwargs...)' )
+       julia_cmd=( julia --color=yes --depwarn=${{ inputs.depwarn }} --inline=${{ inputs.inline }} --project=${{ inputs.project }} -e 'include(joinpath(ENV["GITHUB_ACTION_PATH"], "test_harness.jl"))' )

        # Add the prefix in front of the command if there is one
        prefix="${{ inputs.prefix }}"
@@ -60,3 +72,8 @@ runs:
        # Run the Julia command
        "${julia_cmd[@]}"
      shell: bash
      env:
        ANNOTATE: ${{ inputs.annotate }}
        COVERAGE: ${{ inputs.coverage }}
        FORCE_LATEST_COMPATIBLE_VERSION: ${{ inputs.force_latest_compatible_version }}
        CHECK_BOUNDS: ${{ inputs.check_bounds }}
kwargs.jl (20 lines changed)
@@ -4,9 +4,25 @@ import Pkg

include(joinpath(@__DIR__, "autodetect-dependabot.jl"))

-function kwargs(; coverage::Bool,
-                force_latest_compatible_version::Union{Bool, Symbol},
+function kwargs(; coverage,
+                force_latest_compatible_version,
                 julia_args::AbstractVector{<:AbstractString}=String[])
    if coverage isa AbstractString
        coverage = parse(Bool, coverage)
    end
    coverage isa Bool || error("Unexpected type of `coverage`: $(typeof(coverage))")

    if force_latest_compatible_version isa AbstractString
        res = tryparse(Bool, force_latest_compatible_version)
        if res === nothing
            res = Symbol(force_latest_compatible_version)
        end
        force_latest_compatible_version = res
    end
    if !(force_latest_compatible_version isa Union{Bool, Symbol})
        error("Unexpected type of `force_latest_compatible_version`: $(typeof(force_latest_compatible_version))")
    end

    if !(force_latest_compatible_version isa Bool) && (force_latest_compatible_version != :auto)
        throw(ArgumentError("Invalid value for force_latest_compatible_version: $(force_latest_compatible_version)"))
    end
test_harness.jl (new file, 16 lines)
@@ -0,0 +1,16 @@
import Pkg
include("kwargs.jl")
kwargs = Kwargs.kwargs(; coverage=ENV["COVERAGE"],
                       force_latest_compatible_version=ENV["FORCE_LATEST_COMPATIBLE_VERSION"],
                       julia_args=[string("--check-bounds=", ENV["CHECK_BOUNDS"])])

if parse(Bool, ENV["ANNOTATE"]) && VERSION > v"1.8pre"
    push!(LOAD_PATH, "@tests-logger-env") # access dependencies
    using GitHubActions, Logging
    global_logger(GitHubActionsLogger())
    include("test_logger.jl")
    pop!(LOAD_PATH)
    TestLogger.test(; kwargs...)
else
    Pkg.test(; kwargs...)
end
test_logger.jl (new file, 97 lines)
@@ -0,0 +1,97 @@
module TestLogger
using Pkg

function parse_file_line(failed_line)
    # The bits like `\e[91m\e[1m` are color codes that get printed by `Pkg.test`. We
    # match with or without them.
    r = r"(\e\[91m\e\[1m)?Test Failed(\e\[22m\e\[39m)? at (\e\[39m\e\[1m)?(?<path>[^\s\e]+)(\e\[22m)?"
    m = match(r, failed_line)
    m === nothing && return (nothing, nothing)

    if m[:path] === nothing
        return (nothing, nothing)
    else
        path_split_results = rsplit(m[:path], ":", limit=2)
        if length(path_split_results) == 1
            return (m[:path], nothing)
        else
            path, line_no = path_split_results

            # Try to make sure line number is parseable to avoid false positives
            line_no = tryparse(Int, line_no) === nothing ? nothing : line_no
            return (path, line_no)
        end
    end
    return (nothing, nothing)
end

function readlines_until(f, stream; keep_lines=true, io)
    lines = String[]
    while true
        line = readline(stream; keep=true)
        print(io, line)

        # with `keep=true`, this should only happen when we're done?
        # I think so...
        if line == ""
            return line, lines
        end
        if f(line)
            return line, lines
        else
            keep_lines && push!(lines, line)
        end
    end
end

function has_test_failure(line)
    contains(line, "Test Failed") || return false
    file, line_no = parse_file_line(line)
    return !isnothing(file) && !isnothing(line_no)
end

function build_stream(io)
    stream = Base.BufferStream()
    t = @async begin
        while !eof(stream)
            # Iterate through and print until we get to "Test Failed" and can parse it
            failed_line, _ = readlines_until(has_test_failure, stream; keep_lines=false, io)
            @label found_failed_line
            # Parse file and line out
            file, line_no = parse_file_line(failed_line)

            # Grab everything until the stacktrace, OR we hit another `Test Failed`
            stopped_line, msg_lines = readlines_until(stream; io) do line
                contains(line, "Stacktrace:") || has_test_failure(line)
            end

            # If we stopped because we hit a 2nd test failure,
            # let's assume somehow the stacktrace didn't show up for the first one.
            # Let's continue by trying to find the info for this one, by jumping back.
            if has_test_failure(stopped_line)
                failed_line = stopped_line
                @goto found_failed_line
            end

            if !isempty(msg_lines)
                msg = string("Test Failed\n", chomp(join(msg_lines)))
                # Now log it out
                @error msg _file=file _line=line_no
            end
        end
    end
    return stream, t
end


function test(args...; kwargs...)
    stream, t = build_stream(stdout)
    Base.errormonitor(t)
    return try
        Pkg.test(args...; kwargs..., io=stream)
    finally
        close(stream)
    end
end

end # module
test_logger_tests.jl (new file, 71 lines)
@@ -0,0 +1,71 @@
include("test_logger.jl")
using Logging, Test

function simulate(text)
    logger = Test.TestLogger()
    output = IOBuffer()
    with_logger(logger) do
        stream, t = TestLogger.build_stream(output)
        for line in eachline(IOBuffer(text); keep=true)
            write(stream, line)
        end
        close(stream)
        wait(t)
    end
    return String(take!(output)), logger.logs
end

@testset "TestLogger" begin

    for input in (
        """
        Test Failed at file.txt:1
        1
        2
        3
        4
        5
        6
        Stacktrace:
        Hi
        """,
        # Let us mess with the stacktrace line
        """
        Test Failed at file.txt:1
        1
        2
        3
        4
        5
        6
        Stacktrace: extra stuff
        Hi
        """)

        output, logs = simulate(input)
        @test output == input
        log = only(logs)
        @test log.message == "Test Failed\n1\n2\n3\n4\n5\n6"
        @test log.file == "file.txt"
        @test log.line == "1"
    end

    # Next, check that if we hit a Test Failed, and then hit another one before we get a stacktrace,
    # we just move on to handling the new one.
    input = """
    Test Failed at file.txt:1
    Nah
    Test Failed at file.txt:1
    Correct
    Stacktrace:
    Hi
    """

    output, logs = simulate(input)
    @test output == input

    log = only(logs)
    @test log.message == "Test Failed\nCorrect"
    @test log.file == "file.txt"
    @test log.line == "1"
end