tools run-cts: Add --coverage flag
Displays a per-test coverage viewer in your browser Change-Id: I0b808bfadf01dab0540143760580cd7ca680e93b Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/113644 Kokoro: Kokoro <noreply+kokoro@google.com> Reviewed-by: Antonio Maiorano <amaiorano@google.com> Commit-Queue: Ben Clayton <bclayton@google.com> Reviewed-by: Dan Sinclair <dsinclair@chromium.org>
This commit is contained in:
parent
8ac417c39c
commit
60dc70df71
|
@ -39,7 +39,9 @@ import (
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"dawn.googlesource.com/dawn/tools/src/cov"
|
||||||
"dawn.googlesource.com/dawn/tools/src/fileutils"
|
"dawn.googlesource.com/dawn/tools/src/fileutils"
|
||||||
|
"dawn.googlesource.com/dawn/tools/src/git"
|
||||||
"github.com/mattn/go-colorable"
|
"github.com/mattn/go-colorable"
|
||||||
"github.com/mattn/go-isatty"
|
"github.com/mattn/go-isatty"
|
||||||
)
|
)
|
||||||
|
@ -65,8 +67,7 @@ Usage:
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
colors bool
|
colors bool
|
||||||
mainCtx context.Context
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ANSI escape sequences
|
// ANSI escape sequences
|
||||||
|
@ -99,7 +100,7 @@ func (f *dawnNodeFlags) Set(value string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeMainCtx() context.Context {
|
func makeCtx() context.Context {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
sigs := make(chan os.Signal, 1)
|
sigs := make(chan os.Signal, 1)
|
||||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
@ -112,7 +113,7 @@ func makeMainCtx() context.Context {
|
||||||
}
|
}
|
||||||
|
|
||||||
func run() error {
|
func run() error {
|
||||||
mainCtx = makeMainCtx()
|
ctx := makeCtx()
|
||||||
|
|
||||||
colors = os.Getenv("TERM") != "dumb" ||
|
colors = os.Getenv("TERM") != "dumb" ||
|
||||||
isatty.IsTerminal(os.Stdout.Fd()) ||
|
isatty.IsTerminal(os.Stdout.Fd()) ||
|
||||||
|
@ -128,8 +129,8 @@ func run() error {
|
||||||
backendDefault = "vulkan"
|
backendDefault = "vulkan"
|
||||||
}
|
}
|
||||||
|
|
||||||
var dawnNode, cts, node, npx, resultsPath, expectationsPath, logFilename, backend string
|
var dawnNode, cts, node, npx, resultsPath, expectationsPath, logFilename, backend, coverageFile string
|
||||||
var printStdout, verbose, isolated, build, dumpShaders bool
|
var printStdout, verbose, isolated, build, dumpShaders, genCoverage bool
|
||||||
var numRunners int
|
var numRunners int
|
||||||
var flags dawnNodeFlags
|
var flags dawnNodeFlags
|
||||||
flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
|
flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
|
||||||
|
@ -149,6 +150,8 @@ func run() error {
|
||||||
flag.StringVar(&backend, "backend", backendDefault, "backend to use: default|null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles."+
|
flag.StringVar(&backend, "backend", backendDefault, "backend to use: default|null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles."+
|
||||||
" set to 'vulkan' if VK_ICD_FILENAMES environment variable is set, 'default' otherwise")
|
" set to 'vulkan' if VK_ICD_FILENAMES environment variable is set, 'default' otherwise")
|
||||||
flag.BoolVar(&dumpShaders, "dump-shaders", false, "dump WGSL shaders. Enables --verbose")
|
flag.BoolVar(&dumpShaders, "dump-shaders", false, "dump WGSL shaders. Enables --verbose")
|
||||||
|
flag.BoolVar(&genCoverage, "coverage", false, "displays coverage data. Enables --isolated")
|
||||||
|
flag.StringVar(&coverageFile, "export-coverage", "", "write coverage data to the given path")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
// Create a thread-safe, color supporting stdout wrapper.
|
// Create a thread-safe, color supporting stdout wrapper.
|
||||||
|
@ -233,6 +236,7 @@ func run() error {
|
||||||
npx: npx,
|
npx: npx,
|
||||||
dawnNode: dawnNode,
|
dawnNode: dawnNode,
|
||||||
cts: cts,
|
cts: cts,
|
||||||
|
tmpDir: filepath.Join(os.TempDir(), "dawn-cts"),
|
||||||
flags: flags,
|
flags: flags,
|
||||||
results: testcaseStatuses{},
|
results: testcaseStatuses{},
|
||||||
evalScript: func(main string) string {
|
evalScript: func(main string) string {
|
||||||
|
@ -242,6 +246,28 @@ func run() error {
|
||||||
colors: colors,
|
colors: colors,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if coverageFile != "" {
|
||||||
|
r.coverageFile = coverageFile
|
||||||
|
genCoverage = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if genCoverage {
|
||||||
|
isolated = true
|
||||||
|
llvmCov, err := exec.LookPath("llvm-cov")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to find LLVM, required for --coverage")
|
||||||
|
}
|
||||||
|
turboCov := filepath.Join(filepath.Dir(dawnNode), "turbo-cov"+fileutils.ExeExt)
|
||||||
|
if !fileutils.IsExe(turboCov) {
|
||||||
|
turboCov = ""
|
||||||
|
}
|
||||||
|
r.covEnv = &cov.Env{
|
||||||
|
LLVMBin: filepath.Dir(llvmCov),
|
||||||
|
Binary: dawnNode,
|
||||||
|
TurboCov: turboCov,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if logFilename != "" {
|
if logFilename != "" {
|
||||||
writer, err := os.Create(logFilename)
|
writer, err := os.Create(logFilename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -305,19 +331,19 @@ func run() error {
|
||||||
if isolated {
|
if isolated {
|
||||||
fmt.Fprintln(stdout, "Running in parallel isolated...")
|
fmt.Fprintln(stdout, "Running in parallel isolated...")
|
||||||
fmt.Fprintf(stdout, "Testing %d test cases...\n", len(r.testcases))
|
fmt.Fprintf(stdout, "Testing %d test cases...\n", len(r.testcases))
|
||||||
if err := r.runParallelIsolated(); err != nil {
|
if err := r.runParallelIsolated(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintln(stdout, "Running in parallel with server...")
|
fmt.Fprintln(stdout, "Running in parallel with server...")
|
||||||
fmt.Fprintf(stdout, "Testing %d test cases...\n", len(r.testcases))
|
fmt.Fprintf(stdout, "Testing %d test cases...\n", len(r.testcases))
|
||||||
if err := r.runParallelWithServer(); err != nil {
|
if err := r.runParallelWithServer(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintln(stdout, "Running serially...")
|
fmt.Fprintln(stdout, "Running serially...")
|
||||||
if err := r.runSerially(query); err != nil {
|
if err := r.runSerially(ctx, query); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -385,18 +411,24 @@ func (c *cache) save(path string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
type runner struct {
|
type runner struct {
|
||||||
numRunners int
|
numRunners int
|
||||||
printStdout bool
|
printStdout bool
|
||||||
verbose bool
|
verbose bool
|
||||||
node, npx, dawnNode, cts string
|
node string
|
||||||
flags dawnNodeFlags
|
npx string
|
||||||
evalScript func(string) string
|
dawnNode string
|
||||||
testcases []string
|
cts string
|
||||||
expectations testcaseStatuses
|
tmpDir string
|
||||||
results testcaseStatuses
|
flags dawnNodeFlags
|
||||||
log logger
|
covEnv *cov.Env
|
||||||
stdout io.WriteCloser
|
coverageFile string
|
||||||
colors bool // Colors enabled?
|
evalScript func(string) string
|
||||||
|
testcases []string
|
||||||
|
expectations testcaseStatuses
|
||||||
|
results testcaseStatuses
|
||||||
|
log logger
|
||||||
|
stdout io.WriteCloser
|
||||||
|
colors bool // Colors enabled?
|
||||||
}
|
}
|
||||||
|
|
||||||
// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
|
// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
|
||||||
|
@ -562,7 +594,7 @@ func (p *prefixWriter) Write(data []byte) (int, error) {
|
||||||
|
|
||||||
// runParallelWithServer() starts r.numRunners instances of the CTS server test
|
// runParallelWithServer() starts r.numRunners instances of the CTS server test
|
||||||
// runner, and issues test run requests to those servers, concurrently.
|
// runner, and issues test run requests to those servers, concurrently.
|
||||||
func (r *runner) runParallelWithServer() error {
|
func (r *runner) runParallelWithServer(ctx context.Context) error {
|
||||||
// Create a chan of test indices.
|
// Create a chan of test indices.
|
||||||
// This will be read by the test runner goroutines.
|
// This will be read by the test runner goroutines.
|
||||||
caseIndices := make(chan int, len(r.testcases))
|
caseIndices := make(chan int, len(r.testcases))
|
||||||
|
@ -582,7 +614,7 @@ func (r *runner) runParallelWithServer() error {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
if err := r.runServer(id, caseIndices, results); err != nil {
|
if err := r.runServer(ctx, id, caseIndices, results); err != nil {
|
||||||
results <- result{
|
results <- result{
|
||||||
status: fail,
|
status: fail,
|
||||||
error: fmt.Errorf("Test server error: %w", err),
|
error: fmt.Errorf("Test server error: %w", err),
|
||||||
|
@ -591,8 +623,7 @@ func (r *runner) runParallelWithServer() error {
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
r.streamResults(wg, results)
|
return r.streamResults(ctx, wg, results)
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// runServer starts a test runner server instance, takes case indices from
|
// runServer starts a test runner server instance, takes case indices from
|
||||||
|
@ -600,7 +631,7 @@ func (r *runner) runParallelWithServer() error {
|
||||||
// The result of the test run is written to the results chan.
|
// The result of the test run is written to the results chan.
|
||||||
// Once the caseIndices chan has been closed, the server is stopped and
|
// Once the caseIndices chan has been closed, the server is stopped and
|
||||||
// runServer returns.
|
// runServer returns.
|
||||||
func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result) error {
|
func (r *runner) runServer(ctx context.Context, id int, caseIndices <-chan int, results chan<- result) error {
|
||||||
var port int
|
var port int
|
||||||
testCaseLog := &bytes.Buffer{}
|
testCaseLog := &bytes.Buffer{}
|
||||||
|
|
||||||
|
@ -627,7 +658,6 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
|
||||||
args = append(args, "--gpu-provider-flag", f)
|
args = append(args, "--gpu-provider-flag", f)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := mainCtx
|
|
||||||
cmd := exec.CommandContext(ctx, r.node, args...)
|
cmd := exec.CommandContext(ctx, r.node, args...)
|
||||||
|
|
||||||
writer := io.Writer(testCaseLog)
|
writer := io.Writer(testCaseLog)
|
||||||
|
@ -736,7 +766,7 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
|
||||||
// testcase in a separate process. This reduces possibility of state leakage
|
// testcase in a separate process. This reduces possibility of state leakage
|
||||||
// between tests.
|
// between tests.
|
||||||
// Up to r.numRunners tests will be run concurrently.
|
// Up to r.numRunners tests will be run concurrently.
|
||||||
func (r *runner) runParallelIsolated() error {
|
func (r *runner) runParallelIsolated(ctx context.Context) error {
|
||||||
// Create a chan of test indices.
|
// Create a chan of test indices.
|
||||||
// This will be read by the test runner goroutines.
|
// This will be read by the test runner goroutines.
|
||||||
caseIndices := make(chan int, len(r.testcases))
|
caseIndices := make(chan int, len(r.testcases))
|
||||||
|
@ -753,18 +783,28 @@ func (r *runner) runParallelIsolated() error {
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
for i := 0; i < r.numRunners; i++ {
|
for i := 0; i < r.numRunners; i++ {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
|
|
||||||
|
profraw := ""
|
||||||
|
if r.covEnv != nil {
|
||||||
|
profraw = filepath.Join(r.tmpDir, fmt.Sprintf("cts-%v.profraw", i))
|
||||||
|
defer os.Remove(profraw)
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
for idx := range caseIndices {
|
for idx := range caseIndices {
|
||||||
res := r.runTestcase(r.testcases[idx])
|
res := r.runTestcase(ctx, r.testcases[idx], profraw)
|
||||||
res.index = idx
|
res.index = idx
|
||||||
results <- res
|
results <- res
|
||||||
|
|
||||||
|
if err := ctx.Err(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
r.streamResults(wg, results)
|
return r.streamResults(ctx, wg, results)
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// streamResults reads from the chan 'results', printing the results in test-id
|
// streamResults reads from the chan 'results', printing the results in test-id
|
||||||
|
@ -772,7 +812,7 @@ func (r *runner) runParallelIsolated() error {
|
||||||
// automatically close the 'results' chan.
|
// automatically close the 'results' chan.
|
||||||
// Once all the results have been printed, a summary will be printed and the
|
// Once all the results have been printed, a summary will be printed and the
|
||||||
// function will return.
|
// function will return.
|
||||||
func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
|
func (r *runner) streamResults(ctx context.Context, wg *sync.WaitGroup, results chan result) error {
|
||||||
// Create another goroutine to close the results chan when all the runner
|
// Create another goroutine to close the results chan when all the runner
|
||||||
// goroutines have finished.
|
// goroutines have finished.
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
@ -803,6 +843,11 @@ func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
|
||||||
progressUpdateRate = time.Second
|
progressUpdateRate = time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var covTree *cov.Tree
|
||||||
|
if r.covEnv != nil {
|
||||||
|
covTree = &cov.Tree{}
|
||||||
|
}
|
||||||
|
|
||||||
for res := range results {
|
for res := range results {
|
||||||
r.log.logResults(res)
|
r.log.logResults(res)
|
||||||
r.results[res.testcase] = res.status
|
r.results[res.testcase] = res.status
|
||||||
|
@ -839,6 +884,10 @@ func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
|
||||||
if time.Since(lastStatusUpdate) > progressUpdateRate {
|
if time.Since(lastStatusUpdate) > progressUpdateRate {
|
||||||
updateProgress()
|
updateProgress()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if res.coverage != nil {
|
||||||
|
covTree.Add(splitTestCaseForCoverage(res.testcase), res.coverage)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
fmt.Fprint(r.stdout, ansiProgressBar(animFrame, numTests, numByExpectedStatus))
|
fmt.Fprint(r.stdout, ansiProgressBar(animFrame, numTests, numByExpectedStatus))
|
||||||
|
|
||||||
|
@ -888,14 +937,53 @@ func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
|
||||||
fmt.Fprintln(r.stdout)
|
fmt.Fprintln(r.stdout)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if covTree != nil {
|
||||||
|
// Obtain the current git revision
|
||||||
|
revision := "HEAD"
|
||||||
|
if g, err := git.New(""); err == nil {
|
||||||
|
if r, err := g.Open(fileutils.DawnRoot()); err == nil {
|
||||||
|
if l, err := r.Log(&git.LogOptions{From: "HEAD", To: "HEAD"}); err == nil {
|
||||||
|
revision = l[0].Hash.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.coverageFile != "" {
|
||||||
|
file, err := os.Create(r.coverageFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create the coverage file: %w", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
if err := covTree.Encode(revision, file); err != nil {
|
||||||
|
return fmt.Errorf("failed to encode coverage file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(r.stdout)
|
||||||
|
fmt.Fprintln(r.stdout, "Coverage data written to "+r.coverageFile)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cov := &bytes.Buffer{}
|
||||||
|
if err := covTree.Encode(revision, cov); err != nil {
|
||||||
|
return fmt.Errorf("failed to encode coverage file: %w", err)
|
||||||
|
}
|
||||||
|
return showCoverageServer(ctx, cov.Bytes(), r.stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// runSerially() calls the CTS test runner to run the test query in a single
|
// runSerially() calls the CTS test runner to run the test query in a single
|
||||||
// process.
|
// process.
|
||||||
// TODO(bclayton): Support comparing against r.expectations
|
// TODO(bclayton): Support comparing against r.expectations
|
||||||
func (r *runner) runSerially(query string) error {
|
func (r *runner) runSerially(ctx context.Context, query string) error {
|
||||||
|
profraw := ""
|
||||||
|
if r.covEnv != nil {
|
||||||
|
profraw = filepath.Join(r.tmpDir, "cts.profraw")
|
||||||
|
}
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
result := r.runTestcase(query)
|
result := r.runTestcase(ctx, query, profraw)
|
||||||
timeTaken := time.Since(start)
|
timeTaken := time.Since(start)
|
||||||
|
|
||||||
if r.verbose {
|
if r.verbose {
|
||||||
|
@ -942,12 +1030,13 @@ type result struct {
|
||||||
status status
|
status status
|
||||||
message string
|
message string
|
||||||
error error
|
error error
|
||||||
|
coverage *cov.Coverage
|
||||||
}
|
}
|
||||||
|
|
||||||
// runTestcase() runs the CTS testcase with the given query, returning the test
|
// runTestcase() runs the CTS testcase with the given query, returning the test
|
||||||
// result.
|
// result.
|
||||||
func (r *runner) runTestcase(query string) result {
|
func (r *runner) runTestcase(ctx context.Context, query string, profraw string) result {
|
||||||
ctx, cancel := context.WithTimeout(mainCtx, testTimeout)
|
ctx, cancel := context.WithTimeout(ctx, testTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
args := []string{
|
args := []string{
|
||||||
|
@ -973,27 +1062,52 @@ func (r *runner) runTestcase(query string) result {
|
||||||
cmd := exec.CommandContext(ctx, r.node, args...)
|
cmd := exec.CommandContext(ctx, r.node, args...)
|
||||||
cmd.Dir = r.cts
|
cmd.Dir = r.cts
|
||||||
|
|
||||||
|
if profraw != "" {
|
||||||
|
cmd.Env = os.Environ()
|
||||||
|
cmd.Env = append(cmd.Env, cov.RuntimeEnv(cmd.Env, profraw))
|
||||||
|
}
|
||||||
|
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
cmd.Stdout = &buf
|
cmd.Stdout = &buf
|
||||||
cmd.Stderr = &buf
|
cmd.Stderr = &buf
|
||||||
|
|
||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
|
|
||||||
msg := buf.String()
|
msg := buf.String()
|
||||||
|
res := result{testcase: query,
|
||||||
|
status: pass,
|
||||||
|
message: msg,
|
||||||
|
error: err,
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.covEnv != nil {
|
||||||
|
coverage, covErr := r.covEnv.Import(profraw)
|
||||||
|
if covErr != nil {
|
||||||
|
err = fmt.Errorf("could not import coverage data: %v", err)
|
||||||
|
}
|
||||||
|
res.coverage = coverage
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case errors.Is(err, context.DeadlineExceeded):
|
case errors.Is(err, context.DeadlineExceeded):
|
||||||
return result{testcase: query, status: timeout, message: msg, error: err}
|
res.status = timeout
|
||||||
case err != nil:
|
case err != nil, strings.Contains(msg, "[fail]"):
|
||||||
break
|
res.status = fail
|
||||||
case strings.Contains(msg, "[fail]"):
|
|
||||||
return result{testcase: query, status: fail, message: msg}
|
|
||||||
case strings.Contains(msg, "[warn]"):
|
case strings.Contains(msg, "[warn]"):
|
||||||
return result{testcase: query, status: warn, message: msg}
|
res.status = warn
|
||||||
case strings.Contains(msg, "[skip]"):
|
case strings.Contains(msg, "[skip]"):
|
||||||
return result{testcase: query, status: skip, message: msg}
|
res.status = skip
|
||||||
case strings.Contains(msg, "[pass]"), err == nil:
|
case strings.Contains(msg, "[pass]"):
|
||||||
return result{testcase: query, status: pass, message: msg}
|
break
|
||||||
|
default:
|
||||||
|
res.status = fail
|
||||||
|
msg += "\ncould not parse test output"
|
||||||
}
|
}
|
||||||
return result{testcase: query, status: fail, message: fmt.Sprint(msg, err), error: err}
|
|
||||||
|
if res.error != nil {
|
||||||
|
res.message = fmt.Sprint(res.message, res.error)
|
||||||
|
}
|
||||||
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
// filterTestcases returns in with empty strings removed
|
// filterTestcases returns in with empty strings removed
|
||||||
|
@ -1251,3 +1365,83 @@ func (w *muxWriter) Close() error {
|
||||||
close(w.data)
|
close(w.data)
|
||||||
return <-w.err
|
return <-w.err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func splitTestCaseForCoverage(testcase string) []string {
|
||||||
|
out := []string{}
|
||||||
|
s := 0
|
||||||
|
for e, r := range testcase {
|
||||||
|
switch r {
|
||||||
|
case ':', '.':
|
||||||
|
out = append(out, testcase[s:e])
|
||||||
|
s = e
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// showCoverageServer starts a localhost http server to display the coverage data, launching a
|
||||||
|
// browser if one can be found. Blocks until the context is cancelled.
|
||||||
|
func showCoverageServer(ctx context.Context, covData []byte, stdout io.Writer) error {
|
||||||
|
const port = "9392"
|
||||||
|
url := fmt.Sprintf("http://localhost:%v/index.html", port)
|
||||||
|
|
||||||
|
handler := http.NewServeMux()
|
||||||
|
handler.HandleFunc("/index.html", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
f, err := os.Open(filepath.Join(fileutils.ThisDir(), "view-coverage.html"))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprint(w, "file not found")
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
io.Copy(w, f)
|
||||||
|
})
|
||||||
|
handler.HandleFunc("/coverage.dat", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
io.Copy(w, bytes.NewReader(covData))
|
||||||
|
})
|
||||||
|
handler.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
rel := r.URL.Path
|
||||||
|
if r.URL.Path == "" {
|
||||||
|
http.Redirect(w, r, url, http.StatusSeeOther)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.Contains(rel, "..") {
|
||||||
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
|
fmt.Fprint(w, "file path must not contain '..'")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f, err := os.Open(filepath.Join(fileutils.DawnRoot(), r.URL.Path))
|
||||||
|
if err != nil {
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
fmt.Fprintf(w, "file '%v' not found", r.URL.Path)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
io.Copy(w, f)
|
||||||
|
})
|
||||||
|
|
||||||
|
server := &http.Server{Addr: ":" + port, Handler: handler}
|
||||||
|
go server.ListenAndServe()
|
||||||
|
|
||||||
|
fmt.Fprintln(stdout)
|
||||||
|
fmt.Fprintln(stdout, "Serving coverage view at "+blue+url+ansiReset)
|
||||||
|
|
||||||
|
openBrowser(url)
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
return server.Shutdown(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// openBrowser launches a browser to open the given url
|
||||||
|
func openBrowser(url string) error {
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "linux":
|
||||||
|
return exec.Command("xdg-open", url).Start()
|
||||||
|
case "windows":
|
||||||
|
return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
|
||||||
|
case "darwin":
|
||||||
|
return exec.Command("open", url).Start()
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported platform")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,578 @@
|
||||||
|
<!doctype html>
|
||||||
|
<!--
|
||||||
|
Copyright 2022 The Dawn and Tint Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
<html>
|
||||||
|
|
||||||
|
<head>
|
||||||
|
<title>Dawn Code Coverage viewer</title>
|
||||||
|
|
||||||
|
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.52.0/codemirror.min.js"></script>
|
||||||
|
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.52.0/theme/seti.min.css">
|
||||||
|
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.52.0/codemirror.min.css">
|
||||||
|
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.52.0/mode/clike/clike.min.js"></script>
|
||||||
|
<script src=https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.10/pako.min.js></script>
|
||||||
|
|
||||||
|
<style>
|
||||||
|
::-webkit-scrollbar {
|
||||||
|
background-color: #30353530;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-thumb {
|
||||||
|
background-color: #80858050;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-corner {
|
||||||
|
background-color: #00000000;
|
||||||
|
}
|
||||||
|
|
||||||
|
.frame {
|
||||||
|
display: flex;
|
||||||
|
left: 0px;
|
||||||
|
right: 0px;
|
||||||
|
top: 0px;
|
||||||
|
bottom: 0px;
|
||||||
|
position: absolute;
|
||||||
|
font-family: monospace;
|
||||||
|
background-color: #151515;
|
||||||
|
color: #c0b070;
|
||||||
|
}
|
||||||
|
|
||||||
|
.left-pane {
|
||||||
|
flex: 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
.center-pane {
|
||||||
|
flex: 3;
|
||||||
|
min-width: 0;
|
||||||
|
min-height: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.top-pane {
|
||||||
|
flex: 1;
|
||||||
|
overflow: scroll;
|
||||||
|
}
|
||||||
|
|
||||||
|
.v-flex {
|
||||||
|
display: flex;
|
||||||
|
height: 100%;
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
|
||||||
|
.file-tree {
|
||||||
|
font-size: small;
|
||||||
|
overflow: auto;
|
||||||
|
padding: 5px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.test-tree {
|
||||||
|
font-size: small;
|
||||||
|
overflow: auto;
|
||||||
|
padding: 5px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.CodeMirror {
|
||||||
|
flex: 3;
|
||||||
|
height: 100%;
|
||||||
|
border: 1px solid #eee;
|
||||||
|
}
|
||||||
|
|
||||||
|
.file-div {
|
||||||
|
margin: 0px;
|
||||||
|
white-space: nowrap;
|
||||||
|
padding: 2px;
|
||||||
|
margin-top: 1px;
|
||||||
|
margin-bottom: 1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.file-div:hover {
|
||||||
|
background-color: #303030;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.file-div.selected {
|
||||||
|
background-color: #505050;
|
||||||
|
color: #f0f0a0;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.test-name {
|
||||||
|
margin: 0px;
|
||||||
|
white-space: nowrap;
|
||||||
|
padding: 2px;
|
||||||
|
margin-top: 1px;
|
||||||
|
margin-bottom: 1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.file-coverage {
|
||||||
|
color: black;
|
||||||
|
width: 20pt;
|
||||||
|
padding-right: 3pt;
|
||||||
|
padding-left: 3px;
|
||||||
|
margin-right: 5pt;
|
||||||
|
display: inline-block;
|
||||||
|
text-align: center;
|
||||||
|
border-radius: 5px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.with-coverage {
|
||||||
|
background-color: #20d04080;
|
||||||
|
border-width: 0px 0px 0px 0px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.with-coverage-start {
|
||||||
|
border-left: solid 1px;
|
||||||
|
border-color: #20f02080;
|
||||||
|
margin-left: -1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.with-coverage-end {
|
||||||
|
border-right: solid 1px;
|
||||||
|
border-color: #20f02080;
|
||||||
|
margin-right: -1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.without-coverage {
|
||||||
|
background-color: #d0204080;
|
||||||
|
border-width: 0px 0px 0px 0px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.without-coverage-start {
|
||||||
|
border-left: solid 1px;
|
||||||
|
border-color: #f0202080;
|
||||||
|
margin-left: -1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.without-coverage-end {
|
||||||
|
border-right: solid 1px;
|
||||||
|
border-color: #f0202080;
|
||||||
|
margin-right: -1px;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body>
|
||||||
|
<div class="frame">
|
||||||
|
<div id="file_tree" class="left-pane file-tree"></div>
|
||||||
|
<div class="center-pane">
|
||||||
|
<div id="source" class="v-flex">
|
||||||
|
<div class="top-pane">
|
||||||
|
<div class="test-tree" id="test_tree"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
// "Download" the coverage.dat file if the user presses ctrl-s
|
||||||
|
document.addEventListener('keydown', e => {
|
||||||
|
if (e.ctrlKey && e.key === 's') {
|
||||||
|
e.preventDefault();
|
||||||
|
window.open("coverage.dat");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let current = {
|
||||||
|
file: "",
|
||||||
|
start_line: 0,
|
||||||
|
start_column: 0,
|
||||||
|
end_line: 0,
|
||||||
|
end_column: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
let pending = { ...current };
|
||||||
|
{
|
||||||
|
let url = new URL(location.href);
|
||||||
|
let query_string = url.search;
|
||||||
|
let search_params = new URLSearchParams(query_string);
|
||||||
|
var f = search_params.get('f');
|
||||||
|
var s = search_params.get('s');
|
||||||
|
var e = search_params.get('e');
|
||||||
|
if (f) {
|
||||||
|
pending.file = f; // f.replace(/\./g, '/');
|
||||||
|
}
|
||||||
|
if (s) {
|
||||||
|
s = s.split('.');
|
||||||
|
pending.start_line = s.length > 0 ? parseInt(s[0]) : 0;
|
||||||
|
pending.start_column = s.length > 1 ? parseInt(s[1]) : 0;
|
||||||
|
}
|
||||||
|
if (e) {
|
||||||
|
e = e.split('.');
|
||||||
|
pending.end_line = e.length > 0 ? parseInt(e[0]) : 0;
|
||||||
|
pending.end_column = e.length > 1 ? parseInt(e[1]) : 0;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let set_location = (file, start_line, start_column, end_line, end_column) => {
|
||||||
|
current.file = file;
|
||||||
|
current.start_line = start_line;
|
||||||
|
current.start_column = start_column;
|
||||||
|
current.end_line = end_line;
|
||||||
|
current.end_column = end_column;
|
||||||
|
|
||||||
|
let url = new URL(location.href);
|
||||||
|
let query_string = url.search;
|
||||||
|
// Don't use URLSearchParams, as it will unnecessarily escape
|
||||||
|
// characters, such as '/'.
|
||||||
|
url.search = "f=" + file +
|
||||||
|
"&s=" + start_line + "." + end_line +
|
||||||
|
"&e=" + end_line + "." + end_column;
|
||||||
|
window.history.replaceState(null, "", url.toString());
|
||||||
|
};
|
||||||
|
|
||||||
|
let before = (line, col, span) => {
|
||||||
|
if (line < span[0]) { return true; }
|
||||||
|
if (line == span[0]) { return col < span[1]; }
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
let after = (line, col, span) => {
|
||||||
|
if (line > span[2]) { return true; }
|
||||||
|
if (line == span[2]) { return col > span[3]; }
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
let intersects = (span, from, to) => {
|
||||||
|
if (!before(to.line + 1, to.ch + 1, span) &&
|
||||||
|
!after(from.line + 1, from.ch + 1, span)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Panel elements: file list (left) and the per-selection test list.
let el_file_tree = document.getElementById("file_tree");
let el_test_tree = document.getElementById("test_tree");
// Read-only CodeMirror editor hosting the displayed source file.
let el_source = CodeMirror(document.getElementById("source"), {
    lineNumbers: true,
    theme: "seti",
    mode: "text/x-c++src",
    readOnly: true,
});
|
||||||
|
|
||||||
|
window.onload = function () {
    el_source.doc.setValue("// Loading... ");
    // coverage.dat is a compressed JSON blob: fetch it, inflate with
    // pako, decode as UTF-8, then parse.
    fetch("coverage.dat").then(response =>
        response.arrayBuffer()
    ).then(compressed =>
        pako.inflate(new Uint8Array(compressed))
    ).then(decompressed =>
        JSON.parse(new TextDecoder("utf-8").decode(decompressed))
    ).then(json => {
        el_source.doc.setValue("// Select file from the left... ");

        // Single-letter JSON field names keep coverage.dat small:
        let revision = json.r; // source revision the coverage was gathered at
        let names = json.n;    // string table for test path segments
        let tests = json.t;    // test name tree (indices into 'names')
        let spans = json.s;    // span table: [start_line, start_col, end_line, end_col]
        let files = json.f;    // per-file coverage, keyed by path
||||||
|
// Accumulates into span_ids every span referenced by the group chain
// starting at groupID: each group in file.g contributes its own span
// ids (group.s) and may link onwards to an "extent" group via group.e.
let glob_group = (file, groupID, span_ids) => {
    for (let id = groupID; ;) {
        let group = file.g[id];
        for (const span_id of group.s) {
            span_ids.add(span_id);
        }
        if (!group.e) {
            break;
        }
        id = group.e;
    }
};
|
||||||
|
|
||||||
|
// Collects into span_ids all spans attributed to a coverage data node:
// spans reachable via its group reference (data.g), plus any span ids
// listed directly on the node (data.s).
let coverage_spans = (file, data, span_ids) => {
    if (data.g != undefined) {
        glob_group(file, data.g, span_ids);
    }
    if (data.s != undefined) {
        for (const span_id of data.s) {
            span_ids.add(span_id);
        }
    }
};
|
||||||
|
|
||||||
|
// Recursively walks a list of coverage tree nodes (each a
// [name_id, data] pair), accumulating every referenced span id
// into span_ids via coverage_spans().
let glob_node = (file, nodes, span_ids) => {
    for (const node of nodes) {
        const data = node[1];
        coverage_spans(file, data, span_ids);
        if (data.c) {
            // Descend into this node's children.
            glob_node(file, data.c, span_ids);
        }
    }
};
|
||||||
|
|
||||||
|
// Applies coverage highlighting to the source view for 'file':
// spans listed in file.u are styled "without-coverage", and every span
// reachable from the file's coverage tree (file.c) "with-coverage".
let markup = file => {
    // Marks one span ([start_line, start_col, end_line, end_col],
    // 1-based) with the given CSS class (plus -start / -end styles).
    let mark = (span, cls) => {
        el_source.doc.markText(
            { "line": span[0] - 1, "ch": span[1] - 1 },
            { "line": span[2] - 1, "ch": span[3] - 1 },
            {
                className: cls,
                startStyle: cls + "-start",
                endStyle: cls + "-end",
            });
    };

    if (file.u) {
        for (const uncovered of file.u) {
            mark(uncovered, "without-coverage");
        }
    }

    let span_ids = new Set();
    glob_node(file, file.c, span_ids);
    // Batch all the covered-span marks into one CodeMirror operation so
    // the editor refreshes only once.
    el_source.operation(() => {
        span_ids.forEach(span_id => mark(spans[span_id], "with-coverage"));
    });
};
|
||||||
|
|
||||||
|
// Per-span overlap classification used by gather_overlaps() /
// gather_tests(), relative to the currently selected source range:
let NONE_OVERLAP = 0; // span does not intersect the selection
let ALL_OVERLAP = 1;  // span intersects the selection for this subtree
let SOME_OVERLAP = 2; // child nodes disagree (mixed overlap)
|
||||||
|
|
||||||
|
// Builds the tree of test nodes below 'parent', classifying for each
// node how its coverage spans overlap the selected range [from, to].
// parent:         { test, name?, overlaps: Map(span_id -> OVERLAP),
//                   children: Map(index -> node) }
// coverage_nodes: list of [test_index, coverage_data] pairs.
let gather_overlaps = (parent, file, coverage_nodes, from, to) => {
    if (!coverage_nodes) { return; }

    // Start by populating all the children nodes from the full
    // test lists. This includes nodes that do not have child
    // coverage data.
    for (var index = 0; index < parent.test.length; index++) {
        if (parent.children.has(index)) { continue; }

        let test_node = parent.test[index];
        let test_name_id = test_node[0]; // index into the global 'names' table
        let test_name = names[test_name_id];
        let test_children = test_node[1];

        let node = {
            test: test_children,
            // Full test path: accumulate the parent's name prefix.
            name: parent.name ? parent.name + test_name : test_name,
            overlaps: new Map(parent.overlaps), // map: span_id -> OVERLAP
            children: new Map(), // map: index -> struct
            is_leaf: test_children.length == 0,
        };
        parent.children.set(index, node);
    }

    // Now update the children that do have coverage data.
    for (const coverage_node of coverage_nodes) {
        let index = coverage_node[0];
        let coverage = coverage_node[1];
        let node = parent.children.get(index);

        // All spans this child's coverage data refers to.
        let span_ids = new Set();
        coverage_spans(file, coverage, span_ids);

        // Update the node overlaps based on the coverage spans.
        for (const span_id of span_ids) {
            if (intersects(spans[span_id], from, to)) {
                let overlap = parent.overlaps.get(span_id) || NONE_OVERLAP;
                // NOTE(review): this toggles — a span already marked
                // ALL_OVERLAP by the parent flips back to NONE_OVERLAP
                // here. Confirm this is intended and not simply
                // 'overlap = ALL_OVERLAP'.
                overlap = (overlap == NONE_OVERLAP) ? ALL_OVERLAP : NONE_OVERLAP;
                node.overlaps.set(span_id, overlap);
            }
        }

        // Generate the child nodes.
        gather_overlaps(node, file, coverage.c, from, to);

        // Gather all the spans used by the children.
        let all_spans = new Set();
        for (const [_, child] of node.children) {
            for (const [span, _] of child.overlaps) {
                all_spans.add(span);
            }
        }

        // Update the node.overlaps based on the child overlaps:
        // unanimous children keep their value, disagreement becomes
        // SOME_OVERLAP, and a child with no entry counts as NONE_OVERLAP.
        for (const span of all_spans) {
            let overlap = undefined;
            for (const [_, child] of node.children) {
                let child_overlap = child.overlaps.get(span);
                child_overlap = (child_overlap == undefined) ? NONE_OVERLAP : child_overlap;
                if (overlap == undefined) {
                    overlap = child_overlap;
                } else {
                    overlap = (child_overlap == overlap) ? overlap : SOME_OVERLAP
                }
            }
            node.overlaps.set(span, overlap);
        }

        // If all the node.overlaps are NONE_OVERLAP or ALL_OVERLAP
        // then there's no point holding on to the children -
        // we know all transitive children either fully overlap
        // or don't at all.
        let some_overlap = false;
        for (const [_, overlap] of node.overlaps) {
            if (overlap == SOME_OVERLAP) {
                some_overlap = true;
                break;
            }
        }

        if (!some_overlap) {
            // Prune: the whole subtree is uniform, so the children
            // carry no extra information.
            node.children = null;
        }
    }
};
|
||||||
|
|
||||||
|
// Returns the list of test names whose coverage intersects the selected
// range [from, to] in 'file'. A subtree whose tests all overlap is
// emitted as a single "prefix*" wildcard entry; leaves are emitted
// verbatim.
let gather_tests = (file, coverage_nodes, test_nodes, from, to) => {
    let out = [];

    // Root of the overlap tree built by gather_overlaps().
    let tree = {
        test: test_nodes,
        overlaps: new Map(), // map: span_id -> OVERLAP
        children: new Map(), // map: index -> struct
    };
    gather_overlaps(tree, file, coverage_nodes, from, to);

    // Walk the tree: fully-overlapping nodes are added to 'out';
    // partially-overlapping nodes are descended into.
    let traverse = (parent) => {
        for (const [idx, node] of parent.children) {
            let descend = false;
            let add = false;
            for (const [_, overlap] of node.overlaps) {
                if (overlap == SOME_OVERLAP) { descend = true; }
                if (overlap == ALL_OVERLAP) { add = true; }
            }
            if (add) {
                out.push(node.name + (node.is_leaf ? "" : "*"));
            } else if (descend) {
                traverse(node);
            }
        }
    };
    traverse(tree);

    return out;
};
|
||||||
|
|
||||||
|
// Refreshes the test-list panel with the tests whose coverage
// intersects the selected range [from, to] of the current file.
let update_selection = (from, to) => {
    // Selections can be made backwards; normalize so from <= to.
    if (from.line > to.line || (from.line == to.line && from.ch > to.ch)) {
        [from, to] = [to, from];
    }

    let file = files[current.file];
    let filtered = gather_tests(file, file.c, tests, from, to);
    el_test_tree.innerHTML = "";
    for (const test_name of filtered) {
        let element = document.createElement('p');
        element.className = "test-name";
        element.innerText = test_name;
        el_test_tree.appendChild(element);
    }
};
|
||||||
|
|
||||||
|
// Fetches and displays the source for 'path', applies its coverage
// markup, and — if a selection was parsed from the URL ('pending') —
// restores that selection.
let load_source = (path) => {
    if (!files[path]) { return; }

    // Highlight the chosen entry in the file-tree panel.
    for (let i = 0; i < el_file_tree.childNodes.length; i++) {
        let el = el_file_tree.childNodes[i];
        if (el.path == path) {
            el.classList.add("selected");
        } else {
            el.classList.remove("selected");
        }
    }
    el_source.doc.setValue("// Loading... ");
    fetch(`${path}`)
        .then(response => response.text())
        .then(source => {
            // Order matters: show the text, record the current file,
            // then mark up its coverage.
            el_source.doc.setValue(source);
            current.file = path;
            markup(files[path]);
            // Restore a selection carried in the URL, if any.
            // pending values are 1-based; CodeMirror is 0-based.
            if (pending.start_line) {
                var start = {
                    line: pending.start_line - 1,
                    ch: pending.start_column ? pending.start_column - 1 : 0
                };
                var end = {
                    line: pending.end_line ? pending.end_line - 1 : pending.start_line - 1,
                    ch: pending.end_column ? pending.end_column - 1 : 0
                };
                el_source.doc.setSelection(start, end);
                update_selection(start, end);
            }
            // The pending URL state is one-shot: clear it after use.
            pending = {};
        });
};
|
||||||
|
|
||||||
|
// Keep the page URL and the test-list panel in sync with the editor
// selection.
el_source.doc.on("beforeSelectionChange", (doc, selection) => {
    // Ignore selection events until a file has been loaded.
    if (!files[current.file]) { return; }

    let range = selection.ranges[0];
    let from = range.head;  // head may come after anchor for backwards selections
    let to = range.anchor;

    // CodeMirror positions are 0-based; the URL format is 1-based.
    set_location(current.file, from.line + 1, from.ch + 1, to.line + 1, to.ch + 1);

    update_selection(from, to);
});
|
||||||
|
|
||||||
|
// Build the file-tree panel: one clickable row per file, showing a
// coverage-percentage chip followed by the file path.
for (const path of Object.keys(files)) {
    let file = files[path];

    let div = document.createElement('div');
    div.className = "file-div";
    // Clicking a row discards any URL-carried selection and loads the file.
    div.onclick = () => { pending = {}; load_source(path); }
    div.path = path;
    el_file_tree.appendChild(div);

    // Coverage chip: percentage on a red-to-green background.
    let coverage = document.createElement('span');
    coverage.className = "file-coverage";
    if (file.p != undefined) {
        // file.p is the covered fraction in [0..1].
        let red = 1.0 - file.p;
        let green = file.p;
        // NOTE(review): this scales by 1/(r²+g²) rather than
        // 1/sqrt(r²+g²), so components can exceed 1.0 (browsers clamp
        // RGB channels at 255) — confirm this is the intended ramp.
        let normalize = 1.0 / (red * red + green * green);
        red *= normalize;
        green *= normalize;
        coverage.innerText = Math.round(file.p * 100);
        coverage.style = "background-color: RGB(" + 255 * red + "," + 255 * green + ", 0" + ")";
    } else {
        // No coverage data for this file: grey chip.
        coverage.innerText = "--";
        coverage.style = "background-color: RGB(180,180,180)";
    }
    div.appendChild(coverage);

    let filepath = document.createElement('span');
    filepath.className = "file-path";
    filepath.innerText = path;
    div.appendChild(filepath);
}
|
||||||
|
|
||||||
|
if (pending.file) {
|
||||||
|
load_source(pending.file);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
|
||||||
|
</html>
|
|
@ -64,6 +64,13 @@ type Git struct {
|
||||||
|
|
||||||
// New returns a new Git instance
|
// New returns a new Git instance
|
||||||
func New(exe string) (*Git, error) {
|
func New(exe string) (*Git, error) {
|
||||||
|
if exe == "" {
|
||||||
|
g, err := exec.LookPath("git")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to find git: %v", err)
|
||||||
|
}
|
||||||
|
exe = g
|
||||||
|
}
|
||||||
if _, err := os.Stat(exe); err != nil {
|
if _, err := os.Stat(exe); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue