@@ -142,6 +142,24 @@ pub struct BashCompletionArgs {
     #[clap(long)]
     bash_completion_path: PathBuf,
 
+    /// List available tests without running them.
+    #[clap(long)]
+    list: bool,
+
+    /// Filter tests by name pattern (passed to pytest -k).
+    /// Supports pytest expression syntax, e.g., `"test_alias"`, `"test_alias and test_1"`.
+    #[clap(long, short = 't')]
+    test_filter: Option<String>,
+
+    /// Run only specific test file(s). Can be specified multiple times.
+    /// Example: `-f test_alias.py -f test_bash.py`
+    #[clap(long, short = 'f')]
+    file: Vec<String>,
+
+    /// Stop on first test failure.
+    #[clap(long, short = 'x')]
+    stop_on_first: bool,
+
     /// Output file for JSON test results.
     #[clap(long, short = 'o')]
     output: Option<PathBuf>,
@@ -155,7 +173,8 @@ pub struct BashCompletionArgs {
     #[clap(long)]
     summary_script: Option<PathBuf>,
 
-    /// Number of parallel test workers.
+    /// Number of parallel test workers (requires pytest-xdist).
+    /// Use -j 1 to disable parallel execution.
     #[clap(long, short = 'j', default_value = "128")]
     jobs: u32,
 }
@@ -367,6 +386,47 @@ fn run_tests_with_coverage(
     Ok(())
 }
 
+/// List available bash-completion tests without running them.
+fn list_bash_completion_tests(sh: &Shell, args: &BashCompletionArgs, verbose: bool) -> Result<()> {
+    eprintln!("Collecting bash-completion tests...");
+
+    // Determine test targets - specific files or all tests
+    let test_targets: Vec<String> = if args.file.is_empty() {
+        vec!["./t".to_string()]
+    } else {
+        args.file
+            .iter()
+            .map(|f| {
+                if f.starts_with("./t/") || f.starts_with("t/") {
+                    f.clone()
+                } else {
+                    format!("./t/{f}")
+                }
+            })
+            .collect()
+    };
+
+    let mut pytest_args = vec!["--collect-only".to_string(), "-q".to_string()];
+
+    // Add test filter if specified
+    if let Some(filter) = &args.test_filter {
+        pytest_args.push("-k".to_string());
+        pytest_args.push(filter.clone());
+    }
+
+    // Add test targets
+    pytest_args.extend(test_targets);
+
+    if verbose {
+        eprintln!("Running: pytest {}", pytest_args.join(" "));
+    }
+
+    // Run pytest --collect-only and display results
+    cmd!(sh, "pytest").args(&pytest_args).run()?;
+
+    Ok(())
+}
+
 /// Run the bash-completion project's test suite against brush.
 ///
 /// This runs pytest on the bash-completion test suite with brush as the shell,
@@ -382,11 +442,8 @@ fn run_bash_completion_tests(
     binary_args: &BinaryArgs,
     verbose: bool,
 ) -> Result<()> {
-    eprintln!("Running bash-completion test suite...");
-
     // Find the brush binary (use explicit path or auto-detect from target dir)
     let brush_path = binary_args.find_brush_binary()?;
-    eprintln!("Using brush binary: {}", brush_path.display());
 
     let test_dir = args.bash_completion_path.join("test");
     if !test_dir.exists() {
@@ -396,34 +453,72 @@ fn run_bash_completion_tests(
         );
     }
 
-    // Get workspace root for script path resolution (fail hard if not found)
-    let workspace_root = find_workspace_root()?;
-
-    let brush_path_str = brush_path.display().to_string();
-    let jobs = args.jobs.to_string();
-
     // Build the pytest command
     let dir_guard = sh.push_dir(&test_dir);
 
     // Set environment variable for the test suite
+    let brush_path_str = brush_path.display().to_string();
     let _env = sh.push_env(
         "BASH_COMPLETION_TEST_BASH",
         format!("{brush_path_str} --noprofile --no-config --input-backend=basic"),
    );
 
-    // Determine output arguments
-    let json_output = args.output.as_ref().map_or_else(
-        || "test-results-bash-completion.json".to_string(),
-        |p| p.display().to_string(),
-    );
+    // Handle --list mode: just collect and display tests
+    if args.list {
+        return list_bash_completion_tests(sh, args, verbose);
+    }
 
-    // Build the json report file argument
-    let json_report_arg = format!("--json-report-file={json_output}");
+    eprintln!("Running bash-completion test suite...");
+    eprintln!("Using brush binary: {}", brush_path.display());
+
+    // Determine test targets - specific files or all tests
+    let test_targets: Vec<String> = if args.file.is_empty() {
+        vec!["./t".to_string()]
+    } else {
+        args.file
+            .iter()
+            .map(|f| {
+                if f.starts_with("./t/") || f.starts_with("t/") {
+                    f.clone()
+                } else {
+                    format!("./t/{f}")
+                }
+            })
+            .collect()
+    };
 
     // Build pytest args
-    let mut pytest_args = vec!["-n", &jobs, "--json-report", &json_report_arg, "./t"];
+    let mut pytest_args: Vec<String> = Vec::new();
+
+    // Add parallel execution flag if jobs > 1 (requires pytest-xdist)
+    if args.jobs > 1 {
+        pytest_args.push("-n".to_string());
+        pytest_args.push(args.jobs.to_string());
+    }
+
+    // Add JSON report if output is requested (requires pytest-json-report)
+    let json_output = args.output.as_ref().map(|p| p.display().to_string());
+    if let Some(ref output) = json_output {
+        pytest_args.push("--json-report".to_string());
+        pytest_args.push(format!("--json-report-file={output}"));
+    }
+
+    // Add optional flags
+    if verbose {
+        pytest_args.push("-v".to_string());
+    }
+    if args.stop_on_first {
+        pytest_args.push("-x".to_string());
+    }
+    if let Some(filter) = &args.test_filter {
+        pytest_args.push("-k".to_string());
+        pytest_args.push(filter.clone());
+    }
+
+    // Add test targets at the end
+    pytest_args.extend(test_targets);
+
     if verbose {
-        pytest_args.insert(0, "-v");
         eprintln!("Running: pytest {}", pytest_args.join(" "));
     }
@@ -434,8 +529,11 @@ fn run_bash_completion_tests(
         eprintln!("Some tests failed, but continuing to generate reports...");
     }
 
-    // Generate summary report if requested
-    if let Some(summary_path) = &args.summary_output {
+    // Generate summary report if requested (requires JSON output)
+    if let (Some(summary_path), Some(output)) = (&args.summary_output, &json_output) {
+        // Get workspace root for script path resolution
+        let workspace_root = find_workspace_root()?;
+
         let summary_path_str = summary_path.display().to_string();
 
         // Determine the script path - use provided path or default to workspace root
@@ -451,10 +549,10 @@ fn run_bash_completion_tests(
 
         let title = "Test Summary: bash-completion test suite";
         if verbose {
-            eprintln!("Running: python3 {script_path_str} -r {json_output} --title \"{title}\"");
+            eprintln!("Running: python3 {script_path_str} -r {output} --title \"{title}\"");
         }
         let summary_result = cmd!(sh, "python3 {script_path_str}")
-            .args(["-r", &json_output, "--title", title])
+            .args(["-r", output, "--title", title])
             .read();
 
         match summary_result {
@@ -466,10 +564,14 @@ fn run_bash_completion_tests(
                 eprintln!("Warning: Failed to generate summary report: {e}");
             }
         }
+    } else if args.summary_output.is_some() && json_output.is_none() {
+        eprintln!("Warning: --summary-output requires --output for JSON results");
     }
 
     eprintln!("bash-completion test suite completed.");
-    eprintln!("Results written to: {json_output}");
+    if let Some(ref output) = json_output {
+        eprintln!("Results written to: {output}");
+    }
 
     // Propagate test failure after reports are generated
     if pytest_failed {
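
A note on what the argument assembly above produces (command lines reconstructed from the code, not captured output): with the defaults (`-j 128`, no `--output`), the verbose log line would read `Running: pytest -n 128 ./t`; with, say, `-j 4 -x -t "test_alias" -f test_alias.py -o results.json`, it would read `Running: pytest -n 4 --json-report --json-report-file=results.json -x -k test_alias ./t/test_alias.py`. Unlike the replaced code, `--json-report` is passed only when `--output` is set, so runs without `-o` should no longer require the pytest-json-report plugin.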
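
The `--file` target-resolution logic now appears verbatim in both `list_bash_completion_tests` and `run_bash_completion_tests`. For clarity, here is that shared rule extracted as a standalone sketch (a hypothetical helper; the name `resolve_test_targets` is not part of this change):

    /// Map --file values to pytest targets, defaulting to the whole ./t tree.
    /// Hypothetical helper mirroring the duplicated logic in the diff above.
    fn resolve_test_targets(files: &[String]) -> Vec<String> {
        if files.is_empty() {
            // No explicit files: run everything under ./t
            return vec!["./t".to_string()];
        }
        files
            .iter()
            .map(|f| {
                // Accept bare file names as well as already-prefixed t/ paths
                if f.starts_with("./t/") || f.starts_with("t/") {
                    f.clone()
                } else {
                    format!("./t/{f}")
                }
            })
            .collect()
    }

Both call sites would then reduce to `let test_targets = resolve_test_targets(&args.file);`.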