test_resolve.sh

#!/usr/bin/env bash
set -euo pipefail
cd "$(dirname "$0")"
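
# Usage (extra arguments are forwarded to pytest via "$@"):
#   ./test_resolve.sh             # run the resolver suite
#   ./test_resolve.sh -k cache    # e.g. filter tests with pytest's -k

# Prefer the project virtualenv's pytest when present; otherwise fall back
# to whichever pytest is on PATH.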
if [[ -x .venv/bin/pytest ]]; then
  PYTEST=.venv/bin/pytest
else
  PYTEST=pytest
fi
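
# Print a short preamble describing the fixture cases before the run.
# Judging from the fields read below, each case is assumed to look roughly
# like: {"name": "...", "subject": "...", "context": {"language": "en"}}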
python3 - <<'PY'
import json
from pathlib import Path

cases = json.loads(Path('tests/fixtures/resolve_harness_cases.json').read_text())
print('\nResolver test runner')
print(f' project : {Path.cwd()}')
print(f' cases   : {len(cases)}')
print('\nSubjects under test:')
for case in cases:
    print(f" - {case['name']}: {case['subject']} ({case['context'].get('language', 'en')})")
print('\nWhat is tested:')
print(' - cache hits resolve when confidence is sufficient')
print(' - low-confidence cache triggers re-resolution in ranked mode')
print(' - interactive mode returns all candidates below threshold')
print(' - embedding scoring is recorded when enabled')
print('\nRunning tests...\n')
PY
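
# Write results as JUnit XML to a temp file; the trap removes it on exit,
# including when pytest fails and `set -e` aborts the script.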
REPORT=$(mktemp)
trap 'rm -f "$REPORT"' EXIT

"$PYTEST" -q --junitxml="$REPORT" \
  tests/test_resolve_tool.py \
  tests/test_resolve_strategies.py \
  "$@"
python3 - "$REPORT" <<'PY'
import sys
import xml.etree.ElementTree as ET
from pathlib import Path

report = Path(sys.argv[1])
root = ET.parse(report).getroot()
rows = []
for case in root.iter('testcase'):
    rows.append((case.get('classname', ''), case.get('name', ''), float(case.get('time', '0'))))
rows.sort(key=lambda r: r[2], reverse=True)

print('\nPer-test timings:')
print(f"{'test':60} {'seconds':>8}")
print('-' * 70)
for cls, name, secs in rows:
    print(f"{(cls + '::' + name):60} {secs:8.3f}")
PY