#!/usr/bin/env bash
# Resolver test runner: print the harness summary, run the resolver test
# suite under pytest, then report per-test timings from the JUnit XML.
set -euo pipefail
cd "$(dirname "$0")"

# Prefer the project-local virtualenv pytest; otherwise use whatever is on PATH.
PYTEST=pytest
[[ -x .venv/bin/pytest ]] && PYTEST=.venv/bin/pytest
# Print a human-readable summary of the fixture cases before running pytest.
python3 - <<'PY'
import json
from pathlib import Path

suite = json.loads(Path('tests/fixtures/resolve_harness_cases.json').read_text())

# Assemble the banner as a list of lines, then emit it in one go.
banner = [
    '\nResolver test runner',
    f' project : {Path.cwd()}',
    f' cases : {len(suite)}',
    '\nSubjects under test:',
]
for entry in suite:
    lang = entry['context'].get('language', 'en')
    banner.append(f" - {entry['name']}: {entry['subject']} ({lang})")
banner += [
    '\nWhat is tested:',
    ' - cache hits resolve when confidence is sufficient',
    ' - low-confidence cache triggers re-resolution in ranked mode',
    ' - interactive mode returns all candidates below threshold',
    ' - embedding scoring is recorded when enabled',
    '\nRunning tests...\n',
]
print('\n'.join(banner))
PY
# Collect results in a temporary JUnit XML report; the trap removes it on any exit.
REPORT=$(mktemp)
trap 'rm -f "$REPORT"' EXIT

# Resolver test suites; extra pytest args pass through via "$@".
suites=(
  tests/test_resolve_tool.py
  tests/test_resolve_strategies.py
)
"$PYTEST" -q --junitxml="$REPORT" "${suites[@]}" "$@"
# Summarize per-test durations from the JUnit XML report, slowest first.
python3 - "$REPORT" <<'PY'
import sys
import xml.etree.ElementTree as ET

root = ET.parse(sys.argv[1]).getroot()
# (classname, name, seconds) per testcase, ordered by descending duration.
timed = sorted(
    ((tc.get('classname', ''), tc.get('name', ''), float(tc.get('time', '0')))
     for tc in root.iter('testcase')),
    key=lambda row: row[2],
    reverse=True,
)
print('\nPer-test timings:')
print(f"{'test':60} {'seconds':>8}")
print('-' * 70)
for classname, testname, seconds in timed:
    print(f"{(classname + '::' + testname):60} {seconds:8.3f}")
PY