"""Evaluate sentence segmentation (sent_tokenize) against test cases.""" |
|
|
import json |
|
|
import argparse |
|
|
from underthesea import sent_tokenize as underthesea_sent_tokenize |
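
# The test-case file is expected to be a JSON list of objects with the fields
# read below: "id", "category", "input", and "expected" (the gold sentence
# list). An illustrative, hypothetical entry (not from the real file):
#
#   [
#     {
#       "id": "abbrev-001",
#       "category": "abbreviation",
#       "input": "Ông A sống ở TP. Hồ Chí Minh. Ông sinh năm 1990.",
#       "expected": ["Ông A sống ở TP. Hồ Chí Minh.", "Ông sinh năm 1990."]
#     }
#   ]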


def evaluate(test_cases_path: str, verbose: bool = False, improved: bool = False):
    """Run every test case through the selected tokenizer and report accuracy."""
    if improved:
        # Locally trained Punkt model shipped alongside this script.
        from sent_tokenize import sent_tokenize
    else:
        sent_tokenize = underthesea_sent_tokenize

    with open(test_cases_path, "r", encoding="utf-8") as f:
        test_cases = json.load(f)

    total = len(test_cases)
    correct = 0
    incorrect = 0
    by_category = {}  # category -> {"total": int, "correct": int}
    failures = []

    for tc in test_cases:
        input_text = tc["input"]
        expected = tc["expected"]
        category = tc["category"]

        actual = sent_tokenize(input_text)
        # A case passes only if the full sentence list matches exactly.
        is_correct = actual == expected

        if category not in by_category:
            by_category[category] = {"total": 0, "correct": 0}
        by_category[category]["total"] += 1

        if is_correct:
            correct += 1
            by_category[category]["correct"] += 1
        else:
            incorrect += 1
            failures.append(
                {
                    "id": tc["id"],
                    "category": category,
                    "input": input_text,
                    "expected": expected,
                    "actual": actual,
                }
            )

    print("=" * 60)
    label = "IMPROVED (trained Punkt)" if improved else "BASELINE (underthesea)"
    print(f"SENTENCE SEGMENTATION EVALUATION - {label}")
    print("=" * 60)
    print(f"\nTotal: {total}  Correct: {correct}  Incorrect: {incorrect}")
    print(f"Accuracy: {100 * correct / total:.1f}%")
    print()
    print(f"{'Category':<25} {'Total':>6} {'Correct':>8} {'Acc':>7}")
    print("-" * 48)
    for cat in sorted(by_category):
        stats = by_category[cat]
        acc = 100 * stats["correct"] / stats["total"]
        print(f"{cat:<25} {stats['total']:>6} {stats['correct']:>8} {acc:>6.1f}%")

    if verbose and failures:
        print(f"\n{'=' * 60}")
        print(f"FAILURES ({len(failures)})")
        print("=" * 60)
        for failure in failures:
            print(f"\n[{failure['id']}] {failure['category']}")
            print(f"  Input:    {failure['input'][:100]}...")
            print(f"  Expected: {[s[:60] for s in failure['expected']]}")
            print(f"  Actual:   {[s[:60] for s in failure['actual']]}")

    return {
        "total": total,
        "correct": correct,
        "incorrect": incorrect,
        "accuracy": correct / total,
        "by_category": by_category,
        "failures": failures,
    }
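
# The returned dict makes it easy to compare runs programmatically. A minimal
# sketch (assumes "test_cases.json" exists at the default path):
#
#   baseline = evaluate("test_cases.json")
#   improved = evaluate("test_cases.json", improved=True)
#   print(f"Accuracy delta: {improved['accuracy'] - baseline['accuracy']:+.1%}")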


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test-cases",
        default="test_cases.json",
        help="Path to test cases JSON file",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Show failure details"
    )
    parser.add_argument(
        "--improved",
        action="store_true",
        help="Use trained Punkt model instead of underthesea default",
    )
    args = parser.parse_args()

    evaluate(args.test_cases, verbose=args.verbose, improved=args.improved)
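
# Example invocations (assuming this file is saved as evaluate.py):
#
#   python evaluate.py                  # baseline underthesea tokenizer
#   python evaluate.py --improved -v    # trained Punkt model, print failures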