#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import json
import os

import yaml

from pythainlp import cli
from pythainlp.benchmarks import word_tokenization
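
# The CLI currently exposes a single task, "word-tokenization".
# A typical invocation (the file names below are illustrative only):
#
#   thainlp benchmark word-tokenization \
#       --input-file tokenised.txt \
#       --test-file ground-truth.txt \
#       --save-details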

# Read a text file and return its lines with surrounding whitespace stripped
# (one tokenised sample per line).
def _read_file(path):
    with open(path, "r", encoding="utf-8") as f:
        lines = map(lambda r: r.strip(), f.readlines())
    return list(lines)

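
# Entry point for `thainlp benchmark`: reads the task name from the command
# line and dispatches to the class implementing that benchmark.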
class App:
    def __init__(self, argv):
        parser = argparse.ArgumentParser(
            prog="benchmark",
            description=(
                "Benchmark for various tasks; "
                "currently, only word tokenization is supported."
            ),
            usage=(
                "thainlp benchmark [task] [task-options]\n\n"
                "tasks:\n\n"
                "word-tokenization      benchmark word tokenization\n\n"
                "--"
            ),
        )

        parser.add_argument(
            "task", type=str, help="[word-tokenization]"
        )

        # argv[2] holds the task name; everything after it belongs to the task.
        args = parser.parse_args(argv[2:3])
        cli.exit_if_empty(args.task, parser)
        task = args.task.lower()

        task_argv = argv[3:]
        if task == "word-tokenization":
            WordTokenizationBenchmark(task, task_argv)

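
# Benchmarks a tokenised input file against a ground-truth file: both files
# are read line by line, compared sample by sample, and aggregate
# character-level and word-level scores are printed (and optionally saved).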
class WordTokenizationBenchmark:
    def __init__(self, name, argv):
        parser = argparse.ArgumentParser(**cli.make_usage("benchmark " + name))

        parser.add_argument(
            "--input-file",
            action="store",
            help="Path to the input file to compare against the test file",
        )

        parser.add_argument(
            "--test-file",
            action="store",
            help="Path to the test file, i.e. the ground truth",
        )

        parser.add_argument(
            "--save-details",
            default=False,
            action="store_true",
            help=(
                "Save comparison details to files (eval-XXX.yml"
                " and eval-details-XXX.json)"
            ),
        )

        args = parser.parse_args(argv)

        actual = _read_file(args.input_file)
        expected = _read_file(args.test_file)

        assert len(actual) == len(expected), \
            "Input and test files do not have the same number of samples"

        print(
            "Benchmarking %s against %s with %d samples in total"
            % (args.input_file, args.test_file, len(actual))
        )
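
        # The benchmark call below yields per-sample counts (one record per
        # expected/actual pair); the column sums give corpus-level totals.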
        df_raw = word_tokenization.benchmark(expected, actual)

        columns = [
            "char_level:tp",
            "char_level:fp",
            "char_level:tn",
            "char_level:fn",
            "word_level:correctly_tokenised_words",
            "word_level:total_words_in_sample",
            "word_level:total_words_in_ref_sample",
        ]

        statistics = dict()

        for c in columns:
            statistics[c] = float(df_raw[c].sum())
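
        # Standard definitions: precision = TP / (TP + FP),
        # recall = TP / (TP + FN); the word-level scores use counts of
        # correctly tokenised words instead of characters.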
        statistics["char_level:precision"] = statistics["char_level:tp"] / (
            statistics["char_level:tp"] + statistics["char_level:fp"]
        )

        statistics["char_level:recall"] = statistics["char_level:tp"] / (
            statistics["char_level:tp"] + statistics["char_level:fn"]
        )

        statistics["word_level:precision"] = \
            statistics["word_level:correctly_tokenised_words"] \
            / statistics["word_level:total_words_in_sample"]

        statistics["word_level:recall"] = \
            statistics["word_level:correctly_tokenised_words"] \
            / statistics["word_level:total_words_in_ref_sample"]

        print("============== Benchmark Result ==============")

        for c in ["tp", "fn", "tn", "fp", "precision", "recall"]:
            c = f"char_level:{c}"
            v = statistics[c]
            print(f"{c:>40s} {v:.4f}")

        for c in [
            "total_words_in_sample",
            "total_words_in_ref_sample",
            "correctly_tokenised_words",
            "precision",
            "recall",
        ]:
            c = f"word_level:{c}"
            v = statistics[c]
            print(f"{c:>40s} {v:.4f}")
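
        # --save-details writes two files next to the input file:
        #   eval-<name>.yml           the aggregate statistics above
        #   eval-details-<name>.json  per-sample metrics plus the expected
        #                             and actual tokenisations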
        if args.save_details:
            # Build the output paths next to the input file.
            dir_name = os.path.dirname(args.input_file)
            file_name = os.path.basename(args.input_file).split(".")[0]

            res_path = os.path.join(dir_name, "eval-%s.yml" % file_name)
            print("Evaluation result is saved to %s" % res_path)

            with open(res_path, "w", encoding="utf-8") as outfile:
                yaml.dump(statistics, outfile, default_flow_style=False)

            res_path = os.path.join(
                dir_name, "eval-details-%s.json" % file_name
            )
            print("Details of comparisons are saved to %s" % res_path)

            with open(res_path, "w", encoding="utf-8") as f:
                samples = []
                for i, r in enumerate(df_raw.to_dict("records")):
                    expected, actual = r["expected"], r["actual"]
                    del r["expected"]
                    del r["actual"]

                    samples.append(
                        dict(
                            metrics=r,
                            expected=expected,
                            actual=actual,
                            id=i,
                        )
                    )

                details = dict(metrics=statistics, samples=samples)

                json.dump(details, f, ensure_ascii=False)