-
Notifications
You must be signed in to change notification settings - Fork 32
/
evaluate.py
41 lines (34 loc) · 1.27 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import argparse
import sys
import os
from method_evaluator import MethodEvaluator
from rouge_results_writer import export_results
import timing
from baseline import baseline
# Imports files from a parent directory.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'textrank'))
from summa.summarizer import summarize as textrank
# Command-line entry point: evaluate a summarization method (TextRank by
# default, or the lead-sentence baseline with -b) against a dataset using
# ROUGE, then export the scores.
parser = argparse.ArgumentParser(
    description="Evaluate a summarization method with ROUGE scores.")
parser.add_argument("-n", "--documents", metavar='N', type=str, nargs='+',
help="specify dataset text numbers to summarize.")
parser.add_argument("-d", "--dataset", help="specify which dataset to use.")
parser.add_argument("-b", "--baseline", action="store_true", help="calculates the baselines scores.")
args = parser.parse_args()

# Use the summa dataset by default.
dataset = args.dataset if args.dataset else 'summa'

# argparse already yields a list for nargs='+' (or None when the flag is
# absent, which means "score every document") — no copy needed.
documents = args.documents

# Resolve the method and its name together so the exported results are
# always labelled consistently with the method that produced them.
# TextRank is the default; -b switches to the baseline.
if args.baseline:
    method, method_name = baseline, 'baseline'
else:
    method, method_name = textrank, 'textrank'

evaluator = MethodEvaluator(dataset, method, documents)
results = evaluator.get_rouge_scores()
export_results(dataset, results, method_name)