Remove qas input wherever possible. Comment out NQ inference, since it is not working (#681)
Timoeller authored Jan 8, 2021
1 parent 42af265 commit d4f4508
Showing 6 changed files with 22 additions and 55 deletions.
34 changes: 0 additions & 34 deletions examples/inferencer_multiprocessing.py

This file was deleted.

30 changes: 16 additions & 14 deletions examples/natural_questions.py
@@ -122,21 +122,23 @@ def question_answering():
model.save(save_dir)
processor.save(save_dir)

# 9. Since training on the whole NQ corpus requires substantial compute resources we trained and uploaded a model on s3
fetch_archive_from_http("https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/models/roberta-base-squad2-nq.zip", output_dir="../saved_models/farm")
QA_input = [
{
"qas": ["Did GameTrailers rated Twilight Princess as one of the best games ever created?"],
"context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
}
]

model = QAInferencer.load(model_name_or_path="../saved_models/farm/roberta-base-squad2-nq", batch_size=batch_size, gpu=True)
result = model.inference_from_dicts(dicts=QA_input, return_json=False) # result is a list of QAPred objects

print(f"\nQuestion: Did GameTrailers rated Twilight Princess as one of the best games ever created?"
f"\nAnswer from model: {result[0].prediction[0].answer}")
model.close_multiprocessing_pool()
# TODO make inferencing work again with trained and saved model (currently the inferencer loads a fast Robertatokenizer, which is not supported by NQ)
# # 9. Since training on the whole NQ corpus requires substantial compute resources we trained and uploaded a model on s3
# fetch_archive_from_http("https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/models/roberta-base-squad2-nq.zip", output_dir="../saved_models/farm")
# QA_input = [
# {
# "qas": ["Did GameTrailers rated Twilight Princess as one of the best games ever created?"],
# "context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
# }
# ]
#
# model = QAInferencer.load(model_name_or_path="../saved_models/farm/roberta-base-squad2-nq", batch_size=batch_size, gpu=True)
# result = model.inference_from_dicts(dicts=QA_input, return_json=False) # result is a list of QAPred objects
#
# print(f"\nQuestion: Did GameTrailers rated Twilight Princess as one of the best games ever created?"
# f"\nAnswer from model: {result[0].prediction[0].answer}")
# model.close_multiprocessing_pool()

if __name__ == "__main__":
question_answering()
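For reference, a minimal and purely hypothetical sketch of what the TODO above might amount to: if the installed FARM version lets QAInferencer.load take a use_fast flag to force the slow RobertaTokenizer (an unverified assumption), the commented-out inference could be retried roughly like this, reusing batch_size and QA_input from the block above:

    # Hypothetical sketch only. use_fast=False is an assumption, not a verified
    # QAInferencer.load parameter in this FARM version; QA_input and batch_size
    # are the values from the commented-out block above.
    model = QAInferencer.load(
        model_name_or_path="../saved_models/farm/roberta-base-squad2-nq",
        batch_size=batch_size,
        gpu=True,
        use_fast=False,  # force a slow tokenizer, which NQ-style processing requires
    )
    result = model.inference_from_dicts(dicts=QA_input, return_json=False)
    print(result[0].prediction[0].answer)
    model.close_multiprocessing_pool()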
4 changes: 2 additions & 2 deletions examples/onnx_question_answering.py
@@ -20,8 +20,8 @@ def onnx_runtime_example():

qa_input = [
{
"qas": ["Who counted the game among the best ever made?"],
"context": "Twilight Princess was released to universal critical acclaim and commercial success. "
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. "
"It received perfect scores from major publications such as 1UP.com, Computer and Video Games, "
"Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators "
"GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii "
4 changes: 2 additions & 2 deletions examples/question_answering.py
@@ -106,8 +106,8 @@ def question_answering():
# 9. Load it & harvest your fruits (Inference)
QA_input = [
{
"qas": ["Who counted the game among the best ever made?"],
"context": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created."
}]

model = QAInferencer.load(save_dir, batch_size=40, gpu=True)
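The renamed keys feed straight into the same inference call; a minimal sketch of this step, using only calls that appear elsewhere in this diff (the long context string is abbreviated here):

    from farm.infer import QAInferencer

    # FARM-style QA input: "questions"/"text" instead of the deprecated "qas"/"context".
    QA_input = [{
        "questions": ["Who counted the game among the best ever made?"],
        "text": "Twilight Princess was released to universal critical acclaim ...",  # abbreviated
    }]

    model = QAInferencer.load(save_dir, batch_size=40, gpu=True)  # save_dir from step 8 of the script
    result = model.inference_from_dicts(dicts=QA_input, return_json=False)  # list of QAPred objects
    print(result[0].prediction[0].answer)
    model.close_multiprocessing_pool()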
4 changes: 2 additions & 2 deletions examples/streaming_inference.py
@@ -33,8 +33,8 @@ def sample_dicts_generator():
:rtype: iter
"""
qa_input = {
"qas": ["Who counted the game among the best ever made?"],
"context": "Twilight Princess was released to universal critical acclaim and commercial success. "
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. "
"It received perfect scores from major publications such as 1UP.com, Computer and Video Games, "
"Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators "
"GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii "
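The generator above is consumed lazily by the inferencer; a rough sketch, where streaming=True and multiprocessing_chunksize are assumptions about FARM's streaming API that this excerpt does not show, and the model name is only an example:

    from farm.infer import QAInferencer

    # Sketch: stream dicts from the generator instead of materialising a list.
    # streaming=True / multiprocessing_chunksize are assumed parameters; verify
    # them against the installed FARM version.
    model = QAInferencer.load("deepset/roberta-base-squad2", batch_size=40, gpu=True)
    results = model.inference_from_dicts(
        dicts=sample_dicts_generator(),
        streaming=True,
        multiprocessing_chunksize=20,
    )
    for prediction in results:  # results is a generator; predictions arrive chunk by chunk
        print(prediction)
    model.close_multiprocessing_pool()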
1 change: 0 additions & 1 deletion farm/infer.py
@@ -389,7 +389,6 @@ def inference_from_dicts(
Runs down-stream inference on samples created from input dictionaries.
The format of the input `dicts` depends on the task:
* QA (SQuAD style): [{"qas": ["What is X?"], "context": "Some context containing the answer"}] (Deprecated)
* QA (FARM style): [{"questions": ["What is X?"], "text": "Some context containing the answer"}]
* Classification / NER / embeddings: [{"text": "Some input text"}]
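To make the two formats concrete, a minimal sketch of the input dicts the docstring describes (the SQuAD-style form is the one this commit phases out of the examples):

    # Deprecated SQuAD-style input, still accepted but being phased out:
    squad_style = [{"qas": ["What is X?"], "context": "Some context containing the answer"}]

    # FARM-style input, now used throughout the examples:
    farm_style = [{"questions": ["What is X?"], "text": "Some context containing the answer"}]

    # Either list is passed as the `dicts` argument of inference_from_dicts().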
