Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

to use Fast tokenizer #46

Merged
merged 5 commits into from
Jun 27, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions src/tevatron/arguments.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,12 +99,15 @@ def __post_init__(self):
self.dataset_split = 'train'
self.dataset_language = 'default'
if self.train_dir is not None:
files = os.listdir(self.train_dir)
self.train_path = [
os.path.join(self.train_dir, f)
for f in files
if f.endswith('jsonl') or f.endswith('json')
]
if os.path.isdir(self.train_dir):
files = os.listdir(self.train_dir)
self.train_path = [
os.path.join(self.train_dir, f)
for f in files
if f.endswith('jsonl') or f.endswith('json')
]
else:
self.train_path = [self.train_dir]
else:
self.train_path = None

Expand Down
4 changes: 2 additions & 2 deletions src/tevatron/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def __init__(
self.total_len = len(self.train_data)

def create_one_example(self, text_encoding: List[int], is_query=False):
item = self.tok.encode_plus(
item = self.tok.prepare_for_model(
text_encoding,
truncation='only_first',
max_length=self.data_args.q_max_len if is_query else self.data_args.p_max_len,
Expand Down Expand Up @@ -95,7 +95,7 @@ def __len__(self):

def __getitem__(self, item) -> Tuple[str, BatchEncoding]:
text_id, text = (self.encode_data[item][f] for f in self.input_keys)
encoded_text = self.tok.encode_plus(
encoded_text = self.tok.prepare_for_model(
text,
max_length=self.max_len,
truncation='only_first',
Expand Down
2 changes: 1 addition & 1 deletion src/tevatron/datasets/preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,4 +54,4 @@ def __call__(self, example):
add_special_tokens=False,
max_length=self.text_max_length,
truncation=True)
return {'text_id': docid, 'text': text}
return {'text_id': docid, 'text': text}
3 changes: 1 addition & 2 deletions src/tevatron/driver/encode.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,7 @@ def main():
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False,
cache_dir=model_args.cache_dir
)

model = DenseModel.load(
Expand Down
10 changes: 8 additions & 2 deletions src/tevatron/driver/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import os
import sys

import torch
from transformers import AutoConfig, AutoTokenizer
from transformers import (
HfArgumentParser,
Expand Down Expand Up @@ -66,8 +67,7 @@ def main():
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False,
cache_dir=model_args.cache_dir
)
model = DenseModel.build(
model_args,
Expand All @@ -78,7 +78,13 @@ def main():

train_dataset = HFTrainDataset(tokenizer=tokenizer, data_args=data_args,
cache_dir=data_args.data_cache_dir or model_args.cache_dir)
if training_args.local_rank > 0:
print("Waiting for main process to perform the mapping")
torch.distributed.barrier()
train_dataset = TrainDataset(data_args, train_dataset.process(), tokenizer)
if training_args.local_rank == 0:
print("Loading results from main process")
torch.distributed.barrier()

trainer_cls = GCTrainer if training_args.grad_cache else Trainer
trainer = trainer_cls(
Expand Down
3 changes: 2 additions & 1 deletion src/tevatron/faiss_retriever/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

def search_queries(retriever, q_reps, p_lookup, args):
if args.batch_size > 0:
all_scores, all_indices = retriever.batch_search(q_reps, args.depth, args.batch_size)
all_scores, all_indices = retriever.batch_search(q_reps, args.depth, args.batch_size, args.quiet)
else:
all_scores, all_indices = retriever.search(q_reps, args.depth)

Expand Down Expand Up @@ -56,6 +56,7 @@ def main():
parser.add_argument('--depth', type=int, default=1000)
parser.add_argument('--save_ranking_to', required=True)
parser.add_argument('--save_text', action='store_true')
parser.add_argument('--quiet', action='store_true')

args = parser.parse_args()

Expand Down
5 changes: 3 additions & 2 deletions src/tevatron/faiss_retriever/retriever.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import faiss

import logging
from tqdm import tqdm

logger = logging.getLogger(__name__)

Expand All @@ -17,11 +18,11 @@ def add(self, p_reps: np.ndarray):
def search(self, q_reps: np.ndarray, k: int):
    """Return the top-k nearest neighbors for each query vector.

    Thin delegation to the underlying faiss index.

    :param q_reps: query representations, shape (num_queries, dim) —
        dtype presumably float32 as faiss requires; TODO confirm at call site.
    :param k: number of neighbors to retrieve per query.
    :return: whatever ``faiss.Index.search`` returns — a
        (scores, indices) pair of arrays, each shaped (num_queries, k).
    """
    return self.index.search(q_reps, k)

def batch_search(self, q_reps: np.ndarray, k: int, batch_size: int):
def batch_search(self, q_reps: np.ndarray, k: int, batch_size: int, quiet: bool=False):
num_query = q_reps.shape[0]
all_scores = []
all_indices = []
for start_idx in range(0, num_query, batch_size):
for start_idx in tqdm(range(0, num_query, batch_size), disable=quiet):
nn_scores, nn_indices = self.search(q_reps[start_idx: start_idx + batch_size], k)
all_scores.append(nn_scores)
all_indices.append(nn_indices)
Expand Down