
Commit

Fix new tokenizer problem
RobinDong committed May 2, 2024
1 parent: 4174dab · commit: f724c26
Showing 2 changed files with 2 additions and 2 deletions.
tinymm/CLIP/demo.py (2 changes: 1 addition & 1 deletion)
@@ -56,7 +56,7 @@ def start(self, checkpoint: str, cuda: bool = False):
         text = st.text_input("Input:")
         if not text:
             return
-        ids = self.enc(text)
+        ids = self.enc(text)["input_ids"]
         ids = np.pad(ids, (0, (self.seq_len - len(ids))), "constant")
         ids = torch.tensor(ids).unsqueeze(0)
         txt_embd = model.txt_encoder(ids)
tinymm/CLIP/evaluate.py (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ def __init__(self, model):
         print("Compute embeddings for all categories...")
        for index in tqdm(range(1, self.nr_categories + 1)):
            text = f"A photo of a {self.cats[index]}"
-           ids = enc(text)
+           ids = enc(text)["input_ids"]
            ids = np.pad(ids, (0, (self.seq_len - len(ids))), "constant")
            ids = torch.tensor(ids).unsqueeze(0)
            with torch.no_grad():
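Context for the change: both call sites previously passed the raw return value of enc(text) straight to np.pad. If enc behaves like a Hugging Face tokenizer (an assumption; the enc object is defined elsewhere in the repository), calling it returns a dict-like encoding rather than a flat list of token ids, so the code has to pull out "input_ids" before padding. A minimal sketch of that pattern, with the tokenizer choice and seq_len value picked only for illustration:

    # Minimal sketch, assuming `enc` is a Hugging Face tokenizer;
    # the actual tokenizer used by tinymm may differ.
    import numpy as np
    import torch
    from transformers import AutoTokenizer

    enc = AutoTokenizer.from_pretrained("gpt2")  # hypothetical tokenizer choice
    seq_len = 64                                 # hypothetical fixed sequence length

    out = enc("A photo of a cat")   # dict-like BatchEncoding, not a flat list
    ids = out["input_ids"]          # the token ids the text encoder expects

    # Pad to the fixed length and add a batch dimension,
    # mirroring the lines in demo.py / evaluate.py.
    ids = np.pad(ids, (0, seq_len - len(ids)), "constant")
    ids = torch.tensor(ids).unsqueeze(0)         # shape: (1, seq_len)

Indexing with ["input_ids"] is exactly what the two one-line edits above add; without it, np.pad would receive the whole encoding object instead of a list of ids.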
