From 3fc41ef646de2867930073749be0f82b11551454 Mon Sep 17 00:00:00 2001 From: tdolan21 Date: Mon, 6 Nov 2023 20:00:41 -0500 Subject: [PATCH] Added the EOS token in the demo code to prevent run-on generation --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 91eb9b35..d67f1466 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-34B", device_map="auto", torch_dtype="auto", trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-34B", trust_remote_code=True) inputs = tokenizer("There's a place where time stands still. A place of breath taking wonder, but also", return_tensors="pt") -outputs = model.generate(inputs.input_ids.cuda(), max_new_tokens=256) +max_new_tokens = 256 + +outputs = model.generate( + inputs.input_ids.cuda(), + max_new_tokens=max_new_tokens, + eos_token_id=tokenizer.eos_token_id +) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ```