Arturo Jiménez de los Galanes Reguillos committed
Commit 25de67e
1 Parent(s): 0580074

Add chat template directly to tokenizer

Files changed (1): app.py (+2, -2)
app.py CHANGED
@@ -22,6 +22,7 @@ def messages_for(python):
     ]
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
+tokenizer.chat_template = CHAT_TEMPLATE
 model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.bfloat16, device_map="auto")
 model.eval()
 
@@ -32,7 +33,6 @@ cplusplus = None
 def translate(python):
     inputs = tokenizer.apply_chat_template(
         messages_for(python),
-        chat_template=CHAT_TEMPLATE,
         return_tensors="pt").to(model.device)
     generation_kwargs = dict(
         inputs,
@@ -41,7 +41,7 @@ def translate(python):
         do_sample=False,
         pad_token_id=tokenizer.eos_token_id,
         eos_token_id=tokenizer.eos_token_id
-    )
+    )
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     cplusplus = ""
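
For context, a minimal sketch of the pattern this commit adopts: the Jinja template is assigned to tokenizer.chat_template once at load time, so apply_chat_template no longer needs the per-call chat_template= argument. The checkpoint name and template string below are illustrative placeholders, not the actual MODEL and CHAT_TEMPLATE defined in app.py.

    from transformers import AutoTokenizer

    # Placeholder values for illustration only; app.py defines its own MODEL and CHAT_TEMPLATE.
    MODEL = "gpt2"
    CHAT_TEMPLATE = (
        "{% for message in messages %}"
        "<|{{ message['role'] }}|>\n{{ message['content'] }}\n"
        "{% endfor %}"
        "<|assistant|>\n"
    )

    tokenizer = AutoTokenizer.from_pretrained(MODEL)
    # Attach the template to the tokenizer once; later calls pick it up automatically.
    tokenizer.chat_template = CHAT_TEMPLATE

    messages = [{"role": "user", "content": "Convert this Python function to C++."}]
    # No chat_template= argument needed here any more; the tokenizer's own template is used.
    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
    print(inputs.shape)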