ngrigg committed on
Commit
0b58425
1 Parent(s): 059c9d2

update llama model

Files changed (1)
  1. llama_models.py +6 -0
llama_models.py CHANGED
@@ -20,8 +20,14 @@ def load_model(model_name):
     return tokenizer, model
 
 async def process_text_local(model_name, text):
+    print("Loading model and tokenizer...")
     tokenizer, model = load_model(model_name)
+    print("Encoding text...")
     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)  # Set max_length to 512
+    print("Text encoded successfully.")
+    print("Generating output...")
     outputs = model.generate(**inputs, max_length=512)
+    print("Output generated successfully.")
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    print("Output decoded successfully.")
     return result
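
For reference, a minimal usage sketch of the updated coroutine, assuming llama_models.py is importable from the working directory and that load_model accepts a Hugging Face model identifier; the model name below is only an illustrative placeholder, not something specified in this commit:

import asyncio

from llama_models import process_text_local

async def main():
    # Hypothetical model id; substitute whatever load_model actually expects.
    result = await process_text_local(
        "meta-llama/Llama-2-7b-hf",
        "Summarize: LLaMA is a family of open language models.",
    )
    print(result)

if __name__ == "__main__":
    asyncio.run(main())

Because process_text_local is async, it has to be awaited (here via asyncio.run); the added print statements in this commit will trace each stage of the call as it runs.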