Hunzla committed on
Commit
e471312
1 Parent(s): bfcd66a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -0
app.py CHANGED
@@ -4,9 +4,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
 
5
  # Load model and tokenizer
6
  model_name = "meta-llama/Llama-2-7b-chat-hf"
 
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
 
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
9
  chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
10
 
11
  # Define the generate_response function
12
  def generate_response(prompt):
 
# --- One-time model setup (executes at import time) ---
# NOTE(review): meta-llama/Llama-2-7b-chat-hf is a gated 7B-parameter model;
# loading it needs HF authentication and substantial RAM/VRAM — confirm the
# hosting hardware can accommodate it.
model_name = "meta-llama/Llama-2-7b-chat-hf"

# Progress markers so the host logs show how far startup has gotten.
print("started loading model")
model = AutoModelForCausalLM.from_pretrained(model_name)
print("loaded model")

tokenizer = AutoTokenizer.from_pretrained(model_name)
print("loaded tokenizer")

# Shared text-generation pipeline, reused by generate_response() further down.
chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
print("built pipeline")
14
 
15
  # Define the generate_response function
16
  def generate_response(prompt):