Allen Park committed
Commit f03cfb2
1 Parent(s): bd26917

comment out recent commit

Files changed (1)
  1. app.py +29 -24
app.py CHANGED
@@ -10,16 +10,20 @@ if torch.cuda.is_available():
 else:
     device = "cpu"
 
-def load_model_and_tokenizer(model_choice):
-    if model_choice == "Patronus Lynx 8B":
-        model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
-    else:
-        model_name = "PatronusAI/Llama-3-Patronus-Lynx-70B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
+model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", torch_dtype=torch.float16, device_map="auto").to(device)
+model.gradient_checkpointing_enable()
+
+# def load_model_and_tokenizer(model_choice):
+#     if model_choice == "Patronus Lynx 8B":
+#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
+#     else:
+#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-70B-Instruct"
 
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto").to(device)
-    model.gradient_checkpointing_enable()
-    return tokenizer, model
+#     tokenizer = AutoTokenizer.from_pretrained(model_name)
+#     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto").to(device)
+#     model.gradient_checkpointing_enable()
+#     return tokenizer, model
 
 PROMPT = """
 Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
@@ -73,7 +77,8 @@ HEADER = """
 """
 
 @spaces.GPU()
-def model_call(question, document, answer, tokenizer, model):
+# def model_call(question, document, answer, tokenizer, model):
+def model_call(question, document, answer):
     device = next(model.parameters()).device
     NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
     print("ENTIRE NEW_FORMAT", NEW_FORMAT)
@@ -95,31 +100,31 @@ def model_call(question, document, answer, tokenizer, model):
     print(generated_text)
     return generated_text
 
-def update_model(model_choice, tokenizer_state, model_state):
-    new_tokenizer, new_model = load_model_and_tokenizer(model_choice)
-    print("UPDATED MODEL", new_tokenizer, new_model)
-    return new_tokenizer, new_model
+# def update_model(model_choice, tokenizer_state, model_state):
+#     new_tokenizer, new_model = load_model_and_tokenizer(model_choice)
+#     print("UPDATED MODEL", new_tokenizer, new_model)
+#     return new_tokenizer, new_model
 
 inputs = [
     gr.Textbox(label="Question"),
     gr.Textbox(label="Document"),
     gr.Textbox(label="Answer")
 ]
-output = gr.Textbox(label="Output")
+# output = gr.Textbox(label="Output")
 
-submit_button = gr.Button("Submit")
+# submit_button = gr.Button("Submit")
 
 with gr.Blocks() as demo:
     gr.Markdown(HEADER)
+    gr.Interface(fn=model_call, inputs=inputs, outputs="text")
+    # tokenizer_state = gr.State()
+    # model_state = gr.State()
 
-    tokenizer_state = gr.State()
-    model_state = gr.State()
+    # model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
+    # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
 
-    model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
-    model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
+    # submit_button.click(fn=model_call, inputs=inputs, outputs=output)
 
-    submit_button.click(fn=model_call, inputs=inputs, outputs=output)
-
-    initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
-    demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])
+    # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
+    # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])
 demo.launch()
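
For orientation, here is a minimal sketch of the shape app.py takes after this commit. Only the changed hunks are visible above, so the imports, the CUDA branch, the full PROMPT and HEADER strings, and the tokenize/generate middle of model_call are assumptions filled in for the sketch; everything else mirrors the + lines of the diff.

import torch
import spaces  # Hugging Face Spaces GPU decorator (import assumed, not shown in the diff)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

if torch.cuda.is_available():
    device = "cuda"  # the if-branch sits above the hunk; "cuda" is assumed
else:
    device = "cpu"

# One fixed checkpoint; the 8B/70B chooser is commented out by this commit.
MODEL_NAME = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
).to(device)  # .to(device) kept as in the diff
model.gradient_checkpointing_enable()

# Abbreviated; the full PASS/FAIL rubric appears in the first hunk. The
# placeholder names are implied by the PROMPT.format(...) call below.
PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER ... Show your reasoning.
QUESTION: {question}
DOCUMENT: {document}
ANSWER: {answer}
"""

HEADER = "..."  # page header markdown; its contents fall outside the diff

@spaces.GPU()
def model_call(question, document, answer):
    # tokenizer and model now come from module scope rather than gr.State.
    device = next(model.parameters()).device
    NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
    print("ENTIRE NEW_FORMAT", NEW_FORMAT)
    # The generation body is elided by the diff; a plain generate() call is assumed.
    model_input = tokenizer(NEW_FORMAT, return_tensors="pt").to(device)
    output = model.generate(**model_input, max_new_tokens=256)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    print(generated_text)
    return generated_text

inputs = [
    gr.Textbox(label="Question"),
    gr.Textbox(label="Document"),
    gr.Textbox(label="Answer"),
]

with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    # gr.Interface replaces the hand-wired Submit button, Output textbox, and click handler.
    gr.Interface(fn=model_call, inputs=inputs, outputs="text")

demo.launch()

One plausible reading of the revert: shuttling the tokenizer and model through gr.State (and reloading them on every dropdown change) is heavyweight and easy to get wrong, while a single module-level load gives @spaces.GPU() a plain (question, document, answer) function that gr.Interface can wire straight to the three textboxes.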