masanorihirano committed
Commit df7df20
Parent(s): 1a09cf8
Files changed (1):
  app.py (+22, -3)
app.py CHANGED
@@ -21,7 +21,7 @@ from transformers import LlamaTokenizer
 
 print(datetime.datetime.now())
 
-NUM_THREADS = 4
+NUM_THREADS = 1
 
 print(NUM_THREADS)
 
@@ -166,8 +166,27 @@ def evaluate(
     top_k: int = 0
     prompt = generate_prompt(instruction, input)
     inputs = tokenizer(prompt, return_tensors="pt")
-    if len(inputs["input_ids"][0]) > max_tokens:
-        return f"please reduce length. Currently, {len(inputs['input_ids'][0])} token are used.", gr.update(interactive=True), gr.update(interactive=True)
+    if len(inputs["input_ids"][0]) > max_tokens + 10:
+        if HF_TOKEN and DATASET_REPOSITORY:
+            try:
+                now = datetime.datetime.now()
+                current_time = now.strftime("%Y-%m-%d %H:%M:%S")
+                print(f"[{current_time}] Pushing prompt and completion to the Hub")
+                save_inputs_and_outputs(
+                    now,
+                    prompt,
+                    "",
+                    {
+                        "temperature": temperature,
+                        "top_p": top_p,
+                        "top_k": top_k,
+                        "num_beams": num_beams,
+                        "max_tokens": max_tokens,
+                    },
+                )
+            except Exception as e:
+                print(e)
+        return f"please reduce the input length. Currently, {len(inputs['input_ids'][0])} tokens are used.", gr.update(interactive=True), gr.update(interactive=True)
     input_ids = inputs["input_ids"].to(device)
     generation_config = GenerationConfig(
         temperature=temperature,
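The new branch calls save_inputs_and_outputs, which is defined elsewhere in app.py and not shown in this diff. Below is a rough sketch of what such a logging helper could look like, assuming it writes one JSON record per request to the dataset repository named by DATASET_REPOSITORY via huggingface_hub; only the names HF_TOKEN and DATASET_REPOSITORY come from the diff, the body is an assumption, not the Space's actual implementation.

# Hypothetical sketch of save_inputs_and_outputs; only HF_TOKEN and
# DATASET_REPOSITORY appear in the diff above, everything else is assumed.
import datetime
import json
import os

from huggingface_hub import HfApi

HF_TOKEN = os.environ.get("HF_TOKEN")
DATASET_REPOSITORY = os.environ.get("DATASET_REPOSITORY")  # e.g. "user/prompt-logs" (assumed)


def save_inputs_and_outputs(now: datetime.datetime, prompt: str, completion: str, params: dict) -> None:
    """Append one request record to a Hub dataset repository."""
    record = {
        "time": now.strftime("%Y-%m-%d %H:%M:%S"),
        "prompt": prompt,
        "completion": completion,  # empty string when the prompt was rejected
        **params,  # temperature, top_p, top_k, num_beams, max_tokens
    }
    api = HfApi(token=HF_TOKEN)
    # One timestamped file per request keeps the log append-only,
    # so concurrent Gradio requests never race on a shared file.
    api.upload_file(
        path_or_fileobj=json.dumps(record, ensure_ascii=False).encode("utf-8"),
        path_in_repo=f"logs/{now.strftime('%Y%m%d-%H%M%S-%f')}.json",
        repo_id=DATASET_REPOSITORY,
        repo_type="dataset",
    )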