chendl committed on
Commit
098738d
1 Parent(s): ed66a91

update chat

Browse files
app.py CHANGED
@@ -234,7 +234,7 @@ def upload_img(gr_img, text_input, chat_state,chatbot):
234
  chatbot = chatbot + [[(path,), None]]
235
  llm_message = chat.upload_img(gr_img, chat_state, img_list)
236
  return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(
237
- value="Start Chatting", interactive=False), chat_state, img_list
238
 
239
 
240
  def gradio_ask(user_message, chatbot, chat_state):
@@ -293,7 +293,7 @@ with gr.Blocks() as demo:
293
  text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False)
294
 
295
  upload_button.click(upload_img, [image, text_input, chat_state,chatbot],
296
- [image, text_input, upload_button, chat_state, img_list])
297
 
298
  text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
299
  gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
 
234
  chatbot = chatbot + [[(path,), None]]
235
  llm_message = chat.upload_img(gr_img, chat_state, img_list)
236
  return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(
237
+ value="Start Chatting", interactive=False), chat_state, img_list,chatbot
238
 
239
 
240
  def gradio_ask(user_message, chatbot, chat_state):
 
293
  text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False)
294
 
295
  upload_button.click(upload_img, [image, text_input, chat_state,chatbot],
296
+ [image, text_input, upload_button, chat_state, img_list,chatbot])
297
 
298
  text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
299
  gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
multimodal/open_flamingo/chat/conversation.py CHANGED
@@ -318,7 +318,7 @@ class Chat:
318
  previsual_token_id = self.tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1]
319
  prebox_token_id = self.tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1]
320
  size = 224
321
- model.eval()
322
  # "/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/cdl/tmp_img/chat_vis/chat19.png"
323
  # image_path = input("Please enter the image path: ")
324
  image = img_list[0].convert("RGB")
@@ -358,8 +358,9 @@ class Chat:
358
  image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist()
359
  image_start_index_list = [[x] for x in image_start_index_list]
360
  image_nums = [1] * len(input_ids)
361
- with torch.no_grad() and torch.cuda.amp.autocast(dtype=torch.float16):
362
- outputs = model.generate(
 
363
  batch_images,
364
  input_ids,
365
  attention_mask=attention_mask,
 
318
  previsual_token_id = self.tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1]
319
  prebox_token_id = self.tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1]
320
  size = 224
321
+ self.model.eval()
322
  # "/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/cdl/tmp_img/chat_vis/chat19.png"
323
  # image_path = input("Please enter the image path: ")
324
  image = img_list[0].convert("RGB")
 
358
  image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist()
359
  image_start_index_list = [[x] for x in image_start_index_list]
360
  image_nums = [1] * len(input_ids)
361
+ # and torch.cuda.amp.autocast(dtype=torch.float16)
362
+ with torch.no_grad():
363
+ outputs = self.model.generate(
364
  batch_images,
365
  input_ids,
366
  attention_mask=attention_mask,