Vision-CAIR committed
Commit 9767856
1 Parent(s): 7a02f27

Push model using huggingface_hub.
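The commit itself was produced by pushing the model with huggingface_hub. A minimal sketch of how such a push typically looks, assuming the class and config defined in mini_gpt4_llama_v2.py can be constructed directly and that Vision-CAIR/MiniGPT4-Video is the target repo id (both are assumptions, not taken from this commit):

from mini_gpt4_llama_v2 import MiniGPT4_Video, minigpt4_video_config

config = minigpt4_video_config()    # real configs carry model paths, sizes, etc. (omitted here)
model = MiniGPT4_Video(config)      # constructor arguments may differ in the actual class
model.push_to_hub("Vision-CAIR/MiniGPT4-Video")  # uploads config + weights via huggingface_hub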

Files changed (1):
  1. mini_gpt4_llama_v2.py +2 -0
mini_gpt4_llama_v2.py CHANGED
@@ -29,6 +29,7 @@ from transformers import PretrainedConfig
 from transformers import PreTrainedModel
 from typing import List
 from collections import defaultdict
+from minigpt4_video.conversation import CONV_VISION
 class minigpt4_video_config(PretrainedConfig):
     model_type="minigpt4_video"
     PRETRAINED_MODEL_CONFIG_DICT = {
@@ -74,6 +75,7 @@ class MiniGPT4_Video(Blip2Base, PreTrainedModel):
         vis_processor_cfg = {"name": "blip2_image_train","image_size": 224}
         print(vis_processor_cfg)
         self.vis_processor = registry.get_processor_class(vis_processor_cfg["name"]).from_config(vis_processor_cfg)
+        self.CONV_VISION = CONV_VISION
         if "Mistral" in self.llama_model:
             from minigpt4_video.modeling_mistral import MistralForCausalLM as llm_model
             print("Mistral model")