Vision-CAIR committed on
Commit 4559ee4
1 Parent(s): 4e636d4

Upload folder using huggingface_hub

Files changed (1)
  1. mini_gpt4_llama_v2.py +2 -2
mini_gpt4_llama_v2.py CHANGED
@@ -105,11 +105,11 @@ class MiniGPT4_Video(Blip2Base, PreTrainedModel):
         self.vis_processor = registry.get_processor_class(vis_processor_cfg["name"]).from_config(vis_processor_cfg)
         self.CONV_VISION = CONV_VISION
         if "Mistral" in self.llama_model:
-            from minigpt4_video.modeling_mistral import MistralForCausalLM as llm_model
+            from .modeling_mistral import MistralForCausalLM as llm_model
             print("Mistral model")
             self.model_type = "Mistral"
         else:
-            from minigpt4_video.modeling_llama_v2 import LlamaForCausalLM as llm_model
+            from .modeling_llama_v2 import LlamaForCausalLM as llm_model
             print("Llama model")
             self.model_type = "Llama"
         self.tokenizer = self.init_tokenizer()
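
The change swaps package-absolute imports for relative ones, presumably so the file works when loaded as remote code from the Hub: transformers copies a repo's .py files into a dynamic module, where sibling files such as modeling_mistral.py and modeling_llama_v2.py are only reachable through relative imports. A minimal usage sketch under that assumption (not part of this commit; the repo id and auto-class are placeholders):

    # Hypothetical loading path for the uploaded folder. trust_remote_code=True
    # lets transformers import mini_gpt4_llama_v2.py from the repo, where the
    # relative imports above resolve the sibling modeling files shipped with it.
    from transformers import AutoModel

    model = AutoModel.from_pretrained(
        "Vision-CAIR/MiniGPT4-Video",  # placeholder repo id
        trust_remote_code=True,
    )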