mrcuddle committed on
Commit
640ca87
1 Parent(s): 34d7835

Update joycaption.py

Browse files
Files changed (1) hide show
  1. joycaption.py +4 -9
joycaption.py CHANGED
@@ -35,15 +35,10 @@ use_inference_client = False
35
  PIXTRAL_PATHS = ["mrcuddle/lumimaid-v0.2-8b-pixtral", "mrcuddle/lumimaid-v0.2-12b-pixtral"]
36
 
37
  llm_models = {
38
- "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2": None,
39
- #PIXTRAL_PATHS[0]: None,
40
- "bunnycore/LLama-3.1-8B-Matrix": None,
41
- "Sao10K/Llama-3.1-8B-Stheno-v3.4": None,
42
- "unsloth/Meta-Llama-3.1-8B-bnb-4bit": None,
43
- "DevQuasar/HermesNova-Llama-3.1-8B": None,
44
- "mergekit-community/L3.1-Boshima-b-FIX": None,
45
- #"chuanli11/Llama-3.2-3B-Instruct-uncensored": None, # Error(s) in loading state_dict for ImageAdapter:\n\tsize mismatch for linear1.weight: copying a param with shape torch.Size([4096, 1152]) from checkpoint, the shape in current model is torch.Size([3072, 1152]).\n\tsize mismatch for linear1.bias: copying a param with shape torch.Size([4096]) from checkpoint,
46
- "unsloth/Meta-Llama-3.1-8B-Instruct": None,
47
  }
48
 
49
  CLIP_PATH = "google/siglip-so400m-patch14-384"
 
35
  PIXTRAL_PATHS = ["mrcuddle/lumimaid-v0.2-8b-pixtral", "mrcuddle/lumimaid-v0.2-12b-pixtral"]
36
 
37
  llm_models = {
38
+ "mrcuddle/Lumimaid-v0.2-Llama-3.2-11B-Vision": None,
39
+ "mrcuddle/Lumimaid-v0.2-8B": None,
40
+ "mrcuddle/lumimaid-8b-StoryGen": None,
41
+ "mrcuddle/Lumimaid-NSFW-Reddit_Merge": None,
 
 
 
 
 
42
  }
43
 
44
  CLIP_PATH = "google/siglip-so400m-patch14-384"