Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -983,308 +983,6 @@
    },
    "execution_count": null,
    "outputs": []
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "OTHER STUFF BELOW - Code for the modules below are work-in-progress."
-   ],
-   "metadata": {
-    "id": "FRIqYJDEebpf"
-   }
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "The savefile can be used here : https://perchance.org/fusion-ai-image-generator"
-   ],
-   "metadata": {
-    "id": "JldNmWy1iyvK"
-   }
-  },
-  {
-   "cell_type": "code",
-   "source": [
-    "# @title \t⚄ Create fusion-generator .json savefile from result\n",
-    "filename = 'blank.json'\n",
-    "path = '/content/text-to-image-prompts/fusion/'\n",
-    "\n",
-    "print(f'reading {filename}....')\n",
-    "_index = 0\n",
-    "%cd {path}\n",
-    "with open(f'{filename}', 'r') as f:\n",
-    "  data = json.load(f)\n",
-    "#------#\n",
-    "_df = pd.DataFrame({'count': data})['count']\n",
-    "_savefile = {\n",
-    "  key : value for key, value in _df.items()\n",
-    "}\n",
-    "#------#\n",
-    "from safetensors.torch import load_file\n",
-    "import json , os , torch\n",
-    "import pandas as pd\n",
-    "#----#\n",
-    "def my_mkdirs(folder):\n",
-    "  if os.path.exists(folder)==False:\n",
-    "    os.makedirs(folder)\n",
-    "#------#\n",
-    "savefile_prompt = ''\n",
-    "for i in range(N) : savefile_prompt = savefile_prompt + ' ' + __prompts\n",
-    "_savefile['main'] = savefile_prompt.replace('\\n', ' ').replace('  ', ' ').replace('  ', ' ')\n",
-    "#------#\n",
-    "save_filename = f'fusion_C05_X7_1000_{PROMPT_INDEX}.json'\n",
-    "output_folder = '/content/output/savefiles/'\n",
-    "my_mkdirs(output_folder)\n",
-    "#-----#\n",
-    "%cd {output_folder}\n",
-    "print(f'Saving segment {save_filename} to {output_folder}...')\n",
-    "with open(save_filename, 'w') as f:\n",
-    "  json.dump(_savefile, f)\n"
-   ],
-   "metadata": {
-    "id": "Q7vpNAXQilbf",
-    "cellView": "form"
-   },
-   "execution_count": null,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "source": [
-    "# @title \t⚄ Create a savefile-set from the entire range of pre-encoded items\n",
-    "\n",
-    "# @markdown 📥 Load the data (only required one time)\n",
-    "load_the_data = True # @param {type:\"boolean\"}\n",
-    "\n",
-    "import math\n",
-    "from safetensors.torch import load_file\n",
-    "import json , os , torch\n",
-    "import pandas as pd\n",
-    "from PIL import Image\n",
-    "import requests\n",
-    "\n",
-    "def my_mkdirs(folder):\n",
-    "  if os.path.exists(folder)==False:\n",
-    "    os.makedirs(folder)\n",
-    "\n",
-    "# @markdown ⚖️ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
-    "\n",
-    "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
-    "\n",
-    "# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
-    "if(load_the_data):\n",
-    "  target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
-    "  from transformers import AutoTokenizer\n",
-    "  tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
-    "  from transformers import CLIPProcessor, CLIPModel\n",
-    "  processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
-    "  model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
-    "  logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
-    "#---------#\n",
-    "\n",
-    "filename = 'blank.json'\n",
-    "path = '/content/text-to-image-prompts/fusion/'\n",
-    "print(f'reading {filename}....')\n",
-    "_index = 0\n",
-    "%cd {path}\n",
-    "with open(f'{filename}', 'r') as f:\n",
-    "  data = json.load(f)\n",
-    "#------#\n",
-    "_df = pd.DataFrame({'count': data})['count']\n",
-    "_blank = {\n",
-    "  key : value for key, value in _df.items()\n",
-    "}\n",
-    "#------#\n",
-    "\n",
-    "root_savefile_name = 'fusion_C05_X7'\n",
-    "\n",
-    "%cd /content/\n",
-    "output_folder = '/content/output/savefiles/'\n",
-    "my_mkdirs(output_folder)\n",
-    "my_mkdirs('/content/output2/savefiles/')\n",
-    "my_mkdirs('/content/output3/savefiles/')\n",
-    "my_mkdirs('/content/output4/savefiles/')\n",
-    "my_mkdirs('/content/output5/savefiles/')\n",
-    "my_mkdirs('/content/output6/savefiles/')\n",
-    "my_mkdirs('/content/output7/savefiles/')\n",
-    "my_mkdirs('/content/output8/savefiles/')\n",
-    "my_mkdirs('/content/output9/savefiles/')\n",
-    "my_mkdirs('/content/output10/savefiles/')\n",
-    "my_mkdirs('/content/output11/savefiles/')\n",
-    "my_mkdirs('/content/output12/savefiles/')\n",
-    "my_mkdirs('/content/output13/savefiles/')\n",
-    "\n",
-    "\n",
-    "NEG = '' # @param {type:'string'}\n",
-    "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
-    "\n",
-    "for index in range(1667):\n",
-    "\n",
-    "  PROMPT_INDEX = index\n",
-    "  prompt = target_prompts[f'{index}']\n",
-    "  url = urls[f'{index}']\n",
-    "  if url.find('perchance')>-1:\n",
-    "    image = Image.open(requests.get(url, stream=True).raw)\n",
-    "  else: continue #print(\"(No image for this ID)\")\n",
-    "\n",
-    "  print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
-    "  text_features_A = target_text_encodings[f'{index}']\n",
-    "  image_features_A = target_image_encodings[f'{index}']\n",
-    "  # text-similarity\n",
-    "  sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
-    "\n",
-    "  neg_sims = 0*sims\n",
-    "  if(NEG != ''):\n",
-    "    # Get text features for user input\n",
-    "    inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
-    "    text_features_NEG = model.get_text_features(**inputs)\n",
-    "    text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
-    "    # text-similarity\n",
-    "    neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
-    "  #------#\n",
-    "\n",
-    "  # plus image-similarity\n",
-    "  sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
-    "\n",
-    "  # minus NEG-similarity\n",
-    "  sims = sims - neg_sims\n",
-    "\n",
-    "  # Sort the items\n",
-    "  sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
-    "\n",
-    "  # @markdown Repeat output N times\n",
-    "  RANGE = 1000\n",
-    "  NUM_CHUNKS = 10+\n",
-    "  separator = '|'\n",
-    "  _savefiles = {}\n",
-    "  #-----#\n",
-    "  for chunk in range(NUM_CHUNKS):\n",
-    "    if chunk=<10:continue\n",
-    "    start_at_index = chunk * RANGE\n",
-    "    _prompts = ''\n",
-    "    for _index in range(start_at_index + RANGE):\n",
-    "      if _index < start_at_index : continue\n",
-    "      index = indices[_index].item()\n",
-    "      prompt = prompts[f'{index}']\n",
-    "      _prompts = _prompts.replace(prompt + separator,'')\n",
-    "      _prompts = _prompts + prompt + separator\n",
-    "    #------#\n",
-    "    _prompts = fix_bad_symbols(_prompts)\n",
-    "    _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
-    "    _savefiles[f'{chunk}'] = _prompts\n",
-    "    #---------#\n",
-    "    save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
-    "\n",
-    "\n",
-    "    if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n",
-    "    if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n",
-    "    if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n",
-    "    if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n",
-    "    if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n",
-    "    if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n",
-    "    if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n",
-    "    if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n",
-    "    if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n",
-    "    if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n",
-    "    if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n",
-    "    if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n",
-    "\n",
-    "\n",
-    "    #------#\n",
-    "    print(f'Saving savefile {save_filename} to {output_folder}...')\n",
-    "    with open(save_filename, 'w') as f:\n",
-    "      json.dump(_savefiles, f)\n",
-    "    #---------#\n",
-    "    continue\n",
-    "#-----------#"
-   ],
-   "metadata": {
-    "id": "x1uAVXZEoL0T",
-    "cellView": "form"
-   },
-   "execution_count": null,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "source": [
-    "# Determine if this notebook is running on Colab or Kaggle\n",
-    "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
-    "home_directory = '/content/'\n",
-    "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
-    "if using_Kaggle : home_directory = '/kaggle/working/'\n",
-    "%cd {home_directory}\n",
-    "#-------#\n",
-    "\n",
-    "# @title Download the text_encodings as .zip\n",
-    "import os\n",
-    "%cd {home_directory}\n",
-    "#os.remove(f'{home_directory}results.zip')\n",
-    "root_output_folder = home_directory + 'output/'\n",
-    "zip_dest = f'/content/results.zip' #drive/MyDrive\n",
-    "!zip -r {zip_dest} {root_output_folder}"
-   ],
-   "metadata": {
-    "id": "zivBNrw9uSVD",
-    "cellView": "form"
-   },
-   "execution_count": null,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "source": [
-    "# @title \t⚄ Quick fix for normalizing encoded text corpus tensors\n",
-    "\n",
-    "import os\n",
-    "my_mkdirs('/content/output')\n",
-    "my_mkdirs('/content/output/text_encodings')\n",
-    "\n",
-    "for filename in os.listdir(f'{prompts_folder}'):\n",
-    "  %cd {prompts_folder}\n",
-    "  prompts = {}\n",
-    "  with open(f'{filename}', 'r') as f:\n",
-    "    data = json.load(f).items()\n",
-    "  for key,value in data:\n",
-    "    prompts[key] = value\n",
-    "  #------#\n",
-    "  num_items = int(prompts['num_items'])\n",
-    "\n",
-    "  %cd {encodings_folder}\n",
-    "  enc_filename = filename.replace('json', 'safetensors')\n",
-    "  _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n",
-    "  text_encodings = torch.zeros(num_items , dim)\n",
-    "  tmp = torch.ones(dim)\n",
-    "  tmp2 = torch.tensor(1/0.0043)\n",
-    "  zero_point = 0\n",
-    "  for index in range(num_items):\n",
-    "    text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1] , tmp , alpha= _text_encodings[index][0]).to(torch.float32)\n",
-    "    text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n",
-    "    test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
-    "    less_than_zero = test<0\n",
-    "    while(torch.any(less_than_zero).item()):\n",
-    "      zero_point = zero_point + 1\n",
-    "      test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
-    "      less_than_zero = test<0\n",
-    "    #------#\n",
-    "    _text_encodings[index][0] = zero_point\n",
-    "    _text_encodings[index][1:dim+1] = test\n",
-    "  #-------#\n",
-    "  %cd /content/output/text_encodings\n",
-    "\n",
-    "  tmp = {}\n",
-    "  tmp['weights'] = _text_encodings.to(torch.uint8)\n",
-    "  tmp['num_items'] = torch.tensor(num_items).to(torch.uint8)\n",
-    "  tmp['scale'] = torch.tensor(0.0043)\n",
-    "  save_file(tmp , f'{enc_filename}')\n",
-    "#------#"
-   ],
-   "metadata": {
-    "cellView": "form",
-    "id": "9qgHW1Wr7kZn"
-   },
-   "execution_count": null,
-   "outputs": []
   }
  ]
 }
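
For reference, the removed "Create a savefile-set" cell contains Python that will not parse (`chunk=<10`, `&&` in place of `and`, and the truncated `NUM_CHUNKS = 10+`), which matches the "work-in-progress" label on the removed markdown cell. A minimal sketch of what its chunk-routing loop appears to intend, assuming a hypothetical `NUM_CHUNKS = 130` (the highest bucket routed above is `/content/output13/`) and leaving the per-chunk prompt assembly elided:

import json, os

RANGE = 1000
NUM_CHUNKS = 130              # assumption: the routing table above covers chunks up to 130
root_savefile_name = 'fusion_C05_X7'
PROMPT_INDEX = 0              # set per prompt in the original outer loop
_savefiles = {}

def output_folder_for(chunk: int) -> str:
    # Buckets of 10 chunks map to /content/output2 ... /content/output13
    bucket = (chunk - 1) // 10 + 1   # chunks 11-20 -> 2, 21-30 -> 3, ...
    return f'/content/output{bucket}/savefiles/'

for chunk in range(NUM_CHUNKS):
    if chunk <= 10:
        continue                     # original: "if chunk=<10:continue"
    start_at_index = chunk * RANGE
    # ... build _savefiles[f'{chunk}'] from the sorted indices here ...
    save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'
    folder = output_folder_for(chunk)
    os.makedirs(folder, exist_ok=True)   # replaces the per-bucket %cd magics
    with open(os.path.join(folder, save_filename), 'w') as f:
        json.dump(_savefiles, f)

Writing through an explicit os.path.join path avoids the working-directory state that the original spread across thirteen %cd branches.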