Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -389,21 +389,21 @@
 "# @title \tβ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
 "# @markdown 🖼️ Choose a pre-encoded reference\n",
 "index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
-"\n",
 "PROMPT_INDEX = index\n",
-"
-"# @markdown βοΈ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
-"\n",
+"# @markdown βοΈ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br>\n",
 "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+"# @markdown -----------\n",
+"# @markdown π Enhance similarity to this prompt(optional)\n",
+"POS = '' # @param {type:'string'}\n",
+"strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
+"pos_strength = strength\n",
+"# @markdown -----------\n",
 "\n",
 "# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
-"\n",
 "NEG = '' # @param {type:'string'}\n",
-"strength = 1 # @param {type:\"slider\", min:-
-"\n",
-"# @markdown
-"\n",
-"\n",
+"strength = 1 # @param {type:\"slider\", min:-50, max:50, step:0.1}\n",
+"neg_strength = strength\n",
+"# @markdown -----------\n",
 "# @title βοΈπ Print the results (Advanced)\n",
 "list_size = 1000 # param {type:'number'}\n",
 "start_at_index = 0 # param {type:'number'}\n",
@@ -413,9 +413,9 @@
 "print_Descriptions = True # param {type:\"boolean\"}\n",
 "compact_Output = True # param {type:\"boolean\"}\n",
 "\n",
-"
-"#
-"newline_Separator = False #
+"\n",
+"# markdown Printing options\n",
+"newline_Separator = False # param {type:\"boolean\"}\n",
 "\n",
 "import random\n",
 "list_size2 = 1000 # param {type:'number'}\n",
@@ -482,12 +482,30 @@
 " neg_sims = torch.matmul(text_tensor, text_features_NEG.t())\n",
 " #------#\n",
 "\n",
+" pos_sims = 0*sims\n",
+" if(POS != ''):\n",
+"\n",
+" # Get text features for user input\n",
+" inputs = tokenizer(text = POS, padding=True, return_tensors=\"pt\")\n",
+" text_features_POS = model.get_text_features(**inputs)\n",
+" text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
+"\n",
+" # text-similarity\n",
+" pos_sims = torch.matmul(text_tensor, text_features_POS.t())\n",
+" #------#\n",
+"\n",
 " # plus image-similarity\n",
-"
+" img_sims = torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
+" sims = sims + (1-C) * img_sims\n",
+"\n",
+"\n",
+" # plus POS-similarity\n",
+" sims = sims + pos_strength*pos_sims\n",
 "\n",
 "\n",
 " # minus NEG-similarity\n",
-" sims = sims -
+" sims = sims - neg_strength*neg_sims\n",
+"\n",
 "\n",
 " # Sort the items\n",
 " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
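Net effect of the change: the interrogator now ranks each vocab entry by the C-weighted similarity to the reference text and image embeddings, plus an optional boost toward a user-supplied POS prompt and minus an optional penalty for the NEG prompt, before sorting. The minimal stand-alone sketch below approximates that ranking step, assuming L2-normalized CLIP embeddings; the rank_prompts helper, the toy shapes, and the random stand-in data are illustrative assumptions rather than code from the notebook, where text_features_POS and text_features_NEG instead come from CLIPModel.get_text_features on the tokenized prompts.

import torch

def rank_prompts(text_tensor, text_features_ref, image_features_A,
                 text_features_POS=None, text_features_NEG=None,
                 C=0.5, pos_strength=1.0, neg_strength=1.0, logit_scale=100.0):
    # Reference score: sim = C * text_enc + (1 - C) * image_enc (the @markdown formula)
    sims = C * torch.matmul(text_tensor, text_features_ref.t()) * logit_scale
    img_sims = torch.matmul(text_tensor, image_features_A.t()) * logit_scale
    sims = sims + (1 - C) * img_sims
    # Optional boost toward a user-supplied positive prompt (POS)
    if text_features_POS is not None:
        sims = sims + pos_strength * torch.matmul(text_tensor, text_features_POS.t())
    # Optional penalty against a user-supplied negative prompt (NEG)
    if text_features_NEG is not None:
        sims = sims - neg_strength * torch.matmul(text_tensor, text_features_NEG.t())
    # Highest combined score first
    return torch.sort(sims, dim=0, descending=True)

def unit(x):
    # L2-normalize along the embedding dimension, as the notebook does for CLIP features
    return x / x.norm(p=2, dim=-1, keepdim=True)

# Toy usage with random stand-ins for the pre-encoded vocab and reference features
vocab = unit(torch.randn(1000, 768))                     # stand-in for text_tensor
ref_text, ref_img = unit(torch.randn(1, 768)), unit(torch.randn(1, 768))
pos, neg = unit(torch.randn(1, 768)), unit(torch.randn(1, 768))
scores, indices = rank_prompts(vocab, ref_text, ref_img, pos, neg, C=0.5)
print(indices[:5].flatten().tolist())                    # top-5 vocab indices

In this sketch logit_scale multiplies only the reference terms, mirroring how img_sims is scaled in the diff, so at strength 1 the POS and NEG prompts act as small nudges; the wider -50..50 slider added for the negative strength lets the NEG penalty outweigh the reference when desired.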