codeShare committed commit c56de2f · verified · 1 parent: 6362a04

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -177,7 +177,8 @@
177
  "print(f'Using settings SCALE = {SCALE} and ZERO_POINT = {ZERO_POINT} for visualizing the text_encoding')"
178
  ],
179
  "metadata": {
180
- "id": "YDu8XlehhWID"
181
  },
182
  "execution_count": null,
183
  "outputs": []
@@ -185,13 +186,135 @@
185
  {
186
  "cell_type": "markdown",
187
  "source": [
188
- "**Feel free to skip these cells if you do not plan on using them**\n",
189
  "\n"
190
  ],
191
  "metadata": {
192
  "id": "Xf9zoq-Za3wi"
193
  }
194
  },
195
  {
196
  "cell_type": "code",
197
  "source": [
@@ -206,7 +329,7 @@
206
  "try:prompt\n",
207
  "except: prompt = ''\n",
208
  "\n",
209
- "# @markdown 🖼️+📝 Choose a pre-encoded reference (optional)\n",
210
  "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
211
  "PROMPT_INDEX = index\n",
212
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
@@ -268,6 +391,15 @@
268
  "execution_count": null,
269
  "outputs": []
270
  },
271
  {
272
  "cell_type": "code",
273
  "source": [
@@ -337,6 +469,15 @@
337
  "execution_count": null,
338
  "outputs": []
339
  },
340
  {
341
  "cell_type": "code",
342
  "source": [
@@ -463,103 +604,27 @@
463
  " #------#"
464
  ],
465
  "metadata": {
466
- "id": "lOQuTPfBMK82"
467
  },
468
  "execution_count": null,
469
  "outputs": []
470
  },
471
  {
472
- "cell_type": "code",
473
  "source": [
474
- "# @title ⚄ Evaluate saved reference similarity to select items (optional)\n",
475
- "EVAL = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
476
- "\n",
477
- "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
478
- "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
479
- "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
480
- "# @markdown -----\n",
481
- "# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
482
- "_POS = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
483
- "_NEG = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
484
  "\n",
485
- "show_local_reference = True # @param {type:\"boolean\"}\n",
486
- "show_encoding = True # @param {type:\"boolean\"}\n",
487
- "\n",
488
- "%cd /content/\n",
489
- "_ref = load_file('reference.safetensors' )\n",
490
- "ref = _ref['weights'].to(dot_dtype)\n",
491
- "\n",
492
- "if EVAL.strip() != '':\n",
493
- " print(\"Saved Reference:\\n\")\n",
494
- " for item in EVAL.split(','):\n",
495
- " if item.strip()=='':continue\n",
496
- " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
497
- " test = model.get_text_features(**inputs)[0]\n",
498
- " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
499
- " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
500
- " eval = torch.dot(ref , test)\n",
501
- " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
502
- " #-----#\n",
503
- "\n",
504
- " if(show_local_reference):\n",
505
- " print(\"\\n---------\\nLocal Reference with enchancements added :\\n\")\n",
506
- "\n",
507
- " for _item in POS.split(','):\n",
508
- " item = _item.strip()\n",
509
- " if item == '':continue\n",
510
- " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
511
- " ref = ref + math.pow(10,_POS-1) * model.get_text_features(**inputs)[0]\n",
512
- " #-------#\n",
513
- "\n",
514
- " for _item in NEG.split(','):\n",
515
- " item = _item.strip()\n",
516
- " if item == '':continue\n",
517
- " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
518
- " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
519
- " #-------#\n",
520
- "\n",
521
- " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
522
- " for item in EVAL.split(','):\n",
523
- " if item.strip()=='':continue\n",
524
- " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
525
- " test = model.get_text_features(**inputs)[0]\n",
526
- " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
527
- " eval = torch.dot(ref , test)\n",
528
- " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
529
- " #-----#\n",
530
- "\n",
531
- " if show_encoding:\n",
532
- " # create figure\n",
533
- " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
534
- " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
535
- " rows = 1\n",
536
- " columns = 3\n",
537
- " fig.add_subplot(rows, columns, 1)\n",
538
- " plt.imshow( visualize(ref))\n",
539
- " plt.axis('off')\n",
540
- " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n",
541
- " if num_plots>1:\n",
542
- " fig.add_subplot(rows, columns, 2)\n",
543
- " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n",
544
- " plt.axis('off')\n",
545
- " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n",
546
- "\n",
547
- " fig.add_subplot(rows, columns, 3)\n",
548
- " plt.imshow( visualize(ref - _ref['weights'].to(dot_dtype)))\n",
549
- " plt.axis('off')\n",
550
- " plt.title(\"Changes\", color='white', fontsize=round(20*image_size))\n",
551
- " #------#\n"
552
  ],
553
  "metadata": {
554
- "id": "Oxi6nOyrUTAe"
555
- },
556
- "execution_count": null,
557
- "outputs": []
558
  },
559
  {
560
  "cell_type": "code",
561
  "source": [
562
- "# @title ⚄ Run the CLIP interrogator on the saved reference\n",
563
  "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
564
  "_START_AT = '0' # @param [\"0\", \"10000\", \"50000\"] {allow-input: true}\n",
565
  "START_AT = 0\n",
@@ -757,6 +822,17 @@
757
  "execution_count": null,
758
  "outputs": []
759
  },
760
  {
761
  "cell_type": "code",
762
  "source": [
 
177
  "print(f'Using settings SCALE = {SCALE} and ZERO_POINT = {ZERO_POINT} for visualizing the text_encoding')"
178
  ],
179
  "metadata": {
180
+ "id": "YDu8XlehhWID",
181
+ "cellView": "form"
182
  },
183
  "execution_count": null,
184
  "outputs": []
 
186
  {
187
  "cell_type": "markdown",
188
  "source": [
189
+ "**Paste a prompt in the cell below to create an encoding**\n",
190
  "\n"
191
  ],
192
  "metadata": {
193
  "id": "Xf9zoq-Za3wi"
194
  }
195
  },
196
+ {
197
+ "cell_type": "code",
198
+ "source": [
199
+ "\n",
200
+ "# @markdown 📝 Write a text prompt (this will overwrite any savefile already stored)\n",
201
+ "NEW_ENCODING = '' # @param {type:'string' ,placeholder:'write a prompt'}\n",
202
+ "enable = True # @param {type:\"boolean\"}\n",
203
+ "# @markdown -----\n",
204
+ "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
205
+ "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
206
+ "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
207
+ "# @markdown -----\n",
208
+ "# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
209
+ "_POS = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
210
+ "_NEG = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
211
+ "# @markdown -----\n",
212
+ "# @markdown Check similarity for this encoding against any written prompt(s)\n",
213
+ "# @title ⚄ Evaluate saved reference similarity to select items (optional)\n",
214
+ "EVAL = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
215
+ "\n",
216
+ "show_local_reference = True # @param {type:\"boolean\"}\n",
217
+ "show_encoding = True # @param {type:\"boolean\"}\n",
218
+ "\n",
219
+ "try:\n",
220
+ " %cd /content/\n",
221
+ " _ref = load_file('reference.safetensors' )\n",
222
+ " ref = _ref['weights'].to(dot_dtype)\n",
223
+ "except:\n",
224
+ " ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
225
+ " _ref = {}\n",
226
+ " _ref['weights'] = ref\n",
227
+ " %cd /content/\n",
228
+ " save_file(_ref, 'reference.safetensors')\n",
229
+ "#-----#\n",
230
+ "\n",
231
+ "if NEW_ENCODING.strip() != '':\n",
232
+ " item = NEW_ENCODING.strip()\n",
233
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
234
+ " ref = model.get_text_features(**inputs)[0]\n",
235
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
236
+ "#------#\n",
237
+ "\n",
238
+ "try: ref\n",
239
+ "except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
240
+ "\n",
241
+ "if EVAL.strip() != '':\n",
242
+ " print(\"Saved Reference:\\n\")\n",
243
+ " for item in EVAL.split(','):\n",
244
+ " if item.strip()=='':continue\n",
245
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
246
+ " test = model.get_text_features(**inputs)[0]\n",
247
+ " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
248
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
249
+ " eval = torch.dot(ref , test)\n",
250
+ " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
251
+ " #-----#\n",
252
+ " if(show_local_reference):\n",
253
+ " print(\"\\n---------\\nLocal Reference with enhancements added:\\n\")\n",
254
+ "\n",
255
+ " for _item in POS.split(','):\n",
256
+ " item = _item.strip()\n",
257
+ " if item == '':continue\n",
258
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
259
+ " ref = ref + math.pow(10,_POS-1) * model.get_text_features(**inputs)[0]\n",
260
+ " #-------#\n",
261
+ "\n",
262
+ " for _item in NEG.split(','):\n",
263
+ " item = _item.strip()\n",
264
+ " if item == '':continue\n",
265
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
266
+ " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
267
+ " #-------#\n",
268
+ "\n",
269
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
270
+ " for item in EVAL.split(','):\n",
271
+ " if item.strip()=='':continue\n",
272
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
273
+ " test = model.get_text_features(**inputs)[0]\n",
274
+ " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
275
+ " eval = torch.dot(ref , test)\n",
276
+ " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
277
+ " #-----#\n",
278
+ "\n",
279
+ " if show_encoding:\n",
280
+ " # create figure\n",
281
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
282
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
283
+ " rows = 1\n",
284
+ " columns = 3\n",
285
+ " fig.add_subplot(rows, columns, 1)\n",
286
+ " plt.imshow( visualize(ref))\n",
287
+ " plt.axis('off')\n",
288
+ " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n",
289
+ " if num_plots>1:\n",
290
+ " fig.add_subplot(rows, columns, 2)\n",
291
+ " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n",
292
+ " plt.axis('off')\n",
293
+ " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n",
294
+ "\n",
295
+ " fig.add_subplot(rows, columns, 3)\n",
296
+ " plt.imshow( visualize(ref - _ref['weights'].to(dot_dtype)))\n",
297
+ " plt.axis('off')\n",
298
+ " plt.title(\"Changes\", color='white', fontsize=round(20*image_size))\n",
299
+ " #------#\n",
300
+ "\n",
301
+ "\n"
302
+ ],
303
+ "metadata": {
304
+ "id": "Oxi6nOyrUTAe"
305
+ },
306
+ "execution_count": null,
307
+ "outputs": []
308
+ },
309
+ {
310
+ "cell_type": "markdown",
311
+ "source": [
312
+ "**Use a pre-encoded image+prompt pair as reference (optional)**"
313
+ ],
314
+ "metadata": {
315
+ "id": "f9_AcquM7AYZ"
316
+ }
317
+ },
318
  {
319
  "cell_type": "code",
320
  "source": [
 
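The added cell above builds the reference encoding that the rest of the notebook compares against: it encodes NEW_ENCODING with the CLIP text encoder, L2-normalizes it, persists it to reference.safetensors, optionally biases it with POS/NEG terms weighted by 10^(x-1), and reports cosine similarity to each EVAL item as a dot product of unit vectors. The snippet below is a minimal standalone sketch of that flow; the checkpoint name and the example strings are placeholders, not taken from the notebook.

# Minimal sketch of the reference-encoding logic in the added cell.
# Assumption: openai/clip-vit-large-patch14 stands in for whatever CLIP
# checkpoint the notebook loads earlier; all prompts here are placeholders.
import math
import torch
from transformers import CLIPModel, CLIPTokenizer
from safetensors.torch import save_file

model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

def encode(text):
    # CLIP text features, L2-normalized so a dot product equals cosine similarity
    inputs = tokenizer(text=text, truncation=True, padding=True, return_tensors="pt")
    with torch.no_grad():
        feats = model.get_text_features(**inputs)[0]
    return feats / feats.norm(p=2, dim=-1, keepdim=True)

# Create and save the reference, as the cell does when NEW_ENCODING is non-empty.
ref = encode("a watercolor painting of a fox")
save_file({"weights": ref}, "reference.safetensors")

# Bias the reference with a POS term at logarithmic strength 10^(_POS - 1),
# then renormalize before evaluating.
_POS = 0.5
ref = ref + math.pow(10, _POS - 1) * encode("autumn forest")
ref = ref / ref.norm(p=2, dim=-1, keepdim=True)

# Similarity of an EVAL item to the reference, printed as a percentage.
sim = torch.dot(ref, encode("fox"))
print(f"fox : {round(sim.item() * 100, 2)}%")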
329
  "try:prompt\n",
330
  "except: prompt = ''\n",
331
  "\n",
332
+ "# @markdown 🖼️+📝 Choose a pre-encoded reference (note: some results are NSFW!)\n",
333
  "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
334
  "PROMPT_INDEX = index\n",
335
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
 
391
  "execution_count": null,
392
  "outputs": []
393
  },
394
+ {
395
+ "cell_type": "markdown",
396
+ "source": [
397
+ "**Use an image as a reference via URL (optional)**"
398
+ ],
399
+ "metadata": {
400
+ "id": "KI9Ho6CG7m3Z"
401
+ }
402
+ },
403
  {
404
  "cell_type": "code",
405
  "source": [
 
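The cells introduced by the next two markdown headers take an image, either fetched from a URL or uploaded to /content/, as the reference instead of a text prompt. Their bodies are elided in this diff, so the following is only a hedged sketch of the usual transformers pattern for that step; the URL and checkpoint name are placeholders.

# Hedged sketch (not the notebook's actual cell, which is elided in this
# diff): derive the reference encoding from an image fetched via URL.
import requests
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor
from safetensors.torch import save_file

model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

url = "https://example.com/reference.jpg"  # placeholder URL
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

with torch.no_grad():
    inputs = processor(images=image, return_tensors="pt")
    ref = model.get_image_features(**inputs)[0]
ref = ref / ref.norm(p=2, dim=-1, keepdim=True)

# Overwrite the saved reference so the interrogator cells pick it up.
save_file({"weights": ref}, "reference.safetensors")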
469
  "execution_count": null,
470
  "outputs": []
471
  },
472
+ {
473
+ "cell_type": "markdown",
474
+ "source": [
475
+ "**Use an image as a reference via uploading it to the /content/ folder (optional)**"
476
+ ],
477
+ "metadata": {
478
+ "id": "MBPi7F8S7tg3"
479
+ }
480
+ },
481
  {
482
  "cell_type": "code",
483
  "source": [
 
604
  " #------#"
605
  ],
606
  "metadata": {
607
+ "id": "lOQuTPfBMK82",
608
+ "cellView": "form"
609
  },
610
  "execution_count": null,
611
  "outputs": []
612
  },
613
  {
614
+ "cell_type": "markdown",
615
  "source": [
616
+ "**Run the interrogator**\n",
617
  "\n",
618
+ "Since the list of items is large (>1 million items), you will need to select a range within the sorted results to print."
619
  ],
620
  "metadata": {
621
+ "id": "ROKsoZrt7zMe"
622
+ }
623
  },
624
  {
625
  "cell_type": "code",
626
  "source": [
627
+ "# @title ⚄ CLIP Interrogator\n",
628
  "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
629
  "_START_AT = '0' # @param [\"0\", \"10000\", \"50000\"] {allow-input: true}\n",
630
  "START_AT = 0\n",
 
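The CLIP Interrogator cell above scores every pre-encoded vocabulary item against the saved reference and, because the vocabulary holds more than a million items, prints only a LIST_SIZE window of the sorted results starting at START_AT. Below is a sketch of that rank-and-slice step; text_encodings and prompts are assumed stand-ins for the notebook's pre-encoded vocabulary, and the small N is only to keep the example runnable.

# Sketch of the rank-and-slice step. Assumed names: text_encodings is an
# (N, dim) tensor of L2-normalized item encodings and prompts is the matching
# list of strings; the real notebook draws >1M items from its pre-encoded
# vocab files, here a small random vocabulary is faked instead.
import torch

N, dim = 10_000, 768
text_encodings = torch.randn(N, dim)
text_encodings = text_encodings / text_encodings.norm(p=2, dim=-1, keepdim=True)
prompts = [f"item {i}" for i in range(N)]

ref = torch.randn(dim)
ref = ref / ref.norm(p=2, dim=-1, keepdim=True)

LIST_SIZE = 1000
START_AT = 0

sims = text_encodings @ ref                    # cosine similarity of every item
order = torch.argsort(sims, descending=True)   # indices sorted best-first

for rank in range(START_AT, START_AT + LIST_SIZE):
    idx = order[rank].item()
    print(f"{rank:>7}  {sims[idx].item() * 100:6.2f}%  {prompts[idx]}")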
822
  "execution_count": null,
823
  "outputs": []
824
  },
825
+ {
826
+ "cell_type": "markdown",
827
+ "source": [
828
+ "**Evaluate Similarities**\n",
829
+ "\n",
830
+ "Run this cell to see how far down the list you can go before similarity to the reference is lost."
831
+ ],
832
+ "metadata": {
833
+ "id": "yl1DYzUn8YCC"
834
+ }
835
+ },
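The cell that this "Evaluate Similarities" header introduces is elided in the diff; one plausible way to see how far down the list similarity to the reference holds up, assuming the sims tensor from the ranking sketch above, is to plot similarity against rank and look for where the curve flattens.

# Hypothetical visualization of similarity decay down the sorted list
# (continues from the ranking sketch above: reuses sims).
import torch
import matplotlib.pyplot as plt

sorted_sims, _ = torch.sort(sims, descending=True)

plt.figure(figsize=(8, 4))
plt.plot(sorted_sims.numpy() * 100)
plt.xlabel("rank in sorted results")
plt.ylabel("similarity to reference (%)")
plt.title("Similarity decay down the interrogator list")
plt.show()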
836
  {
837
  "cell_type": "code",
838
  "source": [