codeShare committed
Commit 0f5ef92
1 Parent(s): 6124561

Upload sd_token_similarity_calculator.ipynb

Files changed (1)
  1. sd_token_similarity_calculator.ipynb +10 -38
sd_token_similarity_calculator.ipynb CHANGED
@@ -387,11 +387,11 @@
  "start_search_at_index = 0 # @param {type:\"slider\", min:0, max: 49407, step:100}\n",
  "# @markdown The lower the start_index, the more similiar the sampled tokens will be to the target token assigned in the '⚡ Get similiar tokens' cell\". If the cell was not run, then it will use tokens ordered by similarity to the \"girl\\</w>\" token\n",
  "start_search_at_ID = start_search_at_index\n",
- "search_range = 1220 # @param {type:\"slider\", min:10, max: 2000, step:10}\n",
+ "search_range = 1000 # @param {type:\"slider\", min:10, max: 2000, step:10}\n",
  "\n",
- "samples_per_iter = 20 # @param {type:\"slider\", min:10, max: 100, step:10}\n",
+ "samples_per_iter = 10 # @param {type:\"slider\", min:10, max: 100, step:10}\n",
  "\n",
- "iterations = 20 # @param {type:\"slider\", min:1, max: 20, step:0}\n",
+ "iterations = 5 # @param {type:\"slider\", min:1, max: 20, step:0}\n",
  "restrictions = 'None' # @param [\"None\", \"Suffix only\", \"Prefix only\"]\n",
  "#markdown Limit char size of included token <----- Disabled\n",
  "min_char_size = 0 #param {type:\"slider\", min:0, max: 20, step:1}\n",
@@ -436,14 +436,14 @@
  "results_name = {}\n",
  "#-----#\n",
  "for iter in range(ITERS):\n",
- " dots = torch.zeros(RANGE)\n",
- " is_trail = torch.zeros(RANGE)\n",
- " import re\n",
+ " dots = torch.zeros(min(list_size,RANGE))\n",
+ " is_trail = torch.zeros(min(list_size,RANGE))\n",
+ "\n",
  " #-----#\n",
- " _start = START + iter*RANGE\n",
  "\n",
  " for index in range(samples_per_iter):\n",
- " id_C = min(_start + index*RANGE, NUM_TOKENS) + random.randint(0,RANGE)\n",
+ " _start = START + iter*RANGE\n",
+ " id_C = random.randint(_start , _start + RANGE)\n",
  " name_C = db_vocab[f'{id_C}']\n",
  " is_Prefix = 0\n",
  " #Skip if non-AZ characters are found\n",
@@ -546,7 +546,7 @@
  " max_name_trail = ''\n",
  " #----#\n",
  " for index in range(min(list_size,RANGE)):\n",
- " id = START + indices[index].item()\n",
+ " id = _start + indices[index].item()\n",
  " name = db_vocab[f'{id}']\n",
  " #-----#\n",
  " if (name.find('</w>')<=-1):\n",
@@ -634,7 +634,7 @@
  "\n",
  "#--------#\n",
  "print('')\n",
- "if(use == '🖼️image_encoding from image'):\n",
+ "if(use == '🖼️image_encoding from image' and colab_image_path != \"\"):\n",
  " from google.colab.patches import cv2_imshow\n",
  " cv2_imshow(image_A)\n",
  "#-----#\n",
@@ -656,34 +656,6 @@
  "execution_count": null,
  "outputs": []
  },
- {
- "cell_type": "code",
- "source": [],
- "metadata": {
- "id": "5XN2pM5NAfS5",
- "outputId": "df4eefe6-12e7-416e-dc2d-b6df22a14d69",
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 321
- }
- },
- "execution_count": 25,
- "outputs": [
- {
- "output_type": "error",
- "ename": "AttributeError",
- "evalue": "clip",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m<ipython-input-25-2eb0ffbc049b>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muse\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'🖼️image_encoding from image'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpatches\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcv2_imshow\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mcv2_imshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_A\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
- "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/google/colab/patches/__init__.py\u001b[0m in \u001b[0;36mcv2_imshow\u001b[0;34m(a)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mN\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0man\u001b[0m \u001b[0mNxM\u001b[0m \u001b[0mBGRA\u001b[0m \u001b[0mcolor\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \"\"\"\n\u001b[0;32m---> 18\u001b[0;31m \u001b[0ma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m255\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'uint8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;31m# cv2 stores colors as BGR; convert to RGB\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/PIL/Image.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 527\u001b[0m \u001b[0mdeprecate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Image categories\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"is_animated\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mplural\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 528\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_category\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 529\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mAttributeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 530\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 531\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;31mAttributeError\u001b[0m: clip"
- ]
- }
- ]
- },
  {
  "cell_type": "code",
  "source": [