codeShare committed on
Commit
977f957
1 Parent(s): b91416c

Upload fusion_t2i_CLIP_interrogator_dev.ipynb

Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator_dev.ipynb ADDED
@@ -0,0 +1,1290 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "code",
19
+ "source": [
20
+ "# @title ⚄ 🔄 Initialize\n",
21
+ "\n",
22
+ "import os\n",
23
+ "home_directory = '/content/'\n",
24
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
25
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
26
+ "%cd {home_directory}\n",
27
+ "\n",
28
+ "def fix_bad_symbols(txt):\n",
29
+ " result = txt\n",
30
+ " for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
31
+ " result = result.replace(symbol,'\\\\' + symbol)\n",
32
+ " #------#\n",
33
+ " return result;\n",
34
+ "\n",
35
+ "def my_mkdirs(folder):\n",
36
+ " if os.path.exists(folder)==False:\n",
37
+ " os.makedirs(folder)\n",
38
+ "\n",
39
+ "#🔸🔹\n",
40
+ "# Load the data if not already loaded\n",
41
+ "try:\n",
42
+ " loaded\n",
43
+ "except:\n",
44
+ " from safetensors.torch import load_file , save_file\n",
45
+ " import json , torch , requests , math\n",
46
+ " import pandas as pd\n",
47
+ " from PIL import Image\n",
48
+ " import cv2\n",
49
+ " from matplotlib import pyplot as plt\n",
50
+ " #----#\n",
51
+ " %cd {home_directory}\n",
52
+ " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
53
+ " loaded = True\n",
54
+ " %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
55
+ " !unzip reference.zip\n",
56
+ "\n",
57
+ "from transformers import AutoTokenizer\n",
58
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
59
+ "from transformers import CLIPProcessor, CLIPModel\n",
60
+ "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
61
+ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
62
+ "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
63
+ "\n",
64
+ "#------#\n",
65
+ "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
66
+ "with open(f'reference_prompts.json', 'r') as f:\n",
67
+ " data = json.load(f)\n",
68
+ " _df = pd.DataFrame({'count': data})['count']\n",
69
+ " target_prompts = {\n",
70
+ " key : value for key, value in _df.items()\n",
71
+ " }\n",
72
+ "#------#\n",
73
+ "with open(f'reference_urls.json', 'r') as f:\n",
74
+ " data = json.load(f)\n",
75
+ " _df = pd.DataFrame({'count': data})['count']\n",
76
+ " target_urls = {\n",
77
+ " key : value for key, value in _df.items()\n",
78
+ " }\n",
79
+ "\n",
80
+ "#------#\n",
81
+ "dot_dtype = torch.float32\n",
82
+ "dim = 768\n",
83
+ "ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
84
+ "\n",
85
+ "# title ⚄ Define parameters for visalizing the reference in a 16x16 grid <br> (the visualization settings has no effect on output)\n",
86
+ "from PIL import Image, ImageDraw\n",
87
+ "SCALE = 0.0002 # param {type:\"slider\", min:0.0001, max:0.001, step:0.00001}\n",
88
+ "ZERO_POINT = 100 # param {type:\"slider\", min:0, max:300, step:1}\n",
89
+ "CELL_SIZE = 16\n",
90
+ "image_size = 0.5 # param {type:\"slider\", min:0, max:1, step:0.01}\n",
91
+ "show_encoding = False # param {type:\"boolean\"}\n",
92
+ "#------#\n",
93
+ "\n",
94
+ "BORDER_THICKNESS = 4\n",
95
+ "\n",
96
+ "def visualize(_ref):\n",
97
+ " RGB_tensor = (torch.round(_ref/SCALE)+torch.ones(dim)*ZERO_POINT)\n",
98
+ " cellsize = CELL_SIZE\n",
99
+ " tick = round(cellsize/2)\n",
100
+ " border_offset = round(BORDER_THICKNESS/2)\n",
101
+ " width = 16*cellsize + BORDER_THICKNESS\n",
102
+ " height = 16*cellsize + BORDER_THICKNESS\n",
103
+ " image = Image.new('RGB', (width, height), (0, 0, 0))\n",
104
+ " draw = ImageDraw.Draw(image)\n",
105
+ " for row in range(16):\n",
106
+ " for col in range(16):\n",
107
+ " tmp = 3*row*col\n",
108
+ " r = max(0,min(255,int(RGB_tensor[tmp].item())))\n",
109
+ " g = max(0,min(255,int(RGB_tensor[tmp+1].item())))\n",
110
+ " b = max(0,min(255,int(RGB_tensor[tmp+2].item())))\n",
111
+ " fillColor = (r,g,b)\n",
112
+ " x0 = row*cellsize +border_offset\n",
113
+ " y0 = (15-col)*cellsize +border_offset\n",
114
+ " x1 = row*cellsize + 2*tick + border_offset\n",
115
+ " y1 = (15-col)*cellsize + 2*tick + border_offset\n",
116
+ " shape = [(x0, y0), (x1, y1)]\n",
117
+ " draw.rectangle(shape, fill=fillColor, outline=(0,0,0))\n",
118
+ " return (image)\n",
119
+ "\n",
120
+ "num_plots = 1\n",
121
+ "try:\n",
122
+ " %cd /content/\n",
123
+ " _ref = load_file('reference.safetensors' )\n",
124
+ " num_plots = num_plots+1\n",
125
+ "except: _ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
126
+ "#-----#\n",
127
+ "try: ref\n",
128
+ "except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
129
+ "\n",
130
+ "\n",
131
+ "if show_encoding:\n",
132
+ " # create figure\n",
133
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
134
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
135
+ " rows = 1\n",
136
+ " columns = num_plots\n",
137
+ " fig.add_subplot(rows, columns, 1)\n",
138
+ " plt.imshow( visualize(ref))\n",
139
+ " plt.axis('off')\n",
140
+ " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n",
141
+ " if num_plots>1:\n",
142
+ " fig.add_subplot(rows, columns, 2)\n",
143
+ " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n",
144
+ " plt.axis('off')\n",
145
+ " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n",
146
+ " #------#\n",
147
+ "\n",
148
+ "print(f'Using settings SCALE = {SCALE} and ZERO_POINT = {ZERO_POINT} for visualizing the text_encoding')"
149
+ ],
150
+ "metadata": {
151
+ "id": "TC5lMJrS1HCC",
152
+ "cellView": "form"
153
+ },
154
+ "execution_count": null,
155
+ "outputs": []
156
+ },
157
+ {
158
+ "cell_type": "code",
159
+ "source": [
160
+ "# @title ⚄ 📷💭 Use pre-encoded image+prompt pair\n",
161
+ "loaded_ref = False\n",
162
+ "try:\n",
163
+ " ref\n",
164
+ " loaded_ref = True\n",
165
+ "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
166
+ "if loaded_ref : prev_ref = ref.clone().detach()\n",
167
+ "\n",
168
+ "try:prompt\n",
169
+ "except: prompt = ''\n",
170
+ "\n",
171
+ "# @markdown 🖼️+📝 Choose a pre-encoded reference (note: some results are NSFW!)\n",
172
+ "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
173
+ "PROMPT_INDEX = index\n",
174
+ "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
175
+ "url = target_urls[f'{PROMPT_INDEX}']\n",
176
+ "if url.find('perchance')>-1:\n",
177
+ " image = Image.open(requests.get(url, stream=True).raw)\n",
178
+ "#------#\n",
179
+ "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
180
+ "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
181
+ "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
182
+ "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
183
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
184
+ "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n",
185
+ "image_size = 0.57 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
186
+ "show_encoding = True # @param {type:\"boolean\"}\n",
187
+ "\n",
188
+ "if(not method == 'Do nothing'):\n",
189
+ " if method == 'Refresh': ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
190
+ " if method == 'Subtract from existing ref':\n",
191
+ " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
192
+ " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
193
+ " else:\n",
194
+ " ref = torch.add(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
195
+ " ref = torch.add(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
196
+ " #---------#\n",
197
+ " references = '' # Clear up memory\n",
198
+ " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
199
+ " ref = ref.clone().detach()\n",
200
+ " #------#\n",
201
+ " # create figure\n",
202
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
203
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
204
+ " rows = 1\n",
205
+ " columns = 1\n",
206
+ " if show_encoding: columns = columns+1\n",
207
+ " if show_encoding and loaded_ref : columns = columns+1\n",
208
+ " fig.add_subplot(rows, columns, 1)\n",
209
+ " plt.imshow(image)\n",
210
+ " plt.axis('off')\n",
211
+ " plt.title(f\"Reference image at index={index}\" , color='white' , fontsize=round(20*image_size))\n",
212
+ " #-----#\n",
213
+ " if show_encoding and loaded_ref:\n",
214
+ " fig.add_subplot(rows, columns, columns-1)\n",
215
+ " plt.imshow( visualize(prev_ref))\n",
216
+ " plt.axis('off')\n",
217
+ " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n",
218
+ " print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n",
219
+ "\n",
220
+ " if show_encoding:\n",
221
+ " fig.add_subplot(rows, columns, columns)\n",
222
+ " plt.imshow( visualize(ref))\n",
223
+ " plt.axis('off')\n",
224
+ " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n",
225
+ " #------#\n"
226
+ ],
227
+ "metadata": {
228
+ "id": "BwrEs5zVB0Sb",
229
+ "cellView": "form"
230
+ },
231
+ "execution_count": null,
232
+ "outputs": []
233
+ },
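+ {
+ "cell_type": "markdown",
+ "source": [
+ "A minimal sketch of the blend performed in the cell above: the reference is nudged by `10^(log_strength-1) * (C * enc_A + (1-C) * enc_B)` and then re-normalized. Random toy tensors stand in for the pre-encoded CLIP image/text features, so the printed norm is the only meaningful output."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only: random vectors stand in for references[index][0] and references[index][1]\n",
+ "import torch , math\n",
+ "dim = 768\n",
+ "enc_A = torch.randn(dim) # placeholder for one pre-encoded feature\n",
+ "enc_B = torch.randn(dim) # placeholder for the other pre-encoded feature\n",
+ "ref_sketch = torch.zeros(dim)\n",
+ "C = 0.3 # balance between the two encodings\n",
+ "log_strength = 1 # strength = 10^(log_strength - 1)\n",
+ "strength = math.pow(10 , log_strength - 1)\n",
+ "# 'Add to existing ref' branch\n",
+ "ref_sketch = ref_sketch + strength * C * enc_A\n",
+ "ref_sketch = ref_sketch + strength * (1-C) * enc_B\n",
+ "ref_sketch = ref_sketch / ref_sketch.norm(p=2 , dim=-1 , keepdim=True) # keep it unit length\n",
+ "print(round(ref_sketch.norm().item() , 4)) # ~1.0"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },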
234
+ {
235
+ "cell_type": "markdown",
236
+ "source": [
237
+ "# Other methods"
238
+ ],
239
+ "metadata": {
240
+ "id": "f9_AcquM7AYZ"
241
+ }
242
+ },
243
+ {
244
+ "cell_type": "code",
245
+ "source": [
246
+ "# @title ⚄ 🧩 Create an encoding\n",
247
+ "# @markdown 📝 Write a text prompt (this will overwrite any savefile already stored)\n",
248
+ "NEW_ENCODING = '' # @param {type:'string' ,placeholder:'write a prompt'}\n",
249
+ "enable = True # @param {type:\"boolean\"}\n",
250
+ "# @markdown -----\n",
251
+ "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
252
+ "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
253
+ "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
254
+ "# @markdown -----\n",
255
+ "# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
256
+ "_POS = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
257
+ "_NEG = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
258
+ "# @markdown -----\n",
259
+ "# @markdown Check similiarity for this encoding against any written prompt(s)\n",
260
+ "# @title ⚄ Evaluate saved reference similarity to select items (optional)\n",
261
+ "EVAL = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
262
+ "\n",
263
+ "show_local_reference = True # @param {type:\"boolean\"}\n",
264
+ "show_encoding = True # @param {type:\"boolean\"}\n",
265
+ "\n",
266
+ "try:\n",
267
+ " %cd /content/\n",
268
+ " _ref = load_file('reference.safetensors' )\n",
269
+ " ref = _ref['weights'].to(dot_dtype)\n",
270
+ "except:\n",
271
+ " ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
272
+ " _ref = {}\n",
273
+ " _ref['weights'] = ref\n",
274
+ " %cd /content/\n",
275
+ " save_file(_ref, 'reference.safetensors')\n",
276
+ "#-----#\n",
277
+ "\n",
278
+ "if NEW_ENCODING.strip() != '':\n",
279
+ " item = NEW_ENCODING.strip()\n",
280
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
281
+ " ref = model.get_text_features(**inputs)[0]\n",
282
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
283
+ "#------#\n",
284
+ "\n",
285
+ "try: ref\n",
286
+ "except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
287
+ "\n",
288
+ "if EVAL.strip() != '':\n",
289
+ " print(\"Saved Reference:\\n\")\n",
290
+ " for item in EVAL.split(','):\n",
291
+ " if item.strip()=='':continue\n",
292
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
293
+ " test = model.get_text_features(**inputs)[0]\n",
294
+ " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
295
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
296
+ " eval = torch.dot(ref , test)\n",
297
+ " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
298
+ " #-----#\n",
299
+ " if(show_local_reference):\n",
300
+ " print(\"\\n---------\\nLocal Reference with enchancements added :\\n\")\n",
301
+ "\n",
302
+ " for _item in POS.split(','):\n",
303
+ " item = _item.strip()\n",
304
+ " if item == '':continue\n",
305
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
306
+ " ref = ref + math.pow(10,_POS-1) * model.get_text_features(**inputs)[0]\n",
307
+ " #-------#\n",
308
+ "\n",
309
+ " for _item in NEG.split(','):\n",
310
+ " item = _item.strip()\n",
311
+ " if item == '':continue\n",
312
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
313
+ " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
314
+ " #-------#\n",
315
+ "\n",
316
+ " ref= ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n",
317
+ " for item in EVAL.split(','):\n",
318
+ " if item.strip()=='':continue\n",
319
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
320
+ " test = model.get_text_features(**inputs)[0]\n",
321
+ " test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n",
322
+ " eval = torch.dot(ref , test)\n",
323
+ " print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n",
324
+ " #-----#\n",
325
+ "\n",
326
+ " if show_encoding:\n",
327
+ " # create figure\n",
328
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
329
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
330
+ " rows = 1\n",
331
+ " columns = 3\n",
332
+ " fig.add_subplot(rows, columns, 1)\n",
333
+ " plt.imshow( visualize(ref))\n",
334
+ " plt.axis('off')\n",
335
+ " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n",
336
+ " if num_plots>1:\n",
337
+ " fig.add_subplot(rows, columns, 2)\n",
338
+ " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n",
339
+ " plt.axis('off')\n",
340
+ " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n",
341
+ "\n",
342
+ " fig.add_subplot(rows, columns, 3)\n",
343
+ " plt.imshow( visualize(ref - _ref['weights'].to(dot_dtype)))\n",
344
+ " plt.axis('off')\n",
345
+ " plt.title(\"Changes\", color='white', fontsize=round(20*image_size))\n",
346
+ " #------#\n",
347
+ "\n",
348
+ "\n"
349
+ ],
350
+ "metadata": {
351
+ "id": "Oxi6nOyrUTAe",
352
+ "cellView": "form"
353
+ },
354
+ "execution_count": null,
355
+ "outputs": []
356
+ },
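+ {
+ "cell_type": "markdown",
+ "source": [
+ "A quick sketch of the logarithmic strength sliders used in these cells: a slider value x is turned into the multiplier 10^(x-1), so x = 1 means strength 1, x = 2 means strength 10, and negative values give a vanishingly small contribution."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch: how a slider value x maps to the strength 10^(x-1)\n",
+ "import math\n",
+ "for x in [-5 , -1 , 0 , 1 , 2 , 5]:\n",
+ "    print(f'x = {x} -> strength = {math.pow(10 , x-1)}')"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },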
357
+ {
358
+ "cell_type": "markdown",
359
+ "source": [
360
+ "**Use an image as a reference via URL (optional)**"
361
+ ],
362
+ "metadata": {
363
+ "id": "KI9Ho6CG7m3Z"
364
+ }
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "source": [
369
+ "# @title ⚄ 🌐🖼️ Load an image via URL\n",
370
+ "loaded_ref = False\n",
371
+ "try:\n",
372
+ " ref\n",
373
+ " loaded_ref = True\n",
374
+ "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
375
+ "if loaded_ref : prev_ref = ref.clone().detach()\n",
376
+ "\n",
377
+ "try:prompt\n",
378
+ "except: prompt = ''\n",
379
+ "\n",
380
+ "# @markdown 🖼️ Upload your own image for use as reference via URL (optional)\n",
381
+ "URL = '' # @param {type:'string' ,placeholder:'paste an url here'}\n",
382
+ "if URL.strip() != '':\n",
383
+ " image = Image.open(requests.get(URL, stream=True).raw)\n",
384
+ " log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
385
+ " method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n",
386
+ " image_size = 0.79 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
387
+ " show_encoding = True # @param {type:\"boolean\"}\n",
388
+ " #---------#\n",
389
+ " if(not method == 'Do nothing'):\n",
390
+ " # Get image features\n",
391
+ " inputs = processor(images=image, return_tensors=\"pt\")\n",
392
+ " image_features = model.get_image_features(**inputs)\n",
393
+ " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
394
+ " #-------#\n",
395
+ " if method == 'Refresh':\n",
396
+ " ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
397
+ " if method == 'Subtract from existing ref':\n",
398
+ " ref = ref - math.pow(10,log_strength-1)*image_features\n",
399
+ " else: ref = ref + math.pow(10,log_strength-1)*image_features\n",
400
+ " #-----#\n",
401
+ " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
402
+ " ref = ref[0]\n",
403
+ " ref = ref.clone().detach()\n",
404
+ " #------#\n",
405
+ " # create figure\n",
406
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
407
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
408
+ " rows = 1\n",
409
+ " columns = 1\n",
410
+ " if show_encoding: columns = 2\n",
411
+ " if show_encoding and loaded_ref : columns = 3\n",
412
+ " fig.add_subplot(rows, columns, 1)\n",
413
+ " plt.imshow(image)\n",
414
+ " plt.axis('off')\n",
415
+ " plt.title(\"Reference image from URL\" , color='white' , fontsize=round(20*image_size))\n",
416
+ " #-----#\n",
417
+ " if show_encoding and loaded_ref:\n",
418
+ " fig.add_subplot(rows, columns, columns-1)\n",
419
+ " plt.imshow( visualize(prev_ref))\n",
420
+ " plt.axis('off')\n",
421
+ " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n",
422
+ " if show_encoding:\n",
423
+ " fig.add_subplot(rows, columns, columns)\n",
424
+ " plt.imshow( visualize(ref))\n",
425
+ " plt.axis('off')\n",
426
+ " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n",
427
+ " #------#"
428
+ ],
429
+ "metadata": {
430
+ "id": "IqUsiQw2HU2C",
431
+ "cellView": "form"
432
+ },
433
+ "execution_count": null,
434
+ "outputs": []
435
+ },
436
+ {
437
+ "cell_type": "markdown",
438
+ "source": [
439
+ "**Use an image as a reference via uploading it to the /content/ folder (optional)**"
440
+ ],
441
+ "metadata": {
442
+ "id": "MBPi7F8S7tg3"
443
+ }
444
+ },
445
+ {
446
+ "cell_type": "code",
447
+ "source": [
448
+ "# @title ⚄ 📂🖼️ Use an uploaded image as reference\n",
449
+ "loaded_ref = False\n",
450
+ "try:\n",
451
+ " ref\n",
452
+ " loaded_ref = True\n",
453
+ "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
454
+ "if loaded_ref : prev_ref = ref.clone().detach()\n",
455
+ "\n",
456
+ "try:prompt\n",
457
+ "except: prompt = ''\n",
458
+ "\n",
459
+ "# @markdown 🖼️ Upload your own image for use as reference via URL (optional)\n",
460
+ "FILENAME = '' # @param {type:'string' ,placeholder:'IMG_123.png'}\n",
461
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
462
+ "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n",
463
+ "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
464
+ "show_encoding = True # @param {type:\"boolean\"}\n",
465
+ "\n",
466
+ "if FILENAME.strip() != '':\n",
467
+ " %cd /content/\n",
468
+ " image = cv2.imread(FILENAME)\n",
469
+ " b,g,r = cv2.split(image)\n",
470
+ " image = cv2.merge([r,g,b])\n",
471
+ " #---------#\n",
472
+ " if(not method == 'Do nothing'):\n",
473
+ " # Get image features\n",
474
+ " inputs = processor(images=image, return_tensors=\"pt\")\n",
475
+ " image_features = model.get_image_features(**inputs)\n",
476
+ " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
477
+ " #-------#\n",
478
+ " if method == 'Refresh':\n",
479
+ " ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
480
+ " if method == 'Subtract from existing ref':\n",
481
+ " ref = ref - math.pow(10,log_strength-1)*image_features\n",
482
+ " else: ref = ref + math.pow(10,log_strength-1)*image_features\n",
483
+ " #-----#\n",
484
+ " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
485
+ " ref = ref[0]\n",
486
+ " ref = ref.clone().detach()\n",
487
+ " #------#\n",
488
+ " # create figure\n",
489
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
490
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
491
+ " rows = 1\n",
492
+ " columns = 1\n",
493
+ " if show_encoding: columns = 2\n",
494
+ " if show_encoding and loaded_ref : columns = 3\n",
495
+ " fig.add_subplot(rows, columns, 1)\n",
496
+ " plt.imshow(image)\n",
497
+ " plt.axis('off')\n",
498
+ " plt.title(f\"Reference image from uploaded image {FILENAME}\" , color='white' , fontsize=round(20*image_size))\n",
499
+ " #-----#\n",
500
+ " if show_encoding and loaded_ref:\n",
501
+ " fig.add_subplot(rows, columns, columns-1)\n",
502
+ " plt.imshow( visualize(prev_ref))\n",
503
+ " plt.axis('off')\n",
504
+ " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n",
505
+ " if show_encoding:\n",
506
+ " fig.add_subplot(rows, columns, columns)\n",
507
+ " plt.imshow( visualize(ref))\n",
508
+ " plt.axis('off')\n",
509
+ " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n",
510
+ " #------#"
511
+ ],
512
+ "metadata": {
513
+ "id": "I_-GOwFPKkha",
514
+ "cellView": "form"
515
+ },
516
+ "execution_count": null,
517
+ "outputs": []
518
+ },
519
+ {
520
+ "cell_type": "markdown",
521
+ "source": [
522
+ "# Search prompts using CLIP"
523
+ ],
524
+ "metadata": {
525
+ "id": "UqrYOkhlEQdM"
526
+ }
527
+ },
528
+ {
529
+ "cell_type": "code",
530
+ "source": [
531
+ "# @title ⚄ 💾 Save the reference\n",
532
+ "\n",
533
+ "loaded_ref = False\n",
534
+ "try:\n",
535
+ " ref\n",
536
+ " loaded_ref = True\n",
537
+ "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
538
+ "if loaded_ref : prev_ref = ref.clone().detach()\n",
539
+ "\n",
540
+ "try:prompt\n",
541
+ "except: prompt = ''\n",
542
+ "\n",
543
+ "reset_everything = False # @param {type:\"boolean\"}\n",
544
+ "_ref = {}\n",
545
+ "ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
546
+ "if (reset_everything) : ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
547
+ "_ref['weights'] = ref.to(dot_dtype)\n",
548
+ "%cd /content/\n",
549
+ "save_file(_ref , 'reference.safetensors' )\n",
550
+ "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
551
+ "show_encoding = True # @param {type:\"boolean\"}\n",
552
+ "#------#\n",
553
+ "print(\"Saved local encoding to reference.safetensors\")\n",
554
+ "if show_encoding:\n",
555
+ " # create figure\n",
556
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
557
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
558
+ " rows = 1\n",
559
+ " columns = num_plots\n",
560
+ " fig.add_subplot(rows, columns, 1)\n",
561
+ " plt.imshow( visualize(ref))\n",
562
+ " plt.axis('off')\n",
563
+ " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n",
564
+ " if num_plots>1:\n",
565
+ " fig.add_subplot(rows, columns, 2)\n",
566
+ " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n",
567
+ " plt.axis('off')\n",
568
+ " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n",
569
+ " #------#"
570
+ ],
571
+ "metadata": {
572
+ "id": "lOQuTPfBMK82",
573
+ "cellView": "form"
574
+ },
575
+ "execution_count": null,
576
+ "outputs": []
577
+ },
578
+ {
579
+ "cell_type": "markdown",
580
+ "source": [
581
+ "**Run the interrogator**\n",
582
+ "\n",
583
+ " Since the list of items is large (>1 million items) you will need to select a range within the sorted results to print."
584
+ ],
585
+ "metadata": {
586
+ "id": "ROKsoZrt7zMe"
587
+ }
588
+ },
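+ {
+ "cell_type": "markdown",
+ "source": [
+ "A small sketch (with made-up scores) of the range selection described above: all similarity scores are sorted in descending order and only the window from START_AT to START_AT + LIST_SIZE of the sorted indices is kept."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only: toy similarity scores stand in for the >1 million real items\n",
+ "import torch\n",
+ "toy_sims = torch.rand(100) # pretend similarity scores for 100 items\n",
+ "sorted_sims , sorted_indices = torch.sort(toy_sims , dim=0 , descending=True)\n",
+ "START_AT = 10\n",
+ "LIST_SIZE = 5\n",
+ "for rank in range(START_AT , START_AT + LIST_SIZE):\n",
+ "    item_index = sorted_indices[rank].item()\n",
+ "    print(f'rank {rank} : item {item_index} , similarity {round(sorted_sims[rank].item() , 3)}')"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },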
589
+ {
590
+ "cell_type": "code",
591
+ "source": [
592
+ "# @title ⚄ 🕵️‍♂️ Run the CLIP Interrogator\n",
593
+ "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
594
+ "_START_AT = '0' # @param [\"0\", \"10000\", \"50000\"] {allow-input: true}\n",
595
+ "START_AT = 0\n",
596
+ "#-----#\n",
597
+ "if _START_AT.find('K')>-1:\n",
598
+ " START_AT = _START_AT.replace('K','')\n",
599
+ " if START_AT.isnumeric(): START_AT = int(START_AT)*1000\n",
600
+ "#------#\n",
601
+ "else:\n",
602
+ " if _START_AT.isnumeric(): START_AT = int(_START_AT)\n",
603
+ "#----#\n",
604
+ "\n",
605
+ "output_folder = home_directory + 'results/'\n",
606
+ "output_folder_sims = home_directory + 'results/sims/'\n",
607
+ "my_mkdirs(output_folder)\n",
608
+ "my_mkdirs(output_folder_sims)\n",
609
+ "\n",
610
+ "# @markdown -----\n",
611
+ "# @markdown Select vocab\n",
612
+ "general = True # @param {type:\"boolean\"}\n",
613
+ "civit9 = True # @param {type:\"boolean\"}\n",
614
+ "fanfic1 = False # @param {type:\"boolean\"}\n",
615
+ "fanfic2 = False # @param {type:\"boolean\"}\n",
616
+ "# @markdown -----\n",
617
+ "# @title ⚄ New interrogator code using quantized text corpus\n",
618
+ "%cd /content/\n",
619
+ "_ref = load_file('reference.safetensors' )\n",
620
+ "ref = _ref['weights'].to(dot_dtype)\n",
621
+ "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
622
+ "POS1 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
623
+ "POS2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
624
+ "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
625
+ "SKIP = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
626
+ "min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
627
+ "def isBlacklisted(_txt):\n",
628
+ " blacklist = SKIP.lower().replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
629
+ " if blacklist == '': return False\n",
630
+ " txt = _txt.lower().strip()\n",
631
+ " if len(txt)<min_wordcount: return True\n",
632
+ " if txt.isnumeric(): return True\n",
633
+ " #-----#\n",
634
+ " for item in list(blacklist.split(',')):\n",
635
+ " if item.strip() == '' : continue\n",
636
+ " if txt.find(item.strip())> -1 : return True\n",
637
+ " #------#\n",
638
+ " found = False\n",
639
+ " alphabet = 'abcdefghijklmnopqrstuvxyz'\n",
640
+ " for letter in alphabet:\n",
641
+ " found = txt.find(letter)>-1\n",
642
+ " if found:break\n",
643
+ " #------#\n",
644
+ " return not found\n",
645
+ "# @markdown -----\n",
646
+ "# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
647
+ "_POS1 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
648
+ "_POS2 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
649
+ "_NEG = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
650
+ "# @markdown -----\n",
651
+ "# @markdown Save similarity as a list for later review (this will slow down the code)\n",
652
+ "save_similiarity = True # @param {type:\"boolean\"}\n",
653
+ "# @markdown -----\n",
654
+ "include_similiarity = False # @param {type:\"boolean\"}\n",
655
+ "print_as_list = False # @param {type:\"boolean\"}\n",
656
+ "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
657
+ "#-----#\n",
658
+ "for _item in POS1.split(','):\n",
659
+ " item = _item.strip()\n",
660
+ " if item == '':continue\n",
661
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
662
+ " ref = ref + math.pow(10,_POS1-1) * model.get_text_features(**inputs)[0]\n",
663
+ "#-------#\n",
664
+ "for _item in POS2.split(','):\n",
665
+ " item = _item.strip()\n",
666
+ " if item == '':continue\n",
667
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
668
+ " ref = ref + math.pow(10,_POS2-1) * model.get_text_features(**inputs)[0]\n",
669
+ "#-------#\n",
670
+ "for _item in NEG.split(','):\n",
671
+ " item = _item.strip()\n",
672
+ " if item == '':continue\n",
673
+ " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
674
+ " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
675
+ "#------#\n",
676
+ "ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
677
+ "vocab_to_load = ''\n",
678
+ "if (general): vocab_to_load = vocab_to_load + 'general , '\n",
679
+ "if (civit9): vocab_to_load = vocab_to_load + 'civit9 , '\n",
680
+ "if (fanfic1): vocab_to_load = vocab_to_load + 'fanfic1 , '\n",
681
+ "if (fanfic2): vocab_to_load = vocab_to_load + 'fanfic2 , '\n",
682
+ "vocab_to_load = (vocab_to_load +'}').replace(' , }' , '')\n",
683
+ "multi = vocab_to_load.find(',')>-1\n",
684
+ "#-----#\n",
685
+ "prompts_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text'\n",
686
+ "encodings_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text_encodings'\n",
687
+ "#----#\n",
688
+ "scale = 0.0043\n",
689
+ "size = 0\n",
690
+ "#------#\n",
691
+ "total_items = 0\n",
692
+ "for filename in os.listdir(prompts_folder):\n",
693
+ " if (not general and filename.find('general')>-1):continue\n",
694
+ " if (not civit9 and filename.find('civit9')>-1):continue\n",
695
+ " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
696
+ " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
697
+ " size = size + LIST_SIZE\n",
698
+ "#-------#\n",
699
+ "similiar_sims = torch.zeros(size)\n",
700
+ "similiar_prompts = {}\n",
701
+ "_index = 0\n",
702
+ "#-------#\n",
703
+ "similiar_encodings = {}\n",
704
+ "for filename in os.listdir(prompts_folder):\n",
705
+ " if (not general and filename.find('general')>-1):continue\n",
706
+ " if (not civit9 and filename.find('civit9')>-1):continue\n",
707
+ " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
708
+ " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
709
+ " #------#\n",
710
+ " root_filename = filename.replace('.json', '')\n",
711
+ " %cd {prompts_folder}\n",
712
+ " prompts = {}\n",
713
+ " with open(f'{root_filename}.json', 'r') as f:\n",
714
+ " data = json.load(f).items()\n",
715
+ " for key,value in data:\n",
716
+ " prompts[key] = value\n",
717
+ " num_items = int(prompts['num_items'])\n",
718
+ " total_items = total_items + num_items\n",
719
+ " #------#\n",
720
+ " try:vocab_loaded\n",
721
+ " except:\n",
722
+ " vocab_loaded = 'first'\n",
723
+ " #-----#\n",
724
+ " if vocab_loaded == 'first' or (vocab_loaded != vocab_to_load and not multi):\n",
725
+ " %cd {encodings_folder}\n",
726
+ " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
727
+ " text_encodings = torch.zeros(num_items , dim)\n",
728
+ " tmp = torch.ones(dim).to(dot_dtype)\n",
729
+ " for index in range(num_items):\n",
730
+ " text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].to(dot_dtype))\n",
731
+ " vocab_loaded = vocab_to_load\n",
732
+ " #------#\n",
733
+ " sims = torch.matmul(text_encodings*scale, ref.t())\n",
734
+ " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
735
+ " tmp = {}\n",
736
+ " tmp['weights'] = sorted\n",
737
+ " %cd {output_folder_sims}\n",
738
+ " save_file(tmp, root_filename + '_sims.safetensors')\n",
739
+ " tmp={}\n",
740
+ " #-----#\n",
741
+ " for index in range(LIST_SIZE + START_AT):\n",
742
+ " if index<START_AT: continue\n",
743
+ " key = indices[index].item()\n",
744
+ " try:prompt = prompts[f'{key}']\n",
745
+ " except:continue\n",
746
+ " if(isBlacklisted(prompt)):continue\n",
747
+ " #-------#\n",
748
+ " similiar_sims[_index] = torch.tensor(round(sims[key].item(), 5))\n",
749
+ " similiar_prompts[f'{_index}'] = prompt\n",
750
+ " _index = _index + 1\n",
751
+ " #-------#\n",
752
+ " continue\n",
753
+ "#---------#\n",
754
+ "total_items = total_items + num_items+1\n",
755
+ "#-------#\n",
756
+ "print(f'\\nProcessed entire list of {total_items} items to find closest match.\\nSaved closest matching indices {START_AT} to {START_AT + LIST_SIZE} as the dict \"similiar_prompts\" with {LIST_SIZE} items.\\n')\n",
757
+ "\n",
758
+ "# Print results\n",
759
+ "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
760
+ "if(print_as_list):\n",
761
+ " for index in range(LIST_SIZE):\n",
762
+ " key = indices[index].item()\n",
763
+ " sim = similiar_sims[key].item()\n",
764
+ " prompt = similiar_prompts[f'{key}']\n",
765
+ " if include_similiarity :print(f'{prompt} - {round(sim*100,1)} %')\n",
766
+ " else: print(f'{prompt}')\n",
767
+ "#-------#\n",
768
+ "else:\n",
769
+ " prompt = ''\n",
770
+ " for iter in range(N):\n",
771
+ " prompt = prompt + '{'\n",
772
+ " for index in range(LIST_SIZE):\n",
773
+ " key = indices[index].item()\n",
774
+ " sim = similiar_sims[key].item()\n",
775
+ " prompt = prompt + fix_bad_symbols(similiar_prompts[f'{key}']) + '|'\n",
776
+ " #-----#\n",
777
+ " prompt = (prompt + '}').replace('|}', '} ')\n",
778
+ " #------#\n",
779
+ " print(f'Similiar prompts: \\n\\n\\n{prompt} \\n\\n\\n//----//')\n",
780
+ "#-----#\n",
781
+ "\n",
782
+ "#Clear memory\n",
783
+ "_text_encodings = {}\n",
784
+ "prompts = {}\n",
785
+ "#-----#\n",
786
+ "\n",
787
+ "image\n"
788
+ ],
789
+ "metadata": {
790
+ "id": "kOYZ8Ajn-DD8"
791
+ },
792
+ "execution_count": null,
793
+ "outputs": []
794
+ },
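+ {
+ "cell_type": "markdown",
+ "source": [
+ "A self-contained sketch of the uint8 storage scheme the interrogator above decodes (as inferred from its decode step): element 0 of each row holds a zero point, elements 1..dim hold the quantized values, and the float vector is recovered as scale * (q - zero_point), with scale assumed to be 0.0043."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch of the quantization scheme; a random unit vector stands in for a real text encoding\n",
+ "import torch\n",
+ "dim = 768\n",
+ "scale = 0.0043 # assumed scale, matching the constant used above\n",
+ "enc = torch.randn(dim)\n",
+ "enc = enc / enc.norm(p=2 , dim=-1 , keepdim=True) # unit-length CLIP-style vector\n",
+ "# --- encode ---\n",
+ "zero_point = 0\n",
+ "q = torch.round(enc / scale) + zero_point\n",
+ "while torch.any(q < 0): # shift the zero point until every value fits in uint8\n",
+ "    zero_point = zero_point + 1\n",
+ "    q = torch.round(enc / scale) + zero_point\n",
+ "row = torch.zeros(dim + 1 , dtype = torch.uint8)\n",
+ "row[0] = zero_point\n",
+ "row[1:dim+1] = q.to(torch.uint8)\n",
+ "# --- decode (what the interrogator does before computing similarities) ---\n",
+ "decoded = scale * (row[1:dim+1].to(torch.float32) - row[0].to(torch.float32))\n",
+ "print(round((decoded - enc).abs().max().item() , 4)) # quantization error, a few thousandths"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },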
795
+ {
796
+ "cell_type": "markdown",
797
+ "source": [
798
+ "**Evaluate Similarities**\n",
799
+ "\n",
800
+ "Run this cell to see how far down the list you can go before similarity to the reference is lost."
801
+ ],
802
+ "metadata": {
803
+ "id": "yl1DYzUn8YCC"
804
+ }
805
+ },
806
+ {
807
+ "cell_type": "code",
808
+ "source": [
809
+ "# @title ⚄ 🔍 Test how unique the encoding is\n",
810
+ "%cd {output_folder_sims}\n",
811
+ "index = 0\n",
812
+ "for filename in os.listdir(output_folder_sims):\n",
813
+ " _sims = load_file(filename)\n",
814
+ " _sims = _sims['weights']\n",
815
+ " for _sim in _sims.tolist():\n",
816
+ " index = index + 1\n",
817
+ " #-------#\n",
818
+ "total_items = index\n",
819
+ "sims = torch.zeros(total_items)\n",
820
+ "index = 0\n",
821
+ "for filename in os.listdir(output_folder_sims):\n",
822
+ " _sims = load_file(filename)\n",
823
+ " _sims = _sims['weights']\n",
824
+ " for sim in _sims.tolist():\n",
825
+ " sims[index] = sim\n",
826
+ " index = index + 1\n",
827
+ " #-------#\n",
828
+ "#---------------#\n",
829
+ "_sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
830
+ "SCALE = 0.001\n",
831
+ "sorted = torch.round(_sorted/SCALE)\n",
832
+ "ZERO_POINT = sorted[total_items-1].item()\n",
833
+ "sorted = (sorted - torch.ones(total_items)*ZERO_POINT)\n",
834
+ "densities = torch.bincount(sorted.to(dtype = torch.int64))\n",
835
+ "yy = densities.tolist()\n",
836
+ "top = (sorted[0] + ZERO_POINT).to(dtype = torch.int64).item()\n",
837
+ "num_coords = round(top - ZERO_POINT)\n",
838
+ "xx = [round((ZERO_POINT + x)*100*SCALE,2) for x in range(num_coords+1)]\n",
839
+ "index = 0\n",
840
+ "for item in xx:\n",
841
+ " if item>0:break\n",
842
+ " index = index + 1\n",
843
+ "#----#\n",
844
+ "positive_bound = index\n",
845
+ "ss =list(xx)\n",
846
+ "tmp = 0\n",
847
+ "chunk = 1\n",
848
+ "CHUNK_SIZE = 1000\n",
849
+ "index = 0\n",
850
+ "for num in reversed(yy):\n",
851
+ " tmp = tmp + num\n",
852
+ " if(tmp>CHUNK_SIZE):\n",
853
+ " _tmp = math.floor(tmp/CHUNK_SIZE)\n",
854
+ " chunk = chunk + _tmp\n",
855
+ " tmp = tmp - CHUNK_SIZE * _tmp\n",
856
+ " ss[num_coords - index] = chunk\n",
857
+ " index = index + 1\n",
858
+ "#------#\n",
859
+ "fig, ax = plt.subplots()\n",
860
+ "fig.canvas.draw()\n",
861
+ "plt.plot(ss[positive_bound:], xx[positive_bound:])\n",
862
+ "plt.xlabel ('Search depth')\n",
863
+ "plt.ylabel ('Similarity')\n",
864
+ "plt.title ('Similarity to index')\n",
865
+ "plt.grid()\n",
866
+ "indices_depth = [item.get_text() for item in ax.get_xticklabels()]\n",
867
+ "sim_pcnts = [item.get_text() for item in ax.get_yticklabels()]\n",
868
+ "\n",
869
+ "index = 0\n",
870
+ "for index_depth in indices_depth:\n",
871
+ " indices_depth[index] = index_depth + 'K'\n",
872
+ " index = index + 1\n",
873
+ "#-------#\n",
874
+ "\n",
875
+ "index = 0\n",
876
+ "for sim_pcnt in sim_pcnts:\n",
877
+ " sim_pcnts[index] = sim_pcnt + '%'\n",
878
+ " index = index + 1\n",
879
+ "#-------#\n",
880
+ "ax.set_xticklabels(indices_depth)\n",
881
+ "ax.set_yticklabels(sim_pcnts)\n",
882
+ "plt.show()"
883
+ ],
884
+ "metadata": {
885
+ "id": "ln6DsZPG99ez"
886
+ },
887
+ "execution_count": null,
888
+ "outputs": []
889
+ },
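+ {
+ "cell_type": "markdown",
+ "source": [
+ "The same idea as the plot above in a simpler form (toy data): sorting all similarities in descending order and plotting them against their rank shows how quickly similarity to the reference falls off with search depth."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch with made-up similarity scores; the real cell reads them from the saved _sims files\n",
+ "import torch\n",
+ "from matplotlib import pyplot as plt\n",
+ "toy_sims = torch.randn(50000) * 0.05 + 0.2\n",
+ "sorted_sims , _ = torch.sort(toy_sims , descending = True)\n",
+ "depth = torch.arange(sorted_sims.shape[0]) / 1000.0 # search depth in thousands of items\n",
+ "plt.plot(depth , sorted_sims * 100)\n",
+ "plt.xlabel('Search depth (K items)')\n",
+ "plt.ylabel('Similarity (%)')\n",
+ "plt.title('Similarity to index (sketch, toy data)')\n",
+ "plt.grid()\n",
+ "plt.show()"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },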
890
+ {
891
+ "cell_type": "code",
892
+ "source": [
893
+ "# @title ⚄ Save the results\n",
894
+ "\n",
895
+ "def mkdir(folder):\n",
896
+ " if os.path.exists(folder)==False:\n",
897
+ " os.makedirs(folder)\n",
898
+ "#-----#\n",
899
+ "output_folder = home_directory + 'results'\n",
900
+ "mkdir(output_folder)\n",
901
+ "#-----#\n",
902
+ "try: similiar_prompts\n",
903
+ "except:similiar_prompts = {}\n",
904
+ "%cd {output_folder}\n",
905
+ "print(f'Saving similiar_prompts.json to {output_folder}...')\n",
906
+ "with open('similiar_prompts.json', 'w') as f:\n",
907
+ " json.dump(similiar_prompts, f)\n",
908
+ "#-----#\n",
909
+ "try: similiar_sims\n",
910
+ "except: similiar_sims = torch.zeros(dim).to(dot_dtype)\n",
911
+ "#-------#\n",
912
+ "_similiar_sims = {}\n",
913
+ "_similiar_sims['weights'] = similiar_sims.to(dot_dtype)\n",
914
+ "%cd {output_folder}\n",
915
+ "print(f'Saving similiar_sims.safetensors to {output_folder}...')\n",
916
+ "save_file(_similiar_sims, 'similiar_sims.safetensors')\n"
917
+ ],
918
+ "metadata": {
919
+ "id": "m-N553nXz9Jd",
920
+ "cellView": "form"
921
+ },
922
+ "execution_count": null,
923
+ "outputs": []
924
+ },
925
+ {
926
+ "cell_type": "code",
927
+ "source": [
928
+ "\n",
929
+ "# @title ⚄ Print results\n",
930
+ "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
931
+ "include_similiarity = False # @param {type:\"boolean\"}\n",
932
+ "print_as_list = False # @param {type:\"boolean\"}\n",
933
+ "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
934
+ "FILENAME = '' # @param {type:'string' ,placeholder:'write .json file to load (optional)'}\n",
935
+ "_FILENAME = FILENAME.replace('.json' , '')\n",
936
+ "if _FILENAME.strip() == '': _FILENAME = 'similiar_prompts'\n",
937
+ "#------#\n",
938
+ "%cd {output_folder}\n",
939
+ "with open(f'{_FILENAME}.json', 'r') as f:\n",
940
+ " data = json.load(f)\n",
941
+ " _df = pd.DataFrame({'count': data})['count']\n",
942
+ " similiar_prompts = {\n",
943
+ " key : value for key, value in _df.items()\n",
944
+ " }\n",
945
+ "#-------#\n",
946
+ "_similiar_sims = load_file('similiar_sims.safetensors')\n",
947
+ "similiar_sims = _similiar_sims['weights'].to(dot_dtype)\n",
948
+ "\n",
949
+ "# @title ⚄ Run the CLIP interrogator on the saved reference\n",
950
+ "\n",
951
+ "# @markdown Select which values within the saved list to print\n",
952
+ "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
953
+ "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
954
+ "\n",
955
+ "if(print_as_list):\n",
956
+ " for index in range(LIST_SIZE + START_AT):\n",
957
+ " if index<START_AT:continue\n",
958
+ " key = indices[index].item()\n",
959
+ " sim = similiar_sims[key].item()\n",
960
+ " prompt = similiar_prompts[f'{key}']\n",
961
+ " if include_similiarity :print(f'{prompt} - {round(sim*100,1)} %')\n",
962
+ " else: print(f'{prompt}')\n",
963
+ "#-------#\n",
964
+ "else:\n",
965
+ " prompt = ''\n",
966
+ " for iter in range(N):\n",
967
+ " prompt = prompt + '{'\n",
968
+ " for index in range(LIST_SIZE + START_AT):\n",
969
+ " if index<START_AT:continue\n",
970
+ " key = indices[index].item()\n",
971
+ " sim = similiar_sims[key].item()\n",
972
+ " prompt = prompt + fix_bad_symbols(similiar_prompts[f'{key}']) + '|'\n",
973
+ " #-----#\n",
974
+ " prompt = (prompt + '}').replace('|}', '} ')\n",
975
+ " #------#\n",
976
+ " print(f'Similiar prompts: \\n\\n {prompt} \\n\\n')\n",
977
+ "image\n",
978
+ "#-----#\n"
979
+ ],
980
+ "metadata": {
981
+ "id": "XOMkIKc9-wZz",
982
+ "cellView": "form"
983
+ },
984
+ "execution_count": null,
985
+ "outputs": []
986
+ },
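+ {
+ "cell_type": "markdown",
+ "source": [
+ "A small sketch (with made-up prompts) of how the printed result above is packed into a perchance-style `{a|b|c}` list, escaping the special symbols the same way fix_bad_symbols does."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only: standalone copy of the escaping logic, with example prompts\n",
+ "def escape_symbols(txt):\n",
+ "    for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
+ "        txt = txt.replace(symbol , '\\\\' + symbol)\n",
+ "    return txt\n",
+ "\n",
+ "toy_prompts = ['a cat (sitting)' , 'oil painting' , 'studio lighting']\n",
+ "block = '{'\n",
+ "for p in toy_prompts:\n",
+ "    block = block + escape_symbols(p) + '|'\n",
+ "block = (block + '}').replace('|}', '}')\n",
+ "print(block) # -> {a cat \\(sitting\\)|oil painting|studio lighting}"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },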
987
+ {
988
+ "cell_type": "markdown",
989
+ "source": [
990
+ "OTHER STUFF BELOW - Code for the modules below are work-in-progress."
991
+ ],
992
+ "metadata": {
993
+ "id": "FRIqYJDEebpf"
994
+ }
995
+ },
996
+ {
997
+ "cell_type": "markdown",
998
+ "source": [
999
+ "The savefile can be used here : https://perchance.org/fusion-ai-image-generator"
1000
+ ],
1001
+ "metadata": {
1002
+ "id": "JldNmWy1iyvK"
1003
+ }
1004
+ },
1005
+ {
1006
+ "cell_type": "code",
1007
+ "source": [
1008
+ "# @title \t⚄ Create fusion-generator .json savefile from result\n",
1009
+ "filename = 'blank.json'\n",
1010
+ "path = '/content/text-to-image-prompts/fusion/'\n",
1011
+ "\n",
1012
+ "print(f'reading {filename}....')\n",
1013
+ "_index = 0\n",
1014
+ "%cd {path}\n",
1015
+ "with open(f'{filename}', 'r') as f:\n",
1016
+ " data = json.load(f)\n",
1017
+ "#------#\n",
1018
+ "_df = pd.DataFrame({'count': data})['count']\n",
1019
+ "_savefile = {\n",
1020
+ " key : value for key, value in _df.items()\n",
1021
+ "}\n",
1022
+ "#------#\n",
1023
+ "from safetensors.torch import load_file\n",
1024
+ "import json , os , torch\n",
1025
+ "import pandas as pd\n",
1026
+ "#----#\n",
1027
+ "def my_mkdirs(folder):\n",
1028
+ " if os.path.exists(folder)==False:\n",
1029
+ " os.makedirs(folder)\n",
1030
+ "#------#\n",
1031
+ "savefile_prompt = ''\n",
1032
+ "for i in range(N) : savefile_prompt = savefile_prompt + ' ' + __prompts\n",
1033
+ "_savefile['main'] = savefile_prompt.replace('\\n', ' ').replace(' ', ' ').replace(' ', ' ')\n",
1034
+ "#------#\n",
1035
+ "save_filename = f'fusion_C05_X7_1000_{PROMPT_INDEX}.json'\n",
1036
+ "output_folder = '/content/output/savefiles/'\n",
1037
+ "my_mkdirs(output_folder)\n",
1038
+ "#-----#\n",
1039
+ "%cd {output_folder}\n",
1040
+ "print(f'Saving segment {save_filename} to {output_folder}...')\n",
1041
+ "with open(save_filename, 'w') as f:\n",
1042
+ " json.dump(_savefile, f)\n"
1043
+ ],
1044
+ "metadata": {
1045
+ "id": "Q7vpNAXQilbf",
1046
+ "cellView": "form"
1047
+ },
1048
+ "execution_count": null,
1049
+ "outputs": []
1050
+ },
1051
+ {
1052
+ "cell_type": "code",
1053
+ "source": [
1054
+ "# @title \t⚄ Create a savefile-set from the entire range of pre-encoded items\n",
1055
+ "\n",
1056
+ "# @markdown 📥 Load the data (only required one time)\n",
1057
+ "load_the_data = True # @param {type:\"boolean\"}\n",
1058
+ "\n",
1059
+ "import math\n",
1060
+ "from safetensors.torch import load_file\n",
1061
+ "import json , os , torch\n",
1062
+ "import pandas as pd\n",
1063
+ "from PIL import Image\n",
1064
+ "import requests\n",
1065
+ "\n",
1066
+ "def my_mkdirs(folder):\n",
1067
+ " if os.path.exists(folder)==False:\n",
1068
+ " os.makedirs(folder)\n",
1069
+ "\n",
1070
+ "# @markdown ⚖️ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
1071
+ "\n",
1072
+ "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
1073
+ "\n",
1074
+ "# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
1075
+ "if(load_the_data):\n",
1076
+ " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
1077
+ " from transformers import AutoTokenizer\n",
1078
+ " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
1079
+ " from transformers import CLIPProcessor, CLIPModel\n",
1080
+ " processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
1081
+ " model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
1082
+ " logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
1083
+ "#---------#\n",
1084
+ "\n",
1085
+ "filename = 'blank.json'\n",
1086
+ "path = '/content/text-to-image-prompts/fusion/'\n",
1087
+ "print(f'reading {filename}....')\n",
1088
+ "_index = 0\n",
1089
+ "%cd {path}\n",
1090
+ "with open(f'{filename}', 'r') as f:\n",
1091
+ " data = json.load(f)\n",
1092
+ "#------#\n",
1093
+ "_df = pd.DataFrame({'count': data})['count']\n",
1094
+ "_blank = {\n",
1095
+ " key : value for key, value in _df.items()\n",
1096
+ "}\n",
1097
+ "#------#\n",
1098
+ "\n",
1099
+ "root_savefile_name = 'fusion_C05_X7'\n",
1100
+ "\n",
1101
+ "%cd /content/\n",
1102
+ "output_folder = '/content/output/savefiles/'\n",
1103
+ "my_mkdirs(output_folder)\n",
1104
+ "my_mkdirs('/content/output2/savefiles/')\n",
1105
+ "my_mkdirs('/content/output3/savefiles/')\n",
1106
+ "my_mkdirs('/content/output4/savefiles/')\n",
1107
+ "my_mkdirs('/content/output5/savefiles/')\n",
1108
+ "my_mkdirs('/content/output6/savefiles/')\n",
1109
+ "my_mkdirs('/content/output7/savefiles/')\n",
1110
+ "my_mkdirs('/content/output8/savefiles/')\n",
1111
+ "my_mkdirs('/content/output9/savefiles/')\n",
1112
+ "my_mkdirs('/content/output10/savefiles/')\n",
1113
+ "my_mkdirs('/content/output11/savefiles/')\n",
1114
+ "my_mkdirs('/content/output12/savefiles/')\n",
1115
+ "my_mkdirs('/content/output13/savefiles/')\n",
1116
+ "\n",
1117
+ "\n",
1118
+ "NEG = '' # @param {type:'string'}\n",
1119
+ "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
1120
+ "\n",
1121
+ "for index in range(1667):\n",
1122
+ "\n",
1123
+ " PROMPT_INDEX = index\n",
1124
+ " prompt = target_prompts[f'{index}']\n",
1125
+ " url = urls[f'{index}']\n",
1126
+ " if url.find('perchance')>-1:\n",
1127
+ " image = Image.open(requests.get(url, stream=True).raw)\n",
1128
+ " else: continue #print(\"(No image for this ID)\")\n",
1129
+ "\n",
1130
+ " print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
1131
+ " text_features_A = target_text_encodings[f'{index}']\n",
1132
+ " image_features_A = target_image_encodings[f'{index}']\n",
1133
+ " # text-similarity\n",
1134
+ " sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
1135
+ "\n",
1136
+ " neg_sims = 0*sims\n",
1137
+ " if(NEG != ''):\n",
1138
+ " # Get text features for user input\n",
1139
+ " inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
1140
+ " text_features_NEG = model.get_text_features(**inputs)\n",
1141
+ " text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
1142
+ " # text-similarity\n",
1143
+ " neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
1144
+ " #------#\n",
1145
+ "\n",
1146
+ " # plus image-similarity\n",
1147
+ " sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
1148
+ "\n",
1149
+ " # minus NEG-similarity\n",
1150
+ " sims = sims - neg_sims\n",
1151
+ "\n",
1152
+ " # Sort the items\n",
1153
+ " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
1154
+ "\n",
1155
+ " # @markdown Repeat output N times\n",
1156
+ " RANGE = 1000\n",
1157
+ " NUM_CHUNKS = 10+\n",
1158
+ " separator = '|'\n",
1159
+ " _savefiles = {}\n",
1160
+ " #-----#\n",
1161
+ " for chunk in range(NUM_CHUNKS):\n",
1162
+ " if chunk=<10:continue\n",
1163
+ " start_at_index = chunk * RANGE\n",
1164
+ " _prompts = ''\n",
1165
+ " for _index in range(start_at_index + RANGE):\n",
1166
+ " if _index < start_at_index : continue\n",
1167
+ " index = indices[_index].item()\n",
1168
+ " prompt = prompts[f'{index}']\n",
1169
+ " _prompts = _prompts.replace(prompt + separator,'')\n",
1170
+ " _prompts = _prompts + prompt + separator\n",
1171
+ " #------#\n",
1172
+ " _prompts = fix_bad_symbols(_prompts)\n",
1173
+ " _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
1174
+ " _savefiles[f'{chunk}'] = _prompts\n",
1175
+ " #---------#\n",
1176
+ " save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
1177
+ "\n",
1178
+ "\n",
1179
+ " if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n",
1180
+ " if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n",
1181
+ " if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n",
1182
+ " if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n",
1183
+ " if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n",
1184
+ " if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n",
1185
+ " if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n",
1186
+ " if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n",
1187
+ " if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n",
1188
+ " if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n",
1189
+ " if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n",
1190
+ " if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n",
1191
+ "\n",
1192
+ "\n",
1193
+ " #------#\n",
1194
+ " print(f'Saving savefile {save_filename} to {output_folder}...')\n",
1195
+ " with open(save_filename, 'w') as f:\n",
1196
+ " json.dump(_savefiles, f)\n",
1197
+ " #---------#\n",
1198
+ " continue\n",
1199
+ "#-----------#"
1200
+ ],
1201
+ "metadata": {
1202
+ "id": "x1uAVXZEoL0T",
1203
+ "cellView": "form"
1204
+ },
1205
+ "execution_count": null,
1206
+ "outputs": []
1207
+ },
1208
+ {
1209
+ "cell_type": "code",
1210
+ "source": [
1211
+ "# Determine if this notebook is running on Colab or Kaggle\n",
1212
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
1213
+ "home_directory = '/content/'\n",
1214
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
1215
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
1216
+ "%cd {home_directory}\n",
1217
+ "#-------#\n",
1218
+ "\n",
1219
+ "# @title Download the text_encodings as .zip\n",
1220
+ "import os\n",
1221
+ "%cd {home_directory}\n",
1222
+ "#os.remove(f'{home_directory}results.zip')\n",
1223
+ "root_output_folder = home_directory + 'output/'\n",
1224
+ "zip_dest = f'/content/results.zip' #drive/MyDrive\n",
1225
+ "!zip -r {zip_dest} {root_output_folder}"
1226
+ ],
1227
+ "metadata": {
1228
+ "id": "zivBNrw9uSVD",
1229
+ "cellView": "form"
1230
+ },
1231
+ "execution_count": null,
1232
+ "outputs": []
1233
+ },
1234
+ {
1235
+ "cell_type": "code",
1236
+ "source": [
1237
+ "# @title \t⚄ Quick fix for normalizing encoded text corpus tensors\n",
1238
+ "\n",
1239
+ "import os\n",
1240
+ "my_mkdirs('/content/output')\n",
1241
+ "my_mkdirs('/content/output/text_encodings')\n",
1242
+ "\n",
1243
+ "for filename in os.listdir(f'{prompts_folder}'):\n",
1244
+ " %cd {prompts_folder}\n",
1245
+ " prompts = {}\n",
1246
+ " with open(f'{filename}', 'r') as f:\n",
1247
+ " data = json.load(f).items()\n",
1248
+ " for key,value in data:\n",
1249
+ " prompts[key] = value\n",
1250
+ " #------#\n",
1251
+ " num_items = int(prompts['num_items'])\n",
1252
+ "\n",
1253
+ " %cd {encodings_folder}\n",
1254
+ " enc_filename = filename.replace('json', 'safetensors')\n",
1255
+ " _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n",
1256
+ " text_encodings = torch.zeros(num_items , dim)\n",
1257
+ " tmp = torch.ones(dim)\n",
1258
+ " tmp2 = torch.tensor(1/0.0043)\n",
1259
+ " zero_point = 0\n",
1260
+ " for index in range(num_items):\n",
1261
+ " text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1] , tmp , alpha= _text_encodings[index][0]).to(torch.float32)\n",
1262
+ " text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n",
1263
+ " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
1264
+ " less_than_zero = test<0\n",
1265
+ " while(torch.any(less_than_zero).item()):\n",
1266
+ " zero_point = zero_point + 1\n",
1267
+ " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
1268
+ " less_than_zero = test<0\n",
1269
+ " #------#\n",
1270
+ " _text_encodings[index][0] = zero_point\n",
1271
+ " _text_encodings[index][1:dim+1] = test\n",
1272
+ " #-------#\n",
1273
+ " %cd /content/output/text_encodings\n",
1274
+ "\n",
1275
+ " tmp = {}\n",
1276
+ " tmp['weights'] = _text_encodings.to(torch.uint8)\n",
1277
+ " tmp['num_items'] = torch.tensor(num_items).to(torch.uint8)\n",
1278
+ " tmp['scale'] = torch.tensor(0.0043)\n",
1279
+ " save_file(tmp , f'{enc_filename}')\n",
1280
+ "#------#"
1281
+ ],
1282
+ "metadata": {
1283
+ "cellView": "form",
1284
+ "id": "9qgHW1Wr7kZn"
1285
+ },
1286
+ "execution_count": null,
1287
+ "outputs": []
1288
+ }
1289
+ ]
1290
+ }