codeShare committed
Commit f04b033
1 Parent(s): a8e5652

Upload indexed_text_encoding_converter.ipynb

Google Colab Notebooks/indexed_text_encoding_converter.ipynb ADDED
@@ -0,0 +1,220 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "cskYkw0zXHEm"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Make your own text_encodings .safetensor file for later use (using GPU is recommended to speed things up)\n",
+ "\n",
+ "import json\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import shelve\n",
+ "import torch\n",
+ "from safetensors.torch import save_file\n",
+ "import json\n",
+ "\n",
+ "# Determine if this notebook is running on Colab or Kaggle\n",
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
+ "home_directory = '/content/'\n",
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+ "%cd {home_directory}\n",
+ "#-------#\n",
+ "\n",
+ "# User input\n",
+ "target = home_directory + 'text-to-image-prompts/names/fullnames/'\n",
+ "output_folder = home_directory + 'output/fullnames/'\n",
+ "root_filename = 'names_fullnames_text_👱_♀️female_fullnames'\n",
+ "NUM_FILES = 9\n",
+ "#--------#\n",
+ "\n",
+ "# Setup environment\n",
+ "def my_mkdirs(folder):\n",
+ " if os.path.exists(folder)==False:\n",
+ " os.makedirs(folder)\n",
+ "#--------#\n",
+ "output_folder_text = output_folder + 'text/'\n",
+ "output_folder_text = output_folder + 'text/'\n",
+ "output_folder_text_encodings = output_folder + 'text_encodings/'\n",
+ "target_raw = target + 'raw/'\n",
+ "%cd {home_directory}\n",
+ "my_mkdirs(output_folder)\n",
+ "my_mkdirs(output_folder_text)\n",
+ "my_mkdirs(output_folder_text_encodings)\n",
+ "#-------#\n",
+ "\n",
+ "# Load the data if not already loaded\n",
+ "try:\n",
+ " loaded\n",
+ "except:\n",
+ " %cd {home_directory}\n",
+ " !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+ " loaded = True\n",
+ "#--------#\n",
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "from transformers import AutoTokenizer\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
+ "from transformers import CLIPProcessor, CLIPModel\n",
+ "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
+ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\").to(device)\n",
+ "#---------#\n",
+ "for file_index in range(NUM_FILES + 1):\n",
+ " if (file_index < 1): continue\n",
+ " filename = f'{root_filename}-{file_index}'\n",
+ "\n",
+ " # Read {filename}.json\n",
+ " %cd {target_raw}\n",
+ " with open(filename + '.json', 'r') as f:\n",
+ " data = json.load(f)\n",
+ " _df = pd.DataFrame({'count': data})['count']\n",
+ " prompts = {\n",
+ " key : value.replace(\"</w>\",\" \") for key, value in _df.items()\n",
+ " }\n",
+ " index = 0\n",
+ " for key in prompts:\n",
+ " index = index + 1\n",
+ " #----------#\n",
+ " NUM_ITEMS = index\n",
+ " #------#\n",
+ "\n",
+ " # Calculate text_encoding for .json file contents and results as .db file\n",
+ " names_dict = {}\n",
+ " text_encoding_dict = {}\n",
+ " segments = {}\n",
+ " index = 0;\n",
+ " subby = 1;\n",
+ " NUM_HEADERS = 2\n",
+ " CHUNKS_SIZE = 1000\n",
+ " _filename = ''\n",
+ " for _index in range(NUM_ITEMS):\n",
+ " if (index % 100 == 0) : print(index)\n",
+ " if (index == 0 and _index>0) : index = index + 2 #make space for headers\n",
+ " if (_index % (CHUNKS_SIZE-NUM_HEADERS) == 0 and _index > 0) :\n",
+ "\n",
+ " # Write headers in the .json\n",
+ " names_dict[f'{0}'] = f'{_index}'\n",
+ " names_dict[f'{1}'] = f'{filename}-{subby}'\n",
+ "\n",
+ " # Encode the headers into text_encoding\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " #-------#\n",
+ "\n",
+ " # Write .json\n",
+ " _filename = f'{filename}-{subby}.json'\n",
+ " %cd {output_folder_text}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text}...')\n",
+ " with open(_filename, 'w') as f:\n",
+ " json.dump(names_dict, f)\n",
+ " #-------#\n",
+ "\n",
+ " # Write .safetensors\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_text_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
+ " save_file(text_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " #Iterate\n",
+ " subby = subby + 1\n",
+ " segments[f'{subby}'] = _filename\n",
+ " text_encoding_dict = {}\n",
+ " names_dict = {}\n",
+ " index = 0\n",
+ " #------#\n",
+ " #------#\n",
+ " else: index = index + 1\n",
+ " #--------#\n",
+ " inputs = tokenizer(text = '' + prompts[f'{_index}'], padding=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{index}'] = text_features.to(torch.device('cpu'))\n",
+ " names_dict[f'{index}'] = prompts[f'{_index}']\n",
+ " continue\n",
+ " #-----#\n",
+ " #-----#\n",
+ " # Write headers in the .json\n",
+ " names_dict[f'{0}'] = f'{_index}'\n",
+ " names_dict[f'{1}'] = f'{filename}-{subby}'\n",
+ "\n",
+ " # Encode the headers into text_encoding\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " #-------#\n",
+ "\n",
+ " # Write .json\n",
+ " _filename = f'{filename}-{subby}.json'\n",
+ " %cd {output_folder_text}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text}...')\n",
+ " with open(_filename, 'w') as f:\n",
+ " json.dump(names_dict, f)\n",
+ " #-------#\n",
+ "\n",
+ " # Write .safetensors\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_text_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
+ " save_file(text_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " #Iterate\n",
+ " subby = subby + 1\n",
+ " segments[f'{subby}'] = _filename\n",
+ " text_encoding_dict = {}\n",
+ " names_dict = {}\n",
+ " index = 0\n",
+ " #------#\n",
+ " #----#"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title Download the text_encodings as .zip\n",
+ "import os\n",
+ "%cd {home_directory}\n",
+ "#os.remove(f'{home_directory}results.zip')\n",
+ "zip_dest = f'{home_directory}results.zip'\n",
+ "!zip -r {zip_dest} {output_folder}"
+ ],
+ "metadata": {
+ "id": "cR-ed0CGhekk"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
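
For the "later use" mentioned in the first cell, a minimal sketch of how one saved segment could be read back; the segment name and relative paths below are placeholder assumptions following the `{filename}-{subby}` naming and output folders produced above.

import json
from safetensors.torch import load_file

# Placeholder segment name following the '{root_filename}-{file_index}-{subby}' scheme (assumption)
segment = 'names_fullnames_text_👱_♀️female_fullnames-1-1'

# Keys '0' and '1' are the headers written by the notebook (item offset and segment name);
# every other key maps an index to a prompt and to its normalized CLIP text feature.
with open(f'output/fullnames/text/{segment}.json') as f:
    names = json.load(f)
encodings = load_file(f'output/fullnames/text_encodings/{segment}.safetensors')

for key, prompt in names.items():
    vec = encodings[key]  # shape [1, 768] for openai/clip-vit-large-patch14
    print(key, prompt, vec.shape)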