codeShare committed
Commit fe9cdc7
Parent: eaffdba

Upload token_vectors_math.ipynb
Google Colab Notebooks/token_vectors_math.ipynb CHANGED
@@ -47,10 +47,8 @@
  " _file_name = 'vocab'\n",
  " #-----#\n",
  " index = 0\n",
- " file_index = 0\n",
  " prompts = {}\n",
  " text_encodings = {}\n",
- " _text_encodings = {}\n",
  " #-----#\n",
  " for filename in os.listdir(f'{path}'):\n",
  " print(f'reading {filename}....')\n",
@@ -60,28 +58,19 @@
  " data = json.load(f)\n",
  " #------#\n",
  " _df = pd.DataFrame({'count': data})['count']\n",
- " _prompts = {\n",
+ " prompts = {\n",
  " key : value for key, value in _df.items()\n",
  " }\n",
- " #-------#\n",
- " %cd {path_vec}\n",
- " _text_encodings = load_file(f'{_file_name}.safetensors')\n",
- "\n",
- " for key in _prompts:\n",
- " _index = int(key)\n",
- " value = _prompts[key]\n",
- " #------#\n",
- " #Read the text_encodings + prompts\n",
- " text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
- " prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
+ "\n",
+ " for key in prompts:\n",
  " index = index + 1\n",
- " continue\n",
- " #-------#\n",
- " #--------#\n",
- " #_text_encodings.close() #close the text_encodings file\n",
- " file_index = file_index + 1\n",
+ " #------#\n",
+ " NUM_ITEMS = index -1\n",
+ " #------#\n",
+ " %cd {path_vec}\n",
+ " text_encodings = load_file(f'{_file_name}.safetensors')\n",
+ " continue\n",
  " #----------#\n",
- " NUM_ITEMS = index -1\n",
  " return prompts , text_encodings , NUM_ITEMS\n",
  "#--------#\n",
  "\n",
@@ -103,7 +92,7 @@
  "base_uri": "https://localhost:8080/"
  },
  "id": "V-1DrszLqEVj",
- "outputId": "9b894182-a7e0-436e-9bf1-5a7d3d920ac7"
+ "outputId": "8788d8fc-59ce-4cba-9867-4860291afcb2"
  },
  "execution_count": 5,
  "outputs": [
@@ -131,61 +120,18 @@
  " url = '/content/text-to-image-prompts/vocab'\n",
  " vocab , tokens, nA = append_from_url(vocab , tokens, nA , url , '')\n",
  "#-------#\n",
- "NUM_TOKENS = nA # NUM_TOKENS = 49407\n",
+ "NUM_TOKENS = nA\n",
  "#--------#\n",
  "\n",
- "print(NUM_TOKENS)"
+ "if False:\n",
+ " print(NUM_TOKENS) # NUM_TOKENS = 49407\n",
+ " print(vocab['8922']) #ID for banana is 8922"
  ],
  "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "EDCd1IGEqj3-",
- "outputId": "bbaab5ab-4bd3-4766-ad44-f139a0ec7a02"
+ "id": "EDCd1IGEqj3-"
  },
- "execution_count": 12,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "reading vocab.json....\n",
- "/content/text-to-image-prompts/vocab/text\n",
- "/content/text-to-image-prompts/vocab/token_vectors\n",
- "49407\n"
- ]
- }
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "vocab[f'{8922}']"
- ],
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 35
- },
- "id": "o9AfUKkvwUdG",
- "outputId": "029e1148-056b-4040-da23-7ed6caaca878"
- },
- "execution_count": 19,
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- "'benedict</w>'"
- ],
- "application/vnd.google.colaboratory.intrinsic+json": {
- "type": "string"
- }
- },
- "metadata": {},
- "execution_count": 19
- }
- ]
+ "execution_count": null,
+ "outputs": []
  },
  {
  "cell_type": "code",
@@ -352,14 +298,13 @@
  },
  {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": 3,
  "metadata": {
  "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 599
+ "base_uri": "https://localhost:8080/"
  },
  "id": "AyhYBlP2pYyI",
- "outputId": "0168beb3-428c-4886-f159-adc479b9da4b"
+ "outputId": "9e2fc730-23ee-4b05-9957-6fb2db82f2cf"
  },
  "outputs": [
  {
@@ -368,33 +313,10 @@
  "text": [
  "/content\n",
  "/content\n",
- "Cloning into 'text-to-image-prompts'...\n",
- "remote: Enumerating objects: 1552, done.\u001b[K\n",
- "remote: Counting objects: 100% (1549/1549), done.\u001b[K\n",
- "remote: Compressing objects: 100% (1506/1506), done.\u001b[K\n",
- "remote: Total 1552 (delta 190), reused 0 (delta 0), pack-reused 3 (from 1)\u001b[K\n",
- "Receiving objects: 100% (1552/1552), 9.09 MiB | 6.30 MiB/s, done.\n",
- "Resolving deltas: 100% (190/190), done.\n",
- "Updating files: 100% (906/906), done.\n",
- "Filtering content: 100% (438/438), 1.49 GiB | 56.42 MiB/s, done.\n",
- "/content\n",
  "/content/text-to-image-prompts/vocab/raw\n",
- "/content/text-to-image-prompts/vocab/raw\n"
- ]
- },
- {
- "output_type": "error",
- "ename": "JSONDecodeError",
- "evalue": "Expecting ':' delimiter: line 28 column 7 (char 569)",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m<ipython-input-1-542fe0f58fcc>\u001b[0m in \u001b[0;36m<cell line: 56>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'cd'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'{target_raw}'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'{root_filename}.json'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 57\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mjson\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 58\u001b[0m \u001b[0m_df\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m \u001b[0;31m#reverse key and value in the dict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m/usr/lib/python3.10/json/__init__.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(fp, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 291\u001b[0m \u001b[0mkwarg\u001b[0m\u001b[0;34m;\u001b[0m \u001b[0motherwise\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mJSONDecoder\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mused\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 292\u001b[0m \"\"\"\n\u001b[0;32m--> 293\u001b[0;31m return loads(fp.read(),\n\u001b[0m\u001b[1;32m 294\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcls\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobject_hook\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobject_hook\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0mparse_float\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparse_float\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparse_int\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparse_int\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m/usr/lib/python3.10/json/__init__.py\u001b[0m in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0mparse_int\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mparse_float\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 345\u001b[0m parse_constant is None and object_pairs_hook is None and not kw):\n\u001b[0;32m--> 346\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_default_decoder\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 347\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcls\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 348\u001b[0m \u001b[0mcls\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mJSONDecoder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m/usr/lib/python3.10/json/decoder.py\u001b[0m in \u001b[0;36mdecode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 335\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \"\"\"\n\u001b[0;32m--> 337\u001b[0;31m \u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraw_decode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_w\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 338\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_w\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m/usr/lib/python3.10/json/decoder.py\u001b[0m in \u001b[0;36mraw_decode\u001b[0;34m(self, s, idx)\u001b[0m\n\u001b[1;32m 351\u001b[0m \"\"\"\n\u001b[1;32m 352\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 353\u001b[0;31m \u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mscan_once\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 354\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mJSONDecodeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Expecting value\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;31mJSONDecodeError\u001b[0m: Expecting ':' delimiter: line 28 column 7 (char 569)"
+ "/content/text-to-image-prompts/vocab/raw\n",
+ "/content/output/vocab/token_vectors\n",
+ "/content/output/vocab/text\n"
  ]
  }
  ],
@@ -465,10 +387,11 @@
  "\n",
  "\n",
  "tensors = {}\n",
+ "names = {}\n",
  "for key in vocab:\n",
- " name = vocab[key]\n",
  " token = tokens[int(key)]\n",
- " tensors[key] = token\n",
+ " tensors[f'{key}'] = token\n",
+ " names[f'{key}'] = vocab[key]\n",
  "#-----#\n",
  "\n",
  "%cd {output_folder_token_vectors}\n",
@@ -476,7 +399,35 @@
  "\n",
  "%cd {output_folder_text}\n",
  "with open('vocab.json', 'w') as f:\n",
- " json.dump(vocab, f)\n"
+ " json.dump(names, f)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "W_Ig4ZGH18hX",
+ "outputId": "5f2c0a6e-9b6e-4135-d7de-900673f34e1c",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ }
+ },
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "'banana</w>'"
+ ],
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ }
+ },
+ "metadata": {},
+ "execution_count": 4
+ }
  ]
  },
  {
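This hunk fixes the export cell: `tensors` now actually receives each token vector under a stringified key (safetensors requires string keys), and the new `names` dict is what gets written to `vocab.json` instead of the whole `vocab` object. A sketch of the corrected save step, assuming `vocab` and `tokens` as defined earlier in the notebook; the output file names are the notebook's, the flat paths are illustrative:

import json
from safetensors.torch import save_file

# 'vocab' and 'tokens' come from the notebook's earlier cells
tensors = {}
names = {}
for key in vocab:
    token = tokens[int(key)]
    tensors[f'{key}'] = token     # one embedding vector per token ID
    names[f'{key}'] = vocab[key]  # the matching token string

save_file(tensors, 'vocab.safetensors')  # token vectors
with open('vocab.json', 'w') as f:
    json.dump(names, f)                  # ID -> token-string map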
@@ -496,13 +447,34 @@
  "#os.remove(f'{home_directory}results.zip')\n",
  "root_output_folder = home_directory + 'output/'\n",
  "zip_dest = f'{home_directory}results.zip'\n",
- "!zip -r {zip_dest} '/content/text-to-image-prompts/tokens'"
+ "!zip -r {zip_dest} {root_output_folder}"
  ],
  "metadata": {
- "id": "9uIDf9IUpzh2"
+ "id": "9uIDf9IUpzh2",
+ "outputId": "949f95c8-7657-42dd-d70a-d3cc7da2c72f",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
  },
- "execution_count": null,
- "outputs": []
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content\n",
+ "/content\n",
+ " adding: content/output/ (stored 0%)\n",
+ " adding: content/output/vocab/ (stored 0%)\n",
+ " adding: content/output/vocab/text/ (stored 0%)\n",
+ " adding: content/output/vocab/text/vocab.json (deflated 71%)\n",
+ " adding: content/output/vocab/text/.ipynb_checkpoints/ (stored 0%)\n",
+ " adding: content/output/vocab/token_vectors/ (stored 0%)\n",
+ " adding: content/output/vocab/token_vectors/vocab.safetensors (deflated 9%)\n",
+ " adding: content/output/vocab/token_vectors/.ipynb_checkpoints/ (stored 0%)\n"
+ ]
+ }
+ ]
  }
  ]
 }
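The final hunk repoints the zip target from the stale `/content/text-to-image-prompts/tokens` path to `{root_output_folder}`, so the archive picks up the freshly written vocab files. A pure-Python equivalent of the `!zip -r` shell cell, assuming the same Colab folder layout (`shutil.make_archive` appends the `.zip` extension itself):

import shutil

home_directory = '/content/'
root_output_folder = home_directory + 'output/'

# Produces /content/results.zip containing everything under /content/output/
shutil.make_archive(f'{home_directory}results', 'zip', root_output_folder)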
 