Upload sd_token_similarity_calculator.ipynb
Google Colab Notebooks/sd_token_similarity_calculator.ipynb
CHANGED
@@ -539,6 +539,113 @@
         "id": "hyK423TQCRup"
       }
     },
+    {
+      "cell_type": "code",
+      "source": [
+        "# @title Process the raw vocab into json + .safetensor pair\n",
+        "\n",
+        "# NOTE : although they have 1x768 dimension , these are not text_encodings , but token vectors\n",
+        "import json\n",
+        "import pandas as pd\n",
+        "import os\n",
+        "import shelve\n",
+        "import torch\n",
+        "from safetensors.torch import save_file , load_file\n",
+        "import json\n",
+        "\n",
+        "home_directory = '/content/'\n",
+        "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+        "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+        "%cd {home_directory}\n",
+        "#-------#\n",
+        "\n",
+        "# Load the data if not already loaded\n",
+        "try:\n",
+        "  loaded\n",
+        "except:\n",
+        "  %cd {home_directory}\n",
+        "  !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+        "  loaded = True\n",
+        "#--------#\n",
+        "\n",
+        "# User input\n",
+        "target = home_directory + 'text-to-image-prompts/vocab/'\n",
+        "root_output_folder = home_directory + 'output/'\n",
+        "output_folder = root_output_folder + 'vocab/'\n",
+        "root_filename = 'vocab'\n",
+        "NUM_FILES = 1\n",
+        "#--------#\n",
+        "\n",
+        "# Setup environment\n",
+        "def my_mkdirs(folder):\n",
+        "  if os.path.exists(folder)==False:\n",
+        "    os.makedirs(folder)\n",
+        "#--------#\n",
+        "output_folder_text = output_folder + 'text/'\n",
+        "output_folder_text = output_folder + 'text/'\n",
+        "output_folder_token_vectors = output_folder + 'token_vectors/'\n",
+        "target_raw = target + 'raw/'\n",
+        "%cd {home_directory}\n",
+        "my_mkdirs(output_folder)\n",
+        "my_mkdirs(output_folder_text)\n",
+        "my_mkdirs(output_folder_token_vectors)\n",
+        "#-------#\n",
+        "\n",
+        "%cd {target_raw}\n",
+        "tokens = torch.load(f'{root_filename}.pt' , weights_only=True)\n",
+        "tokens = tokens.clone().detach()\n",
+        "\n",
+        "\n",
+        "%cd {target_raw}\n",
+        "with open(f'{root_filename}.json', 'r') as f:\n",
+        "  data = json.load(f)\n",
+        "_df = pd.DataFrame({'count': data})['count']\n",
+        "#reverse key and value in the dict\n",
+        "vocab = {\n",
+        "  value : key for key, value in _df.items()\n",
+        "}\n",
+        "#------#\n",
+        "\n",
+        "\n",
+        "tensors = {}\n",
+        "for key in vocab:\n",
+        "  name = vocab[key]\n",
+        "  token = tokens[int(key)]\n",
+        "  tensors[key] = token\n",
+        "#-----#\n",
+        "\n",
+        "%cd {output_folder_token_vectors}\n",
+        "save_file(tensors, \"vocab.safetensors\")\n",
+        "\n",
+        "%cd {output_folder_text}\n",
+        "with open('vocab.json', 'w') as f:\n",
+        "  json.dump(vocab, f)\n",
+        "\n",
+        "\n"
+      ],
+      "metadata": {
+        "id": "H3JRx5rhWIEo",
+        "outputId": "df7f400b-1f0a-4c1e-c6c9-4c3db0491545",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        }
+      },
+      "execution_count": 26,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "/content\n",
+            "/content\n",
+            "/content/text-to-image-prompts/vocab/raw\n",
+            "/content/text-to-image-prompts/vocab/raw\n",
+            "/content/output/vocab/token_vectors\n",
+            "/content/output/vocab/text\n"
+          ]
+        }
+      ]
+    },
     {
       "cell_type": "code",
       "source": [
@@ -743,10 +850,28 @@
         "!zip -r {zip_dest} {root_output_folder}"
       ],
       "metadata": {
-        "id": "V4YCpmWlkPMG"
+        "id": "V4YCpmWlkPMG",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "9eafe028-a982-4b67-adf3-0633cbbe81f7"
       },
-      "execution_count":
-      "outputs": [
+      "execution_count": 27,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "/content\n",
+            " adding: content/output/ (stored 0%)\n",
+            " adding: content/output/vocab/ (stored 0%)\n",
+            " adding: content/output/vocab/text/ (stored 0%)\n",
+            " adding: content/output/vocab/text/vocab.json (deflated 71%)\n",
+            " adding: content/output/vocab/token_vectors/ (stored 0%)\n",
+            " adding: content/output/vocab/token_vectors/vocab.safetensors (deflated 9%)\n"
+          ]
+        }
+      ]
     },
     {
       "cell_type": "code",