{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"This Notebook is a Stable-diffusion tool which allows you to find similiar tokens from the SD 1.5 vocab.json that you can use for text-to-image generation. Try this Free online SD 1.5 generator with the results: https://perchance.org/fusion-ai-image-generator\n",
"\n",
"Scroll to the bottom of the notebook to see the guide for how this works."
],
"metadata": {
"id": "L7JTcbOdBPfh"
}
},
{
"cell_type": "code",
"source": [
"# @title ✳️ Load/initialize uncompressed data\n",
"#Imports\n",
"#!pip install safetensors\n",
"from safetensors.torch import load_file\n",
"import json , os , shelve , torch\n",
"import pandas as pd\n",
"#----#\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"def fix_bad_symbols(txt):\n",
" result = txt\n",
" for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
" result = result.replace(symbol,'\\\\' + symbol)\n",
" #------#\n",
" return result;\n",
"\n",
"\n",
"def getPrompts(_path, separator):\n",
" path = _path + '/text'\n",
" path_enc = _path + '/text_encodings'\n",
" #-----#\n",
" index = 0\n",
" prompts = {}\n",
" text_encodings = {}\n",
" _text_encodings = {}\n",
" #-----#\n",
" for filename in os.listdir(f'{path}'):\n",
" print(f'reading {filename}....')\n",
" _index = 0\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" #------#\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
" _file_name = _prompts[f'{1}']\n",
" %cd {path_enc}\n",
" _text_encodings = load_file(f'{_file_name}.safetensors')\n",
" for key in _prompts:\n",
" _index = int(key)\n",
" value = _prompts[key]\n",
" if _index<2:continue\n",
" #------#\n",
" #Read the text_encodings + prompts\n",
" text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
" prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
" index = index + 1\n",
" continue\n",
" #-------#\n",
" #--------#\n",
" #----------#\n",
" NUM_ITEMS = index -1\n",
" return prompts , text_encodings , NUM_ITEMS\n",
"#--------#\n",
"\n",
"def append_from_url(dictA, tensA , nA , url , separator):\n",
" dictB , tensB, nB = getPrompts(url, separator)\n",
" dictAB = dictA\n",
" tensAB = tensA\n",
" nAB = nA\n",
" for key in dictB:\n",
" nAB = nAB + 1\n",
" dictAB[f'{nA + int(key)}'] = dictB[key]\n",
" tensAB[f'{nA + int(key)}'] = tensB[key]\n",
" #-----#\n",
" return dictAB, tensAB , nAB-1\n",
"#-------#\n",
"\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"\n",
"#🔸🔹\n",
"# Load the data if not already loaded\n",
"try:\n",
" loaded\n",
"except:\n",
" %cd {home_directory}\n",
" !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
" loaded = True\n",
"#--------#\n",
"\n",
"#default NEG values\n",
"try: name_NEG\n",
"except: name_NEG = ''\n",
"try: image_NEG\n",
"except: image_NEG = ''\n",
"try: strength_image_NEG\n",
"except: strength_image_NEG = 1\n",
"try: strength_NEG\n",
"except: strength_NEG = 1\n",
"try: NUM_VOCAB_ITEMS\n",
"except: NUM_VOCAB_ITEMS = 0\n",
"try: using_NEG\n",
"except: using_NEG = False\n",
"try: using_image_NEG\n",
"except: using_image_NEG = False\n",
"#------#\n",
"\n",
"def getJSON(path , filename):\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" #------#\n",
" print(f'reading {filename}....')\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
" return _prompts\n",
"\n",
"#----#\n",
"\n",
"def getPromptsAndLinks(_path):\n",
" path = _path + '/text'\n",
" path_enc = _path + '/text_encodings'\n",
" #-----#\n",
" path_images = _path + '/images'\n",
" path_enc_images = _path + '/image_encodings'\n",
" #----#\n",
" _file_name = ''\n",
" _file_name_image = ''\n",
" #-----#\n",
" index = 0\n",
" prompts = {}\n",
" _prompts = {}\n",
" #-------#\n",
" urls = {}\n",
" _urls = {}\n",
" #------#\n",
" text_encodings = {}\n",
" _text_encodings = {}\n",
" image_encodings = {}\n",
" _image_encodings = {}\n",
" #-----#\n",
" for filename in os.listdir(f'{path}'):\n",
"\n",
" print(f'reading {filename}.json...')\n",
" _index = 0\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
"\n",
" for key in _prompts:\n",
" _index = int(key)\n",
" value = _prompts[key]\n",
" if _index<=0: continue\n",
" if _index<=1:\n",
" _file_name = f'{value}'\n",
" _file_name_images = _prompts[f'{0}']\n",
" #-------#\n",
" print(f'reading {_file_name_images}.json..')\n",
" %cd {path_images}\n",
" with open(f'{_file_name_images}.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _urls = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
" #--------#\n",
" %cd {path_enc}\n",
" _text_encodings = load_file(f'{_file_name}.safetensors')\n",
" text_encodings[f'{index-1}'] = _text_encodings[f'{_index-1}']\n",
" text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
" #-------#\n",
" %cd {path_enc_images}\n",
" _image_encodings = load_file(f'{_file_name_images}.safetensors')\n",
" image_encodings[f'{index-1}'] = _image_encodings[f'{_index-1}']\n",
" image_encodings[f'{index}'] = _image_encodings[f'{_index}']\n",
" #-------#\n",
" prompts[f'{index-1}'] = _prompts[f'{_index-1}']\n",
" urls[f'{index-1}'] = _urls[f'{_index-1}']\n",
" prompts[f'{index}'] = _prompts[f'{_index}']\n",
" urls[f'{index}'] = _urls[f'{_index}']\n",
" #-------#\n",
" index = index + 1\n",
" continue\n",
" #--------#\n",
" #Read the text_encodings + prompts\n",
" text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
" image_encodings[f'{index}'] = _image_encodings[f'{_index}']\n",
" prompts[f'{index}'] = _prompts[f'{_index}']\n",
" urls[f'{index}'] = _urls[f'{_index}']\n",
" index = index + 1\n",
" continue\n",
" #-------#\n",
" #--------#\n",
" #----------#\n",
" NUM_ITEMS = index -1\n",
" return prompts , text_encodings , urls , image_encodings , NUM_ITEMS\n",
"#--------#\n",
"\n"
],
"metadata": {
"id": "rUXQ73IbonHY",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "288ba177-baa7-437a-c43f-489ffb8e93cf"
},
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content\n",
"/content\n",
"Cloning into 'text-to-image-prompts'...\n",
"remote: Enumerating objects: 17503, done.\u001b[K\n",
"remote: Counting objects: 100% (1517/1517), done.\u001b[K\n",
"remote: Compressing objects: 100% (1514/1514), done.\u001b[K\n",
"remote: Total 17503 (delta 3), reused 0 (delta 0), pack-reused 15986 (from 1)\u001b[K\n",
"Receiving objects: 100% (17503/17503), 179.20 MiB | 9.40 MiB/s, done.\n",
"Resolving deltas: 100% (2227/2227), done.\n",
"Updating files: 100% (7700/7700), done.\n",
"Filtering content: 100% (3083/3083), 9.01 GiB | 62.15 MiB/s, done.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# @title 📚 Select items to build the compressed vocab with\n",
"from safetensors.torch import save_file\n",
"#------#\n",
"\n",
"#------#\n",
"def quantize(__text_encodings, __nA):\n",
" # @title \tCheck quantize params\n",
" #Q(x,scale,zero_point) = round(x/scale + zero_point)\n",
" scale = (1/0.0043)\n",
" zero_point = 0\n",
" dim = 768\n",
" _nA = __nA\n",
" _text_encodings = torch.zeros(_nA+1,dim + 1).to(torch.uint8)\n",
" for index in range(_nA):\n",
" test = torch.round(torch.mul(__text_encodings[f'{index}'], scale) + zero_point)\n",
" less_than_zero = test<0\n",
" while(torch.any(less_than_zero).item()):\n",
" zero_point = zero_point + 1\n",
" test = torch.round(torch.mul(__text_encodings[f'{index}'], scale) + zero_point).to(torch.uint8)\n",
" less_than_zero = test<0\n",
" #------#\n",
" _text_encodings[index][0] = zero_point\n",
" _text_encodings[index][1:dim+1] = test\n",
" #---------#\n",
" _text_encodings = _text_encodings.detach().clone()\n",
" return _text_encodings , _nA\n",
"#--------#\n",
"\n",
"def push(_prompts , __text_encodings, __nA , text_encodings):\n",
" _text_encodings , _nA = quantize(__text_encodings , __nA)\n",
" for _key in _prompts:\n",
" key = _prompts[_key]\n",
" try: text_encodings[f'{key}']\n",
" except:\n",
" text_encodings[f'{key}'] = _text_encodings[int(_key)].clone().detach()\n",
" __nA = __nA + 1\n",
" #--------#\n",
" return text_encodings , _nA\n",
"#--------#\n",
"\n",
"def save_as(name, text_encodings , _nA):\n",
" %cd /content/\n",
" save_file(text_encodings, f'{name}_vocab_q0043_{_nA}.safetensors')\n",
"#-------#\n",
"\n",
"def save_from_urls(urls , savename):\n",
" nA = 0\n",
" text_encodings={}\n",
" for url in urls:\n",
" _prompts , _text_encodings, _nA = append_from_url({} ,{}, 0 , url , '')\n",
" text_encodings , _nA = push(_prompts , _text_encodings, _nA , text_encodings)\n",
" nA = nA + _nA\n",
" #------#\n",
" save_as(savename, text_encodings , nA)\n",
"#------#\n",
"\n",
"# FANFIC TAGS\n",
"urls = ['/content/text-to-image-prompts/fanfic/tags']\n",
"save_from_urls(urls , 'fanfic_tags')\n",
"#------#\n",
"\n",
"# CIVITAI SET\n",
"urls = ['/content/text-to-image-prompts/civitai-prompts/blue',\n",
" '/content/text-to-image-prompts/civitai-prompts/red',\n",
" '/content/text-to-image-prompts/civitai-prompts/green',\n",
" '/content/text-to-image-prompts/civitai-prompts/yellow']\n",
"save_from_urls(urls , 'civitai')\n",
"#------#\n",
"\n",
"\n",
"\n",
"\n",
"# LYRICS\n",
"urls = ['/content/text-to-image-prompts/lyrics']\n",
"save_from_urls(urls , 'lyrics')\n",
"#------#\n",
"\n",
"# PEOPLE NAMES\n",
"urls = ['/content/text-to-image-prompts/names/firstnames',\n",
" '/content/text-to-image-prompts/names/lastnames',\n",
" '/content/text-to-image-prompts/names/celebs/mixed',\n",
" '/content/text-to-image-prompts/names/fullnames']\n",
"save_from_urls(urls , 'names')\n",
"#------#\n",
"\n",
"# RULE 34\n",
"urls = ['/content/text-to-image-prompts/e621',\n",
" '/content/text-to-image-prompts/manga',\n",
" '/content/text-to-image-prompts/danbooru']\n",
"save_from_urls(urls , 'r34')\n",
"#------#\n",
"\n",
"\n",
"# CLIP MIX\n",
"urls = ['/content/text-to-image-prompts/prefix_suffix_pairs',\n",
" '/content/text-to-image-prompts/suffix_tripple',\n",
" '/content/text-to-image-prompts/suffix_quad' ,\n",
" '/content/text-to-image-prompts/nouns' ,\n",
" '/content/text-to-image-prompts/vocab/text_encodings/emoji',\n",
" ] #'/content/text-to-image-prompts/vocab/text_encodings/suffix/' ['common','average','rare','weird','exotic']\n",
" #'/content/text-to-image-prompts/vocab/text_encodings/suffix/' ['common','average','rare','weird','exotic']\n",
"save_from_urls(urls , 'clip')\n",
"#------#\n",
"\n",
"if False :\n",
" tmp = '/content/text-to-image-prompts/vocab/text_encodings/prefix/'\n",
" for item in ['common','average','rare','weird','exotic'] :\n",
" url = tmp + item\n",
" _prompts , _text_encodings, _nA = append_from_url({} ,{}, 0 , url , '')\n",
" nA = nA + push(_prompts,quantize(_text_encodings, _nA) , _nA)\n",
"#------#\n",
"\n",
"if False:\n",
" url = '/content/text-to-image-prompts/fusion'\n",
" _prompts , _text_encodings, _nA = append_from_url({} ,{}, 0 , url , '')\n",
" nA = nA + push(_prompts,quantize(_text_encodings, _nA) , _nA)\n",
"#--------#"
],
"metadata": {
"id": "ZMG4CThUAmwW"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
"\n",
"#image_index = 0 # @param {type:'number'}\n",
"# @markdown 📥 Load the data (only required one time)\n",
"load_the_data = True # @param {type:\"boolean\"}\n",
"\n",
"# @markdown 🖼️ Choose a pre-encoded reference\n",
"index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
"\n",
"PROMPT_INDEX = index\n",
"\n",
"# @markdown ⚖️ Set the value for C in the reference
sim = C* text_enc + image_enc*(1-C)
\n",
"\n",
"C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
"\n",
"# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
"\n",
"NEG = '' # @param {type:'string'}\n",
"strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
"\n",
"# @markdown Calculate most similiar items using above settings?\n",
"enable = True # @param {type:\"boolean\"}\n",
"\n",
"if (load_the_data):\n",
" target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
" from transformers import AutoTokenizer\n",
" tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
" from transformers import CLIPProcessor, CLIPModel\n",
" processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
" model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
" logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
"\n",
"from PIL import Image\n",
"import requests\n",
"prompt = target_prompts[f'{index}']\n",
"url = urls[f'{index}']\n",
"if url.find('perchance')>-1:\n",
" image = Image.open(requests.get(url, stream=True).raw)\n",
"else: print(\"(No image for this ID)\")\n",
"\n",
"print(\"\")\n",
"print(f\"'{prompt}'\")\n",
"print(\"\")\n",
"\n",
"if(enable):\n",
" text_features_A = target_text_encodings[f'{index}']\n",
" image_features_A = target_image_encodings[f'{index}']\n",
"\n",
" # text-similarity\n",
" sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
"\n",
" neg_sims = 0*sims\n",
" if(NEG != ''):\n",
"\n",
" # Get text features for user input\n",
" inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
" text_features_NEG = model.get_text_features(**inputs)\n",
" text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
"\n",
" # text-similarity\n",
" neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
" #------#\n",
"\n",
" # plus image-similarity\n",
" sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
"\n",
"\n",
" # minus NEG-similarity\n",
" sims = sims - neg_sims\n",
"\n",
" # Sort the items\n",
" sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"\n",
" # @title ⚙️📝 Print the results (Advanced)\n",
" list_size = 1000 # param {type:'number'}\n",
" start_at_index = 0 # param {type:'number'}\n",
" print_Similarity = True # param {type:\"boolean\"}\n",
" print_Prompts = True # param {type:\"boolean\"}\n",
" print_Prefix = True # param {type:\"boolean\"}\n",
" print_Descriptions = True # param {type:\"boolean\"}\n",
" compact_Output = True # param {type:\"boolean\"}\n",
"\n",
" # @markdown -----------\n",
" # @markdown ⚙️📝 Printing options\n",
" newline_Separator = False # @param {type:\"boolean\"}\n",
"\n",
" import random\n",
" list_size2 = 1000 # param {type:'number'}\n",
" start_at_index2 = 10000 # param {type:'number'}\n",
" rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
"\n",
" # @markdown Repeat output N times\n",
" N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
"\n",
" # title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
" RANGE = list_size\n",
" separator = '|'\n",
" if newline_Separator : separator = separator + '\\n'\n",
"\n",
" _prompts = ''\n",
" _sims = ''\n",
" for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index].item()\n",
"\n",
" prompt = prompts[f'{index}']\n",
" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
"\n",
" #Remove duplicates\n",
" if _prompts.find(prompt + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
" #------#\n",
" _prompts = fix_bad_symbols(_prompts)\n",
" __prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
" __sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
" #------#\n",
"\n",
" if(not print_Prompts): __prompts = ''\n",
" if(not print_Similarity): __sims = ''\n",
"\n",
" if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
" for i in range(N) : print(__prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
" #-------#\n",
" #-------#\n",
"#-------#\n",
"image\n"
],
"metadata": {
"id": "7qk3MgPVmApD"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \t⚄ Create a savefile-set from the entire range of pre-encoded items\n",
"\n",
"# @markdown 📥 Load the data (only required one time)\n",
"load_the_data = True # @param {type:\"boolean\"}\n",
"\n",
"import math\n",
"from safetensors.torch import load_file\n",
"import json , os , torch\n",
"import pandas as pd\n",
"from PIL import Image\n",
"import requests\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"# @markdown ⚖️ Set the value for C in the reference
sim = C* text_enc + image_enc*(1-C)
\n",
"\n",
"C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
"\n",
"# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
"if(load_the_data):\n",
" target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
" from transformers import AutoTokenizer\n",
" tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
" from transformers import CLIPProcessor, CLIPModel\n",
" processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
" model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
" logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
"#---------#\n",
"\n",
"filename = 'blank.json'\n",
"path = '/content/text-to-image-prompts/fusion/'\n",
"print(f'reading {filename}....')\n",
"_index = 0\n",
"%cd {path}\n",
"with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
"#------#\n",
"_df = pd.DataFrame({'count': data})['count']\n",
"_blank = {\n",
" key : value for key, value in _df.items()\n",
"}\n",
"#------#\n",
"\n",
"root_savefile_name = 'fusion_C05_X7'\n",
"\n",
"%cd /content/\n",
"output_folder = '/content/output/savefiles/'\n",
"my_mkdirs(output_folder)\n",
"\n",
"\n",
"my_mkdirs('/content/output2/savefiles/')\n",
"my_mkdirs('/content/output3/savefiles/')\n",
"my_mkdirs('/content/output4/savefiles/')\n",
"my_mkdirs('/content/output5/savefiles/')\n",
"my_mkdirs('/content/output6/savefiles/')\n",
"my_mkdirs('/content/output7/savefiles/')\n",
"my_mkdirs('/content/output8/savefiles/')\n",
"my_mkdirs('/content/output9/savefiles/')\n",
"my_mkdirs('/content/output10/savefiles/')\n",
"my_mkdirs('/content/output11/savefiles/')\n",
"my_mkdirs('/content/output12/savefiles/')\n",
"my_mkdirs('/content/output13/savefiles/')\n",
"\n",
"\n",
"NEG = '' # @param {type:'string'}\n",
"strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
"\n",
"for index in range(1667):\n",
"\n",
" PROMPT_INDEX = index\n",
" prompt = target_prompts[f'{index}']\n",
" url = urls[f'{index}']\n",
" if url.find('perchance')>-1:\n",
" image = Image.open(requests.get(url, stream=True).raw)\n",
" else: continue #print(\"(No image for this ID)\")\n",
"\n",
" print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
" text_features_A = target_text_encodings[f'{index}']\n",
" image_features_A = target_image_encodings[f'{index}']\n",
" # text-similarity\n",
" sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
"\n",
" neg_sims = 0*sims\n",
" if(NEG != ''):\n",
" # Get text features for user input\n",
" inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
" text_features_NEG = model.get_text_features(**inputs)\n",
" text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
" # text-similarity\n",
" neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
" #------#\n",
"\n",
" # plus image-similarity\n",
" sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
"\n",
" # minus NEG-similarity\n",
" sims = sims - neg_sims\n",
"\n",
" # Sort the items\n",
" sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"\n",
" # @markdown Repeat output N times\n",
" RANGE = 1000\n",
" NUM_CHUNKS = 10+\n",
" separator = '|'\n",
" _savefiles = {}\n",
" #-----#\n",
" for chunk in range(NUM_CHUNKS):\n",
" if chunk=<10:continue\n",
" start_at_index = chunk * RANGE\n",
" _prompts = ''\n",
" for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index].item()\n",
" prompt = prompts[f'{index}']\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
" _prompts = fix_bad_symbols(_prompts)\n",
" _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
" _savefiles[f'{chunk}'] = _prompts\n",
" #---------#\n",
" save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
"\n",
"\n",
" if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n",
" if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n",
" if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n",
" if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n",
" if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n",
" if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n",
" if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n",
" if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n",
" if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n",
" if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n",
" if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n",
" if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n",
"\n",
"\n",
" #------#\n",
" print(f'Saving savefile {save_filename} to {output_folder}...')\n",
" with open(save_filename, 'w') as f:\n",
" json.dump(_savefiles, f)\n",
" #---------#\n",
" continue\n",
"#-----------#"
],
"metadata": {
"id": "NZy2HrkZ1Rto"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Determine if this notebook is running on Colab or Kaggle\n",
"#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"# @title Download the text_encodings as .zip\n",
"import os\n",
"%cd {home_directory}\n",
"#os.remove(f'{home_directory}results.zip')\n",
"root_output_folder = home_directory + 'output/'\n",
"zip_dest = f'{home_directory}results.zip'\n",
"!zip -r {zip_dest} {root_output_folder}"
],
"metadata": {
"id": "DaV1ynRs1XeS",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "056b0aaa-dcfa-4c0d-b412-633b336cd164"
},
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content\n",
"/content\n",
" adding: content/output/ (stored 0%)\n",
" adding: content/output/names_vocab_q0043_162977.safetensors (deflated 40%)\n",
" adding: content/output/r34_vocab_q0043_96166.safetensors (deflated 39%)\n",
" adding: content/output/civitai_vocab_q0043_203663.safetensors (deflated 40%)\n",
" adding: content/output/lyrics_vocab_q0043_41905.safetensors (deflated 44%)\n",
" adding: content/output/fanfic_tags_vocab_q0043_1617133.safetensors (deflated 40%)\n",
" adding: content/output/clip_vocab_q0043_541291.safetensors (deflated 39%)\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# @title ⚙️📝 Print the results (Advanced)\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = False # @param {type:\"boolean\"}\n",
"\n",
"import random\n",
"# @markdown -----------\n",
"# @markdown Mix with...\n",
"list_size2 = 1000 # @param {type:'number'}\n",
"start_at_index2 = 10000 # @param {type:'number'}\n",
"rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
"\n",
"# @markdown -----------\n",
"# @markdown Repeat output N times\n",
"N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = ''\n",
"_sims = ''\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index].item()\n",
"\n",
" prompt = prompts[f'{index}']\n",
" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
"\n",
" #Remove duplicates\n",
" if _prompts.find(prompt + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
"#------#\n",
"__prompts = fix_bad_symbols(__prompts)\n",
"__prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
"__sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
" for i in range(N) : print(__prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
"else:\n",
" for i in range(N) : print(__prompts)\n",
"#-------#"
],
"metadata": {
"id": "Qz05kRtU236V"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Quick fix to created json files above\n",
"output_folder = '/content/output/fusion-gen-savefiles/'\n",
"index = 0\n",
"path = '/content/text-to-image-prompts/fusion-gen-savefiles'\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"my_mkdirs(output_folder)\n",
"for filename in os.listdir(f'{path}'):\n",
" if filename.find('fusion_C05_X7_1000_')<=-1: continue\n",
" print(f'reading {filename}...')\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _savefile = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
"\n",
" _savefile2 = {}\n",
"\n",
" for key in _savefile:\n",
" _savefile2[key] = _savefile[key]\n",
" if(key == \"_main\") :\n",
" _savefile2[key] = \"Prompt input only ✏️\"\n",
" print(\"changed\")\n",
" #----------#\n",
"\n",
" save_filename = f'fusion_C05_X7_1000_{index}.json'\n",
" index = index + 1\n",
"\n",
" %cd {output_folder}\n",
" print(f'Saving savefile {save_filename} to {output_folder}...')\n",
" with open(save_filename, 'w') as f:\n",
" json.dump(_savefile2, f)"
],
"metadata": {
"id": "mRhTZ6wS1g0m"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 📝 Get Prompt text_encoding similarity to the pre-calc. text_encodings\n",
"prompt = \"pixar animation\" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
"\n",
"use_negatives = False # @param {type:\"boolean\"}\n",
"\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"logit_scale = model.logit_scale.exp()\n",
"\n",
"# Get text features for user input\n",
"inputs = tokenizer(text = prompt, padding=True, return_tensors=\"pt\")\n",
"text_features_A = model.get_text_features(**inputs)\n",
"text_features_A = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
"name_A = prompt\n",
"#------#\n",
"\n",
"penalty_NEG = 0\n",
"image_penalty_NEG = 0\n",
"\n",
"#------#\n",
"try: strength_NEG\n",
"except: strength_NEG = 1\n",
"\n",
"try: strength_image_NEG\n",
"except: strength_image_NEG = 1\n",
"#------#\n",
"\n",
"if using_NEG and use_negatives:\n",
" penalty_NEG = strength_NEG* torch.nn.functional.cosine_similarity(text_features_A, text_features_NEG)\n",
"if using_image_NEG and use_negatives:\n",
" torch.matmul(text_features_A, image_features_NEG.t()) * logit_scale\n",
" image_penalty_NEG = strength_image_NEG* torch.nn.functional.cosine_similarity(text_features_A, image_features_NEG)\n",
"#-------#\n",
"\n",
"sims = torch.zeros(NUM_VOCAB_ITEMS)\n",
"for index in range(NUM_VOCAB_ITEMS):\n",
" if index<2: continue\n",
" text_features = text_encodings[f'{index}']\n",
" sims[index] = torch.nn.functional.cosine_similarity(text_features, text_features_A) - penalty_NEG - image_penalty_NEG\n",
" #------#\n",
"\n",
"#------#\n",
"\n",
"sorted , indices = torch.sort(sims,dim=0 , descending=True)"
],
"metadata": {
"id": "xc-PbIYF428y"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title ⚙️📝 Print the results (Advanced)\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Prefix = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = False # @param {type:\"boolean\"}\n",
"\n",
"import random\n",
"# @markdown -----------\n",
"# @markdown Mix with...\n",
"list_size2 = 1000 # @param {type:'number'}\n",
"start_at_index2 = 10000 # @param {type:'number'}\n",
"rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
"\n",
"# @markdown -----------\n",
"# @markdown Repeat output N times\n",
"N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = '{'\n",
"_sims = '{'\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index]\n",
"\n",
" prompt = prompts[f'{index}']\n",
" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
"\n",
" #Remove duplicates\n",
" if _prompts.find(prompt + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
"#------#\n",
"__prompts = (_prompts + '}').replace(separator + '}', '}')\n",
"__sims = (_sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
" for i in range(N) : print(__prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
"else:\n",
" for i in range(N) : print(__prompts)\n",
"#-------#"
],
"metadata": {
"id": "ifblBRcXoB6t",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 📝🚫 Penalize similarity to Prompt text_encoding (optional)\n",
"neg_prompt = \"a drawing of a cat \" # @param {\"type\":\"string\",\"placeholder\":\"Write something to avoid\"}\n",
"\n",
"neg_strength = 1 # @param {type:\"slider\", min:0, max:5, step:0.01}\n",
"\n",
"enable = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
"\n",
"using_NEG = enable\n",
"\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"\n",
"\n",
"name_NEG = ''\n",
"strength_NEG = 1\n",
"if enable:\n",
" # Get text features for user input\n",
" inputs = tokenizer(text = neg_prompt, padding=True, return_tensors=\"pt\")\n",
" text_features_NEG = model.get_text_features(**inputs)\n",
" text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
" name_NEG = neg_prompt\n",
" strength_NEG = neg_strength\n",
" #------#"
],
"metadata": {
"id": "sX2JGqOH5B8g",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 🖼️🚫 Penalize similarity to Prompt image_encoding (optional)\n",
"from google.colab import files\n",
"def upload_files():\n",
" from google.colab import files\n",
" uploaded = files.upload()\n",
" for k, v in uploaded.items():\n",
" open(k, 'wb').write(v)\n",
" return list(uploaded.keys())\n",
"\n",
"\n",
"neg_strength = 1 # @param {type:\"slider\", min:0, max:5, step:0.01}\n",
"enable = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
"using_image_NEG = enable\n",
"\n",
"\n",
"colab_image_folder = '/content/text-to-image-prompts/images/'\n",
"#Get image\n",
"# You can use \"http://images.cocodataset.org/val2017/000000039769.jpg\" for testing\n",
"image_url = \"\" # @param {\"type\":\"string\",\"placeholder\":\"leave empty for local upload (scroll down to see it)\"}\n",
"colab_image_path = \"imperial.png\" # @param {\"type\":\"string\",\"placeholder\": \"eval. as '/content/sd_tokens/' + **your input**\"}\n",
"# @markdown --------------------------\n",
"\n",
"image_path = \"\"\n",
"\n",
"from PIL import Image\n",
"import requests\n",
"image_NEG = \"\"\n",
"image_features_NEG = \"\"\n",
"strength_image_NEG = 1\n",
"\n",
"#----#\n",
"if enable :\n",
" strength_image_NEG = neg_strength\n",
" if image_url == \"\":\n",
" import cv2\n",
" from google.colab.patches import cv2_imshow\n",
" # Open the image.\n",
" if colab_image_path == \"\":\n",
" keys = upload_files()\n",
" for key in keys:\n",
" image_NEG = cv2.imread(colab_image_folder + key)\n",
" colab_image_path = colab_image_folder + key\n",
" image_path = colab_image_folder + key\n",
" else:\n",
" image_NEG = cv2.imread(colab_image_folder + colab_image_path)\n",
" else:\n",
" image_NEG = Image.open(requests.get(image_url, stream=True).raw)\n",
" #------#\n",
" from google.colab.patches import cv2_imshow\n",
" cv2_imshow(image_NEG)\n",
"\n",
" inputs = processor(images=image_NEG, return_tensors=\"pt\")\n",
" image_features_NEG = model.get_image_features(**inputs)\n",
" image_features_NEG = image_features_NEG / image_features_NEG.norm(p=2, dim=-1, keepdim=True)"
],
"metadata": {
"id": "oCJ97b-B7927",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 📝 Print the results\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Prefix = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = True # @param {type:\"boolean\"}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = '{'\n",
"_sims = '{'\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index]\n",
" #Remove duplicates\n",
" if _prompts.find(prompts[f'{index}'] + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompts[f'{index}'] + separator,'')\n",
" _prompts = _prompts + prompts[f'{index}'] + separator\n",
" #------#\n",
"#------#\n",
"__prompts = (_prompts + '}').replace(separator + '}', '}')\n",
"__sims = (_sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ' + __prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" print(__prompts)\n",
"else:\n",
" print(__prompts)\n",
"#-------#"
],
"metadata": {
"id": "_vnVbxcFf7WV",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Below are the Image interrogators"
],
"metadata": {
"id": "qZvLkJCtGC89"
}
},
{
"cell_type": "code",
"source": [
"# @title 🖼️ Upload an image\n",
"def upload_files():\n",
" from google.colab import files\n",
" uploaded = files.upload()\n",
" for k, v in uploaded.items():\n",
" open(k, 'wb').write(v)\n",
" return list(uploaded.keys())\n",
"\n",
"\n",
"colab_image_folder = '/content/text-to-image-prompts/images/'\n",
"#Get image\n",
"# You can use \"http://images.cocodataset.org/val2017/000000039769.jpg\" for testing\n",
"image_url = \"\" # @param {\"type\":\"string\",\"placeholder\":\"leave empty for local upload (scroll down to see it)\"}\n",
"colab_image_path = \"imperial.png\" # @param {\"type\":\"string\",\"placeholder\": \"eval. as '/content/sd_tokens/' + **your input**\"}\n",
"# @markdown --------------------------\n",
"\n",
"image_path = \"\"\n",
"\n",
"from PIL import Image\n",
"import requests\n",
"image_A = \"\"\n",
"#----#\n",
"if image_url == \"\":\n",
" import cv2\n",
" from google.colab.patches import cv2_imshow\n",
" # Open the image.\n",
" if colab_image_path == \"\":\n",
" keys = upload_files()\n",
" for key in keys:\n",
" image_A = cv2.imread(colab_image_folder + key)\n",
" colab_image_path = colab_image_folder + key\n",
" image_path = colab_image_folder + key\n",
" else:\n",
" image_A = cv2.imread(colab_image_folder + colab_image_path)\n",
" #---------#\n",
"else:\n",
" image_A = Image.open(requests.get(image_url, stream=True).raw)\n",
" image_A\n",
"#------#\n",
"if image_url == \"\":\n",
" from google.colab.patches import cv2_imshow\n",
" cv2_imshow(image_A)\n",
"#------#\n",
"image_A\n",
"\n"
],
"metadata": {
"id": "ke6mZ1RZDOeB",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"\n",
"# Get image features\n",
"inputs = processor(images=image_A, return_tensors=\"pt\")\n",
"image_features = model.get_image_features(**inputs)\n",
"image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
"name_A = \"the image\"\n",
"#-----#"
],
"metadata": {
"id": "gAqsRQaZVf1A"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#'/content/text-to-image-prompts/fusion/image_encodings/links-1.safetensors'\n",
"path = '/content/text-to-image-prompts/fusion/image_encodings/'\n",
"filename = 'links-1'\n",
"#------#\n",
"from safetensors.torch import load_file\n",
"import json , os , shelve , torch\n",
"import pandas as pd\n",
"\n",
"\n",
"%cd {path}\n",
"_image_encodings = load_file(f'{filename}.safetensors')\n",
"#Store text_encodings for the header items"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "SEPUbRwpVwRQ",
"outputId": "b058be19-2fe5-4de2-ff3c-3e821043a177"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/text-to-image-prompts/fusion/image_encodings\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"_image_encoding = _image_encodings[f'{16}']\n",
"sim = torch.nn.functional.cosine_similarity(image_features, _image_encoding)\n",
"print(sim.item())"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "5oXvYS1aXdjt",
"outputId": "00491826-4329-4c02-d038-bc3b221937b1"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"1.0\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# @title 🖼️ Get image_encoding similarity to the pre-calc. text_encodings\n",
"\n",
"use_negatives = False # @param {type:\"boolean\"}\n",
"\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"\n",
"# Get image features\n",
"inputs = processor(images=image_A, return_tensors=\"pt\")\n",
"image_features = model.get_image_features(**inputs)\n",
"image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
"name_A = \"the image\"\n",
"#-----#\n",
"\n",
"sims = torch.zeros(NUM_VOCAB_ITEMS)\n",
"logit_scale = model.logit_scale.exp()\n",
"for index in range(NUM_VOCAB_ITEMS):\n",
" text_features = text_encodings[f'{index}']\n",
"\n",
" torch.matmul(text_features, image_features.t()) * logit_scale\n",
" sims[index] = torch.nn.functional.cosine_similarity(text_features, image_features)\n",
" if using_NEG and use_negatives :\n",
" torch.matmul(text_features_NEG, image_features.t()) * logit_scale\n",
"\n",
" sims[index] = sims[index] - neg_strength* torch.nn.functional.cosine_similarity(text_features_NEG, image_features)\n",
" if using_image_NEG and use_negatives :\n",
" sims[index] = sims[index] - neg_strength* torch.nn.functional.cosine_similarity(image_features, image_features_NEG)\n",
"#-------#\n",
"sorted , indices = torch.sort(sims,dim=0 , descending=True)"
],
"metadata": {
"id": "rebogpoyOG8k"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 🖼️ Print the results\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Prefix = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = True # @param {type:\"boolean\"}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = '{'\n",
"_sims = '{'\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index]\n",
" _prompts = _prompts + prompts[f'{index}'] + separator\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
"#------#\n",
"__prompts = (_prompts + '}').replace(separator + '}', '}')\n",
"__sims = (_sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ' + __prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" if name_NEG != '': print(f'Using negatives at {strength_NEG} strength for this text : {name_NEG}')\n",
" else:\n",
" print(__prompts)\n",
"else:\n",
" print(__prompts)\n",
"#-------#"
],
"metadata": {
"id": "JkzncP8SgKtS"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title ⚙️🖼️ Print the results (Advanced)\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Prefix = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = True # @param {type:\"boolean\"}\n",
"\n",
"\n",
"import random\n",
"# @markdown -----------\n",
"# @markdown Mix with...\n",
"list_size2 = 1000 # @param {type:'number'}\n",
"start_at_index2 = 10000 # @param {type:'number'}\n",
"rate_percent = 50 # @param {type:\"slider\", min:0, max:100, step:1}\n",
"\n",
"# @markdown -----------\n",
"# @markdown Repeat output N times\n",
"\n",
"N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = '{'\n",
"_sims = '{'\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index]\n",
"\n",
" prompt = prompts[f'{index}']\n",
" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
"\n",
" #Remove duplicates\n",
" if _prompts.find(prompt + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
"#------#\n",
"__prompts = (_prompts + '}').replace(separator + '}', '}')\n",
"__sims = (_sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
" for i in range(N) : print(__prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
"else:\n",
" for i in range(N) : print(__prompts)\n",
"#-------#\n",
"\n",
"\n"
],
"metadata": {
"id": "6FEmV02tArrh",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title 💫 Compare Text encodings\n",
"prompt_A = \"banana\" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
"prompt_B = \"bike \" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
"use_token_padding = True # param {type:\"boolean\"} <----- Enabled by default\n",
"#-----#\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\",\n",
"clean_up_tokenization_spaces = False)\n",
"#-----#\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"#----#\n",
"inputs = tokenizer(text = prompt_A, padding=True, return_tensors=\"pt\")\n",
"text_features_A = model.get_text_features(**inputs)\n",
"text_features_A = text_features_A / text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
"name_A = prompt_A\n",
"#----#\n",
"inputs = tokenizer(text = prompt_B, padding=True, return_tensors=\"pt\")\n",
"text_features_B = model.get_text_features(**inputs)\n",
"text_features_B = text_features_B / text_features_B.norm(p=2, dim=-1, keepdim=True)\n",
"name_B = prompt_B\n",
"#----#\n",
"import torch\n",
"sim_AB = torch.nn.functional.cosine_similarity(text_features_A, text_features_B)\n",
"#----#\n",
"print(f'The similarity between the text_encoding for A:\"{prompt_A}\" and B: \"{prompt_B}\" is {round(sim_AB.item()*100,2)} %')"
],
"metadata": {
"id": "QQOjh5BvnG8M",
"collapsed": true,
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Quick Fix\n",
"#Imports\n",
"#!pip install safetensors\n",
"from safetensors.torch import load_file\n",
"import json , os , shelve , torch\n",
"import pandas as pd\n",
"#----#\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"\n",
"def doFixPrompts(_path):\n",
" output_folder = '/content/outputs/text'\n",
" my_mkdirs(output_folder)\n",
" path = _path + '/text'\n",
" #-----#\n",
" index = 0\n",
" file_index = 0\n",
" prompts = {}\n",
" text_encodings = {}\n",
" _text_encodings = {}\n",
" #-----#\n",
" for filename in os.listdir(f'{path}'):\n",
" print(f'reading {filename}....')\n",
" _index = 0\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" #------#\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
" #-----#\n",
" text_encoding_filename = _prompts['1']\n",
" links_encoding_filename = _prompts['1'].replace('prompts','links')\n",
" _prompts['0'] = links_encoding_filename\n",
" #-----#\n",
" %cd {output_folder}\n",
" print(f'Saving segment {filename} to {output_folder}...')\n",
" with open(filename, 'w') as f:\n",
" json.dump(_prompts, f)\n",
" #-------#\n",
" #--------#\n",
"#----------#\n",
"\n",
"\n",
"def doFixLinks(_path):\n",
" output_folder = '/content/outputs/images'\n",
" my_mkdirs(output_folder)\n",
" path = _path + '/images'\n",
" #-----#\n",
" index = 0\n",
" file_index = 0\n",
" prompts = {}\n",
" text_encodings = {}\n",
" _text_encodings = {}\n",
" #-----#\n",
" for filename in os.listdir(f'{path}'):\n",
" print(f'reading {filename}....')\n",
" _index = 0\n",
" %cd {path}\n",
" with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
" #------#\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" _links = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
" #-----#\n",
" links_encoding_filename = _links['1']\n",
" text_encoding_filename = _links['1'].replace('links','prompts')\n",
" _links['0'] = links_encoding_filename\n",
" _links['1'] = text_encoding_filename\n",
" #-----#\n",
" %cd {output_folder}\n",
" print(f'Saving segment {filename} to {output_folder}...')\n",
" with open(filename, 'w') as f:\n",
" json.dump(_links, f)\n",
" #-------#\n",
" #--------#"
],
"metadata": {
"cellView": "form",
"id": "Cbt78mgJYHgr"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"You can write an url or upload a file locally from your device to use as reference. The image will by saved in the 'sd_tokens' folder. Note that the 'sd_tokens' folder will be deleted upon exiting this runtime."
],
"metadata": {
"id": "hyK423TQCRup"
}
},
{
"cell_type": "code",
"source": [
"# @title Process the raw vocab into json + .safetensor pair\n",
"\n",
"# NOTE : although they have 1x768 dimension , these are not text_encodings , but token vectors\n",
"import json\n",
"import pandas as pd\n",
"import os\n",
"import shelve\n",
"import torch\n",
"from safetensors.torch import save_file , load_file\n",
"import json\n",
"\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"# Load the data if not already loaded\n",
"try:\n",
" loaded\n",
"except:\n",
" %cd {home_directory}\n",
" !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
" loaded = True\n",
"#--------#\n",
"\n",
"# User input\n",
"target = home_directory + 'text-to-image-prompts/vocab/'\n",
"root_output_folder = home_directory + 'output/'\n",
"output_folder = root_output_folder + 'vocab/'\n",
"root_filename = 'vocab'\n",
"NUM_FILES = 1\n",
"#--------#\n",
"\n",
"# Setup environment\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"#--------#\n",
"output_folder_text = output_folder + 'text/'\n",
"output_folder_text = output_folder + 'text/'\n",
"output_folder_token_vectors = output_folder + 'token_vectors/'\n",
"target_raw = target + 'raw/'\n",
"%cd {home_directory}\n",
"my_mkdirs(output_folder)\n",
"my_mkdirs(output_folder_text)\n",
"my_mkdirs(output_folder_token_vectors)\n",
"#-------#\n",
"\n",
"%cd {target_raw}\n",
"tokens = torch.load(f'{root_filename}.pt' , weights_only=True)\n",
"tokens = model.clone().detach()\n",
"\n",
"\n",
"%cd {target_raw}\n",
"with open(f'{root_filename}.json', 'r') as f:\n",
" data = json.load(f)\n",
"_df = pd.DataFrame({'count': data})['count']\n",
"#reverse key and value in the dict\n",
"vocab = {\n",
" value : key for key, value in _df.items()\n",
"}\n",
"#------#\n",
"\n",
"\n",
"tensors = {}\n",
"for key in vocab:\n",
" name = vocab[key]\n",
" token = tokens[int(key)]\n",
" tensors[key] = token\n",
"#-----#\n",
"\n",
"%cd {output_folder_token_vectors}\n",
"save_file(tensors, \"vocab.safetensors\")\n",
"\n",
"%cd {output_folder_text}\n",
"with open('vocab.json', 'w') as f:\n",
" json.dump(vocab, f)\n",
"\n",
"\n"
],
"metadata": {
"id": "H3JRx5rhWIEo",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Do the same but for image encodings (if urls exist)\n",
"import json\n",
"import pandas as pd\n",
"import os\n",
"import shelve\n",
"import torch\n",
"from safetensors.torch import save_file\n",
"import json\n",
"from PIL import Image\n",
"import requests\n",
"\n",
"# Determine if this notebook is running on Colab or Kaggle\n",
"#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"# Load the data if not already loaded\n",
"try:\n",
" loaded\n",
"except:\n",
" %cd {home_directory}\n",
" !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
" loaded = True\n",
"#--------#\n",
"\n",
"# User input\n",
"target = home_directory + 'text-to-image-prompts/fusion/'\n",
"root_output_folder = home_directory + 'output/'\n",
"output_folder = root_output_folder + 'fusion/'\n",
"root_filename = 'prompts'\n",
"root_filename_links = 'links'\n",
"NUM_FILES = 1\n",
"#--------#\n",
"\n",
"# Setup environment\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"#--------#\n",
"output_folder_text = output_folder + 'text/'\n",
"output_folder_images = output_folder + 'images/'\n",
"output_folder_text_encodings = output_folder + 'text_encodings/'\n",
"output_folder_image_encodings = output_folder + 'image_encodings/'\n",
"target_raw_text = target + 'raw/text/'\n",
"target_raw_images = target + 'raw/images/'\n",
"%cd {home_directory}\n",
"my_mkdirs(output_folder)\n",
"my_mkdirs(output_folder_text)\n",
"my_mkdirs(output_folder_images)\n",
"my_mkdirs(output_folder_text_encodings)\n",
"my_mkdirs(output_folder_image_encodings)\n",
"#-------#\n",
"\n",
"\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\").to(device)\n",
"#---------#\n",
"for file_index in range(NUM_FILES + 1):\n",
" if (file_index < 1): continue\n",
"\n",
" # Assign name of JSON file to read\n",
" filename = f'{root_filename}{file_index}'\n",
" if NUM_FILES == 1 : filename = f'{root_filename}'\n",
" #--------#\n",
"\n",
" # Assign name of JSON file to read\n",
" filename_links = f'{root_filename_links}{file_index}'\n",
" if NUM_FILES == 1 : filename_links = f'{root_filename_links}'\n",
" #--------#\n",
"\n",
" # Read {filename}.json\n",
" %cd {target_raw_text}\n",
" with open(filename + '.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" prompts = {\n",
" key : value.replace(\"</w>\",\" \") for key, value in _df.items()\n",
" }\n",
" index = 0\n",
" for key in prompts:\n",
" index = index + 1\n",
" #----------#\n",
" NUM_ITEMS = index\n",
" #------#\n",
"\n",
" # Read image_urls\n",
" %cd {target_raw_images}\n",
" with open(filename_links + '.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" image_urls = {\n",
" key : value.replace(\"</w>\",\" \") for key, value in _df.items()\n",
" }\n",
" index = 0\n",
" for key in image_urls:\n",
" index = index + 1\n",
" #----------#\n",
" NUM_ITEMS2 = index\n",
" #------#\n",
"\n",
" if (NUM_ITEMS != NUM_ITEMS2) :\n",
" print(f\"NUM_ITEMS (text) : {NUM_ITEMS}\")\n",
" print(f\"NUM_ITEMS (links) : {NUM_ITEMS2}\")\n",
"\n",
" # Calculate text_encodings for the .json file contents and save the results as .json + .safetensors segments\n",
" NUM_HEADERS = 2\n",
" CHUNKS_SIZE = 20\n",
" START_AT = 0 #<---Use this if the job was aborted and you wish to continue where you left off. Set the value to 0 otherwise\n",
" #--------#\n",
" names_dict = {}\n",
" image_encoding_dict = {}\n",
" text_encoding_dict = {}\n",
" segments = {}\n",
" index = 0;\n",
" subby = 1;\n",
" _filename = ''\n",
"\n",
" print(f'processing batch no {subby}....')\n",
" print(f'----------')\n",
" for _index in range(NUM_ITEMS2):\n",
" if not (f'{_index}' in prompts) : continue\n",
" if (prompts[f'{_index}']==\"SKIP\") : continue\n",
" if (index % 100 == 0) : print(index)\n",
" if (index == 0 and _index>0) : index = index + 2 #make space for headers\n",
" if (_index % (CHUNKS_SIZE-NUM_HEADERS) == 0 and _index > 0) :\n",
"\n",
" # Write headers in the .json\n",
" names_dict[f'{0}'] = f'{_index}'\n",
" names_dict[f'{1}'] = f'{filename}-{subby}'\n",
"\n",
" # Encode the headers into text_encodings\n",
" inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
" text_features = model.get_text_features(**inputs).to(device)\n",
" text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
" text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
" inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
" text_features = model.get_text_features(**inputs).to(device)\n",
" text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
" text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
" #-------#\n",
"\n",
" # Write .json\n",
" _filename = f'{filename}-{subby}.json'\n",
" %cd {output_folder_text}\n",
" print(f'Saving segment {_filename} to {output_folder_text}...')\n",
" with open(_filename, 'w') as f:\n",
"  json.dump(names_dict, f)\n",
" #-------#\n",
"\n",
" # Write .safetensors\n",
" _filename = f'{filename}-{subby}.safetensors'\n",
" %cd {output_folder_text_encodings}\n",
" print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
" save_file(text_encoding_dict, _filename)\n",
" #--------#\n",
"\n",
" #Iterate\n",
" subby = subby + 1\n",
" segments[f'{subby}'] = _filename\n",
" text_encoding_dict = {}\n",
" names_dict = {}\n",
" index = 0\n",
" #------#\n",
" else: index = index + 1\n",
" #--------#\n",
"\n",
"\n",
" inputs = tokenizer(text = '' + prompts[f'{_index}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
" text_features = model.get_text_features(**inputs).to(device)\n",
" text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
" text_encoding_dict[f'{index}'] = text_features.to(torch.device('cpu'))\n",
"\n",
"\n",
" names_dict[f'{index}'] = prompts[f'{_index}']\n",
" continue\n",
" #-----#\n",
" #-----#\n",
" # Write headers in the .json\n",
" names_dict[f'{0}'] = f'{_index}'\n",
" names_dict[f'{1}'] = f'{filename}-{subby}'\n",
"\n",
" # Encode the headers into text_encoding\n",
" inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
" text_features = model.get_text_features(**inputs).to(device)\n",
" text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
" text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
" inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
" text_features = model.get_text_features(**inputs).to(device)\n",
" text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
" text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
" #-------#\n",
"\n",
" # Write .json\n",
" _filename = f'{filename}-{subby}.json'\n",
" %cd {output_folder_text}\n",
" print(f'Saving segment {_filename} to {output_folder_text}...')\n",
" with open(_filename, 'w') as f:\n",
" json.dump(names_dict, f)\n",
" #-------#\n",
"\n",
" # Write .safetensors\n",
" _filename = f'{filename}-{subby}.safetensors'\n",
" %cd {output_folder_text_encodings}\n",
" print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
" save_file(text_encoding_dict, _filename)\n",
" #--------#\n",
"\n",
" #Iterate\n",
" subby = subby + 1\n",
" segments[f'{subby}'] = _filename\n",
" text_encoding_dict = {}\n",
" names_dict = {}\n",
" index = 0\n",
" #------#\n",
" #----#"
],
"metadata": {
"id": "Sy5K7c-IDcic"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Determine if this notebook is running on Colab or Kaggle\n",
"#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"# @title Download the text_encodings as .zip\n",
"import os\n",
"%cd {home_directory}\n",
"#os.remove(f'{home_directory}results.zip')\n",
"root_output_folder = home_directory + 'output/'\n",
"zip_dest = f'{home_directory}results.zip'\n",
"!zip -r {zip_dest} {root_output_folder}"
],
"metadata": {
"id": "V4YCpmWlkPMG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Extract tags from the Danbooru website (AI tags)\n",
"\n",
"import requests\n",
"import re\n",
"import json\n",
"\n",
"prompts = {}\n",
"index = 0\n",
"for url_index in range(10):\n",
" url = f'https://danbooru.donmai.us/ai_tags?commit=Search&mode=table&page={url_index}&search%5Bis_posted%5D=true&search%5Border%5D=media_asset_id'\n",
" r = requests.get(url)\n",
" #-----#\n",
" matches = re.findall(\"data-tag-name=.*.* href\", r.text)\n",
" for x in matches:\n",
" prompts[f'{index}'] = x.replace('data-tag-name=\"','').replace('\" href','')\n",
" index = index + 1\n",
"\n",
"#-------#\n",
"with open('danbooru_ai_tags.json', 'w') as f:\n",
" json.dump(prompts, f)"
],
"metadata": {
"cellView": "form",
"id": "tBbJnlA5pjd2"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Extract tags from the Danbooru website (Normal tags)\n",
"prompts = {}\n",
"index = 0\n",
"for url_index in range(1000):\n",
" url = f'https://danbooru.donmai.us/tags?commit=Search&page={url_index}&search%5Bhide_empty%5D=yes&search%5Border%5D=count'\n",
" r = requests.get(url)\n",
" #-----#\n",
" matches = re.findall('%5D=.*.*\">Related tags', r.text)\n",
" for x in matches:\n",
" prompts[f'{index}'] = x.replace('\\\">Related tags','').replace('%5D=','')\n",
" index = index + 1\n",
"\n",
"#-------#\n",
"with open('danbooru_tags.json', 'w') as f:\n",
" json.dump(prompts, f)"
],
"metadata": {
"cellView": "form",
"id": "l8t-4GmsviJt"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#Remove URL Encoding from the fetched Danbooru tags\n",
"danboorus = getJSON('/content/text-to-image-prompts/danbooru/raw/','🎀 fusion-t2i-danbooru-tags.json')\n",
"from urllib.parse import unquote\n",
"for key in danboorus:\n",
" danboorus[key] = unquote(danboorus[key])\n",
"%cd /content/\n",
"with open(f'🎀 fusion-t2i-danbooru-tags', 'w') as f:\n",
" json.dump(danboorus, f)"
],
"metadata": {
"id": "AjSf585hWWMB"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Download nouns - import data\n",
"import os\n",
"import json\n",
"\n",
"# Setup environment\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"#--------#\n",
"\n",
"# Determine if this notebook is running on Colab or Kaggle\n",
"#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"root_output_folder = home_directory + 'outputs/'\n",
"\n",
"# @title Extract nouns\n",
"my_mkdirs(root_output_folder)\n",
"%cd {root_output_folder}\n",
"\n",
"!pip install datasets\n",
"\n",
"from datasets import load_dataset\n",
"\n",
"ds = load_dataset(\"bartoszmaj/nouns_one\")\n",
"#ds2 = load_dataset(\"bartoszmaj/nouns_two\")\n",
"#ds3 = load_dataset(\"bartoszmaj/nouns_three\")\n",
"#ds4 = load_dataset(\"bartoszmaj/nouns_four\")\n",
"\n"
],
"metadata": {
"cellView": "form",
"id": "HC72wZW9llzw"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Download nouns - pick three items at random and write in JSONs\n",
"import random\n",
"my_mkdirs(root_output_folder)\n",
"%cd {root_output_folder}\n",
"for file_index in range(21):\n",
" if file_index <=0: continue\n",
" tripple_nouns = {}\n",
" for index in range (10000):\n",
" word = \"\"\n",
" for its in range(3):\n",
" _index = random.randint(0,1000000-1)\n",
" words = list(ds['train'][_index]['nouns'])\n",
" if len(words)>0:\n",
" _word = random.choice(words)\n",
" word = word + ' ' + _word\n",
" #---------#\n",
" tripple_nouns[f'{index}'] = word\n",
" #--------#\n",
" with open(f'tripple_nouns_{file_index}.json', 'w') as f:\n",
" json.dump(tripple_nouns, f)\n",
" #----------#\n",
"\n"
],
"metadata": {
"cellView": "form",
"id": "CWlWk0KpuX55"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"\n",
"\n",
"# How does this notebook work?\n",
"\n",
"Similiar vectors = similiar output in the SD 1.5 / SDXL / FLUX model\n",
"\n",
"CLIP converts the prompt text to vectors (“tensors”) , with float32 values usually ranging from -1 to 1.\n",
"\n",
"Dimensions are \\[ 1x768 ] tensors for SD 1.5 , and a \\[ 1x768 , 1x1024 ] tensor for SDXL and FLUX.\n",
"\n",
"The SD models and FLUX converts these vectors to an image.\n",
"\n",
"This notebook takes an input string , tokenizes it and matches the first token against the 49407 token vectors in the vocab.json : [https://huggingface.co/black-forest-labs/FLUX.1-dev/tree/main/tokenizer](https://www.google.com/url?q=https%3A%2F%2Fhuggingface.co%2Fblack-forest-labs%2FFLUX.1-dev%2Ftree%2Fmain%2Ftokenizer)\n",
"\n",
"It finds the “most similiar tokens” in the list. Similarity is the theta angle between the token vectors.\n",
"\n",
"\n",
"
\n",
"
\n",
"\n",
"The angle is calculated using cosine similarity , where 1 = 100% similarity (parallell vectors) , and 0 = 0% similarity (perpendicular vectors).\n",
"\n",
"Negative similarity is also possible.\n",
"\n",
"# How can I use it?\n",
"\n",
"If you are bored of prompting “girl” and want something similiar you can run this notebook and use the “chick” token at 21.88% similarity , for example\n",
"\n",
"You can also run a mixed search , like “cute+girl”/2 , where for example “kpop” has a 16.71% similarity\n",
"\n",
"There are some strange tokens further down the list you go. Example: tokens similiar to the token \"pewdiepie\" (yes this is an actual token that exists in CLIP)\n",
"\n",
"\n",
"
\n",
"
\n",
"\n",
"Each of these correspond to a unique 1x768 token vector.\n",
"\n",
"The higher the ID value , the less often the token appeared in the CLIP training data.\n",
"\n",
"To reiterate; this is the CLIP model training data , not the SD-model training data.\n",
"\n",
"So for certain models , tokens with high ID can give very consistent results , if the SD model is trained to handle them.\n",
"\n",
"Example of this can be anime models , where japanese artist names can affect the output greatly. \n",
"\n",
"Tokens with high ID will often give the \"fun\" output when used in very short prompts.\n",
"\n",
"# What about token vector length?\n",
"\n",
"If you are wondering about token magnitude,\n",
"Prompt weights like (banana:1.2) will scale the magnitude of the corresponding 1x768 tensor(s) by 1.2 . So thats how prompt token magnitude works.\n",
"\n",
"Source: [https://huggingface.co/docs/diffusers/main/en/using-diffusers/weighted\\_prompts](https://www.google.com/url?q=https%3A%2F%2Fhuggingface.co%2Fdocs%2Fdiffusers%2Fmain%2Fen%2Fusing-diffusers%2Fweighted_prompts)\\*\n",
"\n",
"So TLDR; vector direction = “what to generate” , vector magnitude = “prompt weights”\n",
"\n",
"# How prompting works (technical summary)\n",
"\n",
"1. There is no correct way to prompt.\n",
"\n",
"2. Stable diffusion reads your prompt left to right, one token at a time, finding association _from_ the previous token _to_ the current token _and to_ the image generated thus far (Cross Attention Rule)\n",
"\n",
"3. Stable Diffusion is an optimization problem that seeks to maximize similarity to prompt and minimize similarity to negatives (Optimization Rule)\n",
"\n",
"Reference material (covers entire SD , so not good source material really, but the info is there) : https://youtu.be/sFztPP9qPRc?si=ge2Ty7wnpPGmB0gi\n",
"\n",
"# The SD pipeline\n",
"\n",
"For every step (20 in total by default) for SD1.5 :\n",
"\n",
"1. Prompt text => (tokenizer)\n",
"2. => Nx768 token vectors =>(CLIP model) =>\n",
"3. 1x768 encoding => ( the SD model / Unet ) =>\n",
"4. => _Desired_ image per Rule 3 => ( sampler)\n",
"5. => Paint a section of the image => (image)\n",
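"\n",
"As a sketch , this whole chain is what the diffusers library wraps for you (the model repo name and a GPU runtime are assumptions):\n",
"\n",
"```python\n",
"import torch\n",
"from diffusers import StableDiffusionPipeline\n",
"\n",
"pipe = StableDiffusionPipeline.from_pretrained(\n",
"    'runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16\n",
").to('cuda')\n",
"image = pipe('photo of a banana', num_inference_steps=20).images[0]  # 20 steps , as above\n",
"image.save('banana.png')\n",
"```\n",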
"\n",
"# Disclaimer /Trivia\n",
"\n",
"This notebook should be seen as a \"dictionary search tool\" for the vocab.json , which is the same for SD1.5 , SDXL and FLUX. Feel free to verify this by checking the 'tokenizer' folder under each model.\n",
"\n",
"vocab.json in the FLUX model , for example (1 of 2 copies) : https://huggingface.co/black-forest-labs/FLUX.1-dev/tree/main/tokenizer\n",
"\n",
"I'm using Clip-vit-large-patch14 , which is used in SD 1.5 , and is one among the two tokenizers for SDXL and FLUX : https://huggingface.co/openai/clip-vit-large-patch14/blob/main/README.md\n",
"\n",
"This set of tokens has dimension 1x768. \n",
"\n",
"SDXL and FLUX uses an additional set of tokens of dimension 1x1024.\n",
"\n",
"These are not included in this notebook. Feel free to include them yourselves (I would appreciate that).\n",
"\n",
"To do so, you will have to download a FLUX and/or SDXL model\n",
"\n",
", and copy the 49407x1024 tensor list that is stored within the model and then save it as a .pt file.\n",
"\n",
"//---//\n",
"\n",
"I am aware it is actually the 1x768 text_encoding being processed into an image for the SD models + FLUX.\n",
"\n",
"As such , I've included text_encoding comparison at the bottom of the Notebook.\n",
"\n",
"I am also aware thar SDXL and FLUX uses additional encodings , which are not included in this notebook.\n",
"\n",
"* Clip-vit-bigG for SDXL: https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/blob/main/README.md\n",
"\n",
"* And the T5 text encoder for FLUX. I have 0% understanding of FLUX T5 text_encoder.\n",
"\n",
"//---//\n",
"\n",
"If you want them , feel free to include them yourself and share the results (cuz I probably won't) :)!\n",
"\n",
"That being said , being an encoding , I reckon the CLIP Nx768 => 1x768 should be \"linear\" (or whatever one might call it)\n",
"\n",
"So exchange a few tokens in the Nx768 for something similiar , and the resulting 1x768 ought to be kinda similar to 1x768 we had earlier. Hopefully.\n",
"\n",
"I feel its important to mention this , in case some wonder why the token-token similarity don't match the text-encoding to text-encoding similarity.\n",
"\n",
"# Note regarding CLIP text encoding vs. token\n",
"\n",
"*To make this disclaimer clear; Token-to-token similarity is not the same as text_encoding similarity.*\n",
"\n",
"I have to say this , since it will otherwise get (even more) confusing , as both the individual tokens , and the text_encoding have dimensions 1x768.\n",
"\n",
"They are separate things. Separate results. etc.\n",
"\n",
"As such , you will not get anything useful if you start comparing similarity between a token , and a text-encoding. So don't do that :)!\n",
"\n",
"# What about the CLIP image encoding?\n",
"\n",
"The CLIP model can also do an image_encoding of an image, where the output will be a 1x768 tensor. These _can_ be compared with the text_encoding.\n",
"\n",
"Comparing CLIP image_encoding with the CLIP text_encoding for a bunch of random prompts until you find the \"highest similarity\" , is a method used in the CLIP interrogator : https://huggingface.co/spaces/pharmapsychotic/CLIP-Interrogator\n",
"\n",
"List of random prompts for CLIP interrogator can be found here, for reference : https://github.com/pharmapsychotic/clip-interrogator/tree/main/clip_interrogator/data\n",
"\n",
"The CLIP image_encoding is not included in this Notebook.\n",
"\n",
"If you spot errors / ideas for improvememts; feel free to fix the code in your own notebook and post the results.\n",
"\n",
"I'd appreciate that over people saying \"your math is wrong you n00b!\" with no constructive feedback.\n",
"\n",
"//---//\n",
"\n",
"Regarding output\n",
"\n",
"# What are the symbols?\n",
"\n",
"The whitespace symbol indicate if the tokenized item ends with whitespace ( the suffix \"banana\" => \"banana \" ) or not (the prefix \"post\" in \"post-apocalyptic \")\n",
"\n",
"For ease of reference , I call them prefix-tokens and suffix-tokens.\n",
"\n",
"Sidenote:\n",
"\n",
"Prefix tokens have the unique property in that they \"mutate\" suffix tokens\n",
"\n",
"Example: \"photo of a #prefix#-banana\"\n",
"\n",
"where #prefix# is a randomly selected prefix-token from the vocab.json\n",
"\n",
"The hyphen \"-\" exists to guarantee the tokenized text splits into the written #prefix# and #suffix# token respectively. The \"-\" hypen symbol can be replaced by any other special character of your choosing.\n",
"\n",
" Capital letters work too , e.g \"photo of a #prefix#Abanana\" since the capital letters A-Z are only listed once in the entire vocab.json.\n",
"\n",
"You can also choose to omit any separator and just rawdog it with the prompt \"photo of a #prefix#banana\" , however know that this may , on occasion , be tokenized as completely different tokens of lower ID:s.\n",
"\n",
"Curiously , common NSFW terms found online have in the CLIP model have been purposefully fragmented into separate #prefix# and #suffix# counterparts in the vocab.json. Likely for PR-reasons.\n",
"\n",
"You can verify the results using this online tokenizer: https://sd-tokenizer.rocker.boo/\n",
"\n",
"\n",
"\n",
"# What is that gibberish tokens that show up?\n",
"\n",
"The gibberish tokens like \"ðŁĺħ\\\" are actually emojis!\n",
"\n",
"Try writing some emojis in this online tokenizer to see the results: https://sd-tokenizer.rocker.boo/\n",
"\n",
"It is a bit borked as it can't process capital letters properly.\n",
"\n",
"Also note that this is not reversible.\n",
"\n",
"If tokenization \"😅\" => ðŁĺħ\n",
"\n",
"Then you can't prompt \"ðŁĺħ\" and expect to get the same result as the tokenized original emoji , \"😅\".\n",
"\n",
"SD 1.5 models actually have training for Emojis.\n",
"\n",
"But you have to set CLIP skip to 1 for this to work is intended.\n",
"\n",
"A tutorial on stuff you can do with the vocab.list concluded.\n",
"\n",
"Anyways, have fun with the notebook.\n",
"\n",
"There might be some updates in the future with features not mentioned here.\n",
"\n",
"//---//\n",
"\n",
"https://codeandlife.com/2023/01/26/mastering-the-huggingface-clip-model-how-to-extract-embeddings-and-calculate-similarity-for-text-and-images/\n",
"\n",
"https://arxiv.org/pdf/2303.03032"
],
"metadata": {
"id": "njeJx_nSSA8H"
}
},
{
"cell_type": "code",
"source": [
"\n",
"# @title Create random names from firstname and lastnames\n",
"import random\n",
"import json\n",
"import pandas as pd\n",
"import os\n",
"import shelve\n",
"import torch\n",
"from safetensors.torch import save_file\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"\n",
"my_mkdirs('/content/female_full_names/')\n",
"filename = ''\n",
"\n",
"filename = '🆔👩_🦰 fusion-t2i-girl-firstname-1'\n",
"%cd /content/text-to-image-prompts/names/firstnames/text\n",
"with open(filename + '.json', 'r') as f:\n",
" data = json.load(f)\n",
"_df = pd.DataFrame({'count': data})['count']\n",
"firstname = {\n",
" key : value for key, value in _df.items()\n",
"}\n",
"\n",
"NUM_FIRSTNAME = 100901\n",
"\n",
"\n",
"NUM_FILES = 9\n",
"for file_index in range(NUM_FILES + 1):\n",
" if file_index <1: continue\n",
" #if file_index >4: break\n",
" filename = f'👱_♀️ fusion-t2i-lastnames-{file_index} plugin'\n",
" #🦜 fusion-t2i-prompt-features-1.json\n",
"\n",
" # Read suffix.json\n",
" %cd /content/text-to-image-prompts/names/lastnames/text\n",
" with open(filename + '.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" names = {\n",
" key : firstname[f'{random.randint(2,NUM_FIRSTNAME)}'] + ' ' + f'{value}' + ' ' for key, value in _df.items()\n",
" }\n",
"\n",
" index = 0\n",
"\n",
" for key in names:\n",
" index = index + 1\n",
" #-----#\n",
" RANGE = min(index,1000)\n",
" output = {}\n",
"\n",
" for index in range(RANGE):\n",
" if index >1000: break\n",
" output[f'{index}'] = names[f'{index}']\n",
" #-----#\n",
" output[f'{1}'] = f'👱_♀️female_fullnames-{file_index}'\n",
" output[f'{0}'] = f'{RANGE}'\n",
" txt_filename = f'👱_♀️female_fullnames-{file_index}'\n",
" %cd /content/female_full_names/\n",
" with open(txt_filename + '.txt', 'w') as f:\n",
" f.write(str(output))\n",
"\n",
" #files.download(f'fullnames-{file_index}.txt')\n",
"\n",
"#firstname[f'{random.randint(2,NUM_FIRSTNAME)}'] + f'{value}'\n",
"\n",
" #------#\n",
"\n",
"\n"
],
"metadata": {
"id": "JR0wl2ecj6RJ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \tImport Archive of our own tags (awesome!)\n",
"!wget https://huggingface.co/codeShare/JupyterNotebooks/resolve/main/20210226-stats.zip '20210226-stats.zip'\n",
"!unzip /content/20210226-stats.zip\n",
"import pandas as pd\n",
"_df=pd.read_csv('/content/tags-20210226.csv')\n",
"\n",
"_tags = {\n",
" key : value for key, value in _df.items()\n",
"}\n",
"for key in _tags:\n",
" print(key)\n",
"\n",
"names = {}\n",
"index = 0\n",
"for name in _tags['name']:\n",
" if name == 'Redacted':continue\n",
" names[f'{index}'] = name\n",
" index = index + 1\n"
],
"metadata": {
"id": "Oq8C0unOI_hT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \tSave Archive of our own tags\n",
"import os\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"save_filename = 'fanfic_tags.json'\n",
"\n",
"output_folder = '/content/output/'\n",
"my_mkdirs(output_folder)\n",
"\n",
"print(f'Saving savefile {save_filename} to {output_folder}...')\n",
"with open(save_filename, 'w') as f:\n",
" json.dump(names, f)\n",
"#-----#\n",
"\n"
],
"metadata": {
"id": "eQkxcFnAJDcG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Create random suffix pairings\n",
"import random\n",
"import json\n",
"import pandas as pd\n",
"import os\n",
"import shelve\n",
"import torch\n",
"from safetensors.torch import save_file\n",
"\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"\n",
"output_folder = '/content/output/prefix_suffix_pairs/'\n",
"my_mkdirs(output_folder)\n",
"\n",
"_prompts = {}\n",
"_text_encodings = {}\n",
"nA = 0\n",
"\n",
"try: loaded3\n",
"except:\n",
" loaded3 = True\n",
" if True:\n",
" tmp = '/content/text-to-image-prompts/vocab/text_encodings/prefix/'\n",
" for item in ['common','average','rare','weird','exotic'] :\n",
" url = tmp + item\n",
" prefix, text_encodings, PREFIX_NUM_VOCAB_ITEMS = append_from_url(_prompts , _text_encodings, nA , url , '')\n",
" #------#\n",
"\n",
" if True :\n",
" tmp = '/content/text-to-image-prompts/vocab/text_encodings/suffix/'\n",
" for item in ['common','average','rare','weird','exotic'] :\n",
" url = tmp + item\n",
" suffix , text_encodings, SUFFIX_NUM_VOCAB_ITEMS = append_from_url(_prompts , _text_encodings, nA , url , '')\n",
" #------#\n",
"\n",
" if False :\n",
" url = '/content/text-to-image-prompts/vocab/text_encodings/emoji/'\n",
" prompts , emojis_text_encodings, NUM_VOCAB_ITEMS = append_from_url(_prompts , _text_encodings, nA , url , '')\n",
" #------#\n",
"#--------#\n",
"\n",
"if False:\n",
" item3 = '#uc# '\n",
" while (item3.find('#uc#')>-1 or (not item3.isalpha())) :\n",
" item3 = prompts[f'{random.randint(0,NUM_VOCAB_ITEMS)}']\n",
" item3 = item3.replace('</w>', '')\n",
" #------#\n",
"\n",
" item4 = '#uc# '\n",
" while (item4.find('#uc#')>-1 or (not item4.isalpha())) :\n",
" item4 = prompts[f'{random.randint(0,NUM_VOCAB_ITEMS)}']\n",
" item4 = item4.replace('</w>', '')\n",
" #------#\n",
"#------#\n",
"\n",
"\n",
"output = ''\n",
"%cd {output_folder}\n",
"with open('prefix_suffix_pairs' + '.txt', 'w') as f:\n",
" for iter in range (200000):\n",
" item = '#uc# '\n",
" while (not item.isalpha()) :\n",
" item = prefix[f'{random.randint(0,PREFIX_NUM_VOCAB_ITEMS)}']\n",
" item = item.replace('</w>', '')\n",
"\n",
" item2 = '#uc# '\n",
" while (item2.find('#uc#')>-1 or (not item2.isalpha())) :\n",
" item2 = suffix[f'{random.randint(0,SUFFIX_NUM_VOCAB_ITEMS)}']\n",
" item2 = item2.replace('</w>', '')\n",
"\n",
" item3 = '#uc# '\n",
" while (item3.find('#uc#')>-1 or (not item3.isalpha())) :\n",
" item3 = suffix[f'{random.randint(0,SUFFIX_NUM_VOCAB_ITEMS)}']\n",
" item3 = item3.replace('</w>', '')\n",
" #------#\n",
"\n",
" #------#\n",
" output = output + item + '-' + item2 + ' ' + item3\n",
" # + ' ' + item4\n",
" output = output + ' \\n'\n",
" #---------#\n",
" f.write(str(output))"
],
"metadata": {
"cellView": "form",
"id": "64c0zJDDChN7"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title Download the created text_encodings as .zip file\n",
"%cd /content/\n",
"!zip -r /content/female_full_names.zip /content/female_full_names/"
],
"metadata": {
"id": "IBenvYVrofil",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title ⚡+🖼️ -> 📝 Token-Sampling Image interrogator (work in progress)\n",
"#-----#\n",
"NUM_TOKENS = 49407\n",
"import shelve\n",
"db_vocab = shelve.open(VOCAB_FILENAME)\n",
"print(f'using the tokens found in {VOCAB_FILENAME}.db as the vocab')\n",
"# @markdown # What do you want to to mimic?\n",
"use = '🖼️image_encoding from image' # @param ['📝text_encoding from prompt', '🖼️image_encoding from image']\n",
"# @markdown --------------------------\n",
"use_token_padding = True # param {type:\"boolean\"} <---- Enabled by default\n",
"prompt = \"photo of a banana\" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
"#-----#\n",
"prompt_A = prompt\n",
"if(image_path != \"\") : image_A = cv2.imread(\"/content/sd_tokens/\" + image_path)\n",
"#-----#\n",
"\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"#-----#\n",
"if(use == '🖼️image_encoding from image'):\n",
" # Get image features\n",
" inputs = processor(images=image_A, return_tensors=\"pt\")\n",
" image_features = model.get_image_features(**inputs)\n",
" image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
" name_A = \"the image\"\n",
"#-----#\n",
"if(use == '📝text_encoding from prompt'):\n",
" # Get text features\n",
" inputs = tokenizer(text = prompt, padding=True, return_tensors=\"pt\")\n",
" text_features_A = model.get_text_features(**inputs)\n",
" name_A = prompt\n",
"#-----#\n",
"# @markdown # The output...\n",
"must_start_with = \"\" # @param {\"type\":\"string\",\"placeholder\":\"write a text\"}\n",
"must_contain = \"\" # @param {\"type\":\"string\",\"placeholder\":\"write a text\"}\n",
"must_end_with = \"\" # @param {\"type\":\"string\",\"placeholder\":\"write a text\"}\n",
"# @markdown -----\n",
"# @markdown # Use a range of tokens from the vocab.json (slow method)\n",
"start_search_at_index = 0 # @param {type:\"slider\", min:0, max: 49407, step:100}\n",
"# @markdown The lower the start_index, the more similiar the sampled tokens will be to the target token assigned in the '⚡ Get similiar tokens' cell\". If the cell was not run, then it will use tokens ordered by similarity to the \"girl\\\" token\n",
"start_search_at_ID = start_search_at_index\n",
"search_range = 1000 # @param {type:\"slider\", min:100, max:49407, step:100}\n",
"\n",
"samples_per_iter = 10 # @param {type:\"slider\", min:10, max: 100, step:10}\n",
"\n",
"iterations = 5 # @param {type:\"slider\", min:1, max: 20, step:0}\n",
"restrictions = 'None' # @param [\"None\", \"Suffix only\", \"Prefix only\"]\n",
"#markdown Limit char size of included token <----- Disabled\n",
"min_char_size = 0 #param {type:\"slider\", min:0, max: 20, step:1}\n",
"char_range = 50 #param {type:\"slider\", min:0, max: 20, step:1}\n",
"# markdown # ...or paste prompt items\n",
"# markdown Format must be {item1|item2|...}. You can aquire prompt items using the Randomizer in the fusion gen: https://perchance.org/fusion-ai-image-generator\n",
"_enable = False # param {\"type\":\"boolean\"}\n",
"prompt_items = \"\" # param {\"type\":\"string\",\"placeholder\":\"{item1|item2|...}\"}\n",
"#-----#\n",
"#-----#\n",
"START = start_search_at_ID\n",
"RANGE = min(search_range , max(1,NUM_TOKENS - start_search_at_ID))\n",
"#-----#\n",
"import math, random\n",
"NUM_PERMUTATIONS = 6\n",
"ITERS = iterations\n",
"#-----#\n",
"#LOOP START\n",
"#-----#\n",
"# Check if original solution is best\n",
"best_sim = 0\n",
"name = must_start_with + must_contain + must_end_with\n",
"ids = processor.tokenizer(text=name, padding=use_token_padding, return_tensors=\"pt\")\n",
"text_features = model.get_text_features(**ids)\n",
"text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
"#------#\n",
"sim = 0\n",
"if(use == '🖼️image_encoding from image'):\n",
" logit_scale = model.logit_scale.exp()\n",
" torch.matmul(text_features, image_features.t()) * logit_scale\n",
" sim = torch.nn.functional.cosine_similarity(text_features, image_features) * logit_scale\n",
"#-----#\n",
"if(use == '📝text_encoding from prompt'):\n",
" sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
"#-----#\n",
"best_sim = sim\n",
"best_name = name\n",
"name_B = must_contain\n",
"#------#\n",
"results_sim = torch.zeros(ITERS*NUM_PERMUTATIONS)\n",
"results_name_B = {}\n",
"results_name = {}\n",
"#-----#\n",
"for iter in range(ITERS):\n",
" dots = torch.zeros(min(list_size,RANGE))\n",
" is_trail = torch.zeros(min(list_size,RANGE))\n",
"\n",
" #-----#\n",
"\n",
" for index in range(samples_per_iter):\n",
" _start = START\n",
" id_C = random.randint(_start , _start + RANGE)\n",
" name_C = db_vocab[f'{id_C}']\n",
" is_Prefix = 0\n",
" #Skip if non-AZ characters are found\n",
" #???\n",
" #-----#\n",
" # Decide if we should process prefix/suffix tokens\n",
" if name_C.find('</w>')<=-1:\n",
" is_Prefix = 1\n",
" if restrictions != \"Prefix only\":\n",
" continue\n",
" else:\n",
" if restrictions == \"Prefix only\":\n",
" continue\n",
" #-----#\n",
" # Decide if char-size is within range\n",
" if len(name_C) < min_char_size:\n",
" continue\n",
" if len(name_C) > min_char_size + char_range:\n",
" continue\n",
" #-----#\n",
" name_CB = must_start_with + name_C + name_B + must_end_with\n",
" if is_Prefix>0:\n",
" name_CB = must_start_with + ' ' + name_C + '-' + name_B + ' ' + must_end_with\n",
" #-----#\n",
" if(use == '🖼️image_encoding from image'):\n",
" ids_CB = processor.tokenizer(text=name_CB, padding=use_token_padding, return_tensors=\"pt\")\n",
" text_features = model.get_text_features(**ids_CB)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" logit_scale = model.logit_scale.exp()\n",
" torch.matmul(text_features, image_features.t()) * logit_scale\n",
" sim_CB = torch.nn.functional.cosine_similarity(text_features, image_features) * logit_scale\n",
" #-----#\n",
" if(use == '📝text_encoding from prompt'):\n",
" ids_CB = processor.tokenizer(text=name_CB, padding=use_token_padding, return_tensors=\"pt\")\n",
" text_features = model.get_text_features(**ids_CB)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" sim_CB = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
" #-----#\n",
" #-----#\n",
" if restrictions == \"Prefix only\":\n",
" result = sim_CB\n",
" result = result.item()\n",
" dots[index] = result\n",
" continue\n",
" #-----#\n",
" if(use == '🖼️image_encoding from image'):\n",
" name_BC = must_start_with + name_B + name_C + must_end_with\n",
" ids_BC = processor.tokenizer(text=name_BC, padding=use_token_padding, return_tensors=\"pt\")\n",
" text_features = model.get_text_features(**ids_BC)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" logit_scale = model.logit_scale.exp()\n",
" torch.matmul(text_features, image_features.t()) * logit_scale\n",
" sim_BC = torch.nn.functional.cosine_similarity(text_features, image_features) * logit_scale\n",
" #-----#\n",
" if(use == '📝text_encoding from prompt'):\n",
" name_BC = must_start_with + name_B + name_C + must_end_with\n",
" ids_BC = processor.tokenizer(text=name_BC, padding=use_token_padding, return_tensors=\"pt\")\n",
" text_features = model.get_text_features(**ids_BC)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" sim_BC = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
" #-----#\n",
" result = sim_CB\n",
" if(sim_BC > sim_CB):\n",
" is_trail[index] = 1\n",
" result = sim_BC\n",
" #-----#\n",
" #result = absolute_value(result.item())\n",
" result = result.item()\n",
" dots[index] = result\n",
" #----#\n",
" sorted, indices = torch.sort(dots,dim=0 , descending=True)\n",
" # @markdown ----------\n",
" # @markdown # Print options\n",
" list_size = 100 # param {type:'number'}\n",
" print_ID = False # @param {type:\"boolean\"}\n",
" print_Similarity = True # @param {type:\"boolean\"}\n",
" print_Name = True # @param {type:\"boolean\"}\n",
" print_Divider = True # @param {type:\"boolean\"}\n",
" print_Suggestions = False # @param {type:\"boolean\"}\n",
" #----#\n",
" if (print_Divider):\n",
" print('//---//')\n",
" #----#\n",
" print('')\n",
"\n",
" used_reference = f'the text_encoding for {prompt_A}'\n",
" if(use == '🖼️image_encoding from image'):\n",
" used_reference = 'the image input'\n",
" print(f'These token pairings within the range ID = {_start} to ID = {_start + RANGE} most closely match {used_reference}: ')\n",
" print('')\n",
" #----#\n",
" aheads = \"{\"\n",
" trails = \"{\"\n",
" tmp = \"\"\n",
" #----#\n",
" max_sim_ahead = 0\n",
" max_sim_trail = 0\n",
" sim = 0\n",
" max_name_ahead = ''\n",
" max_name_trail = ''\n",
" #----#\n",
" for index in range(min(list_size,RANGE)):\n",
" id = _start + indices[index].item()\n",
" name = db_vocab[f'{id}']\n",
" #-----#\n",
" if (name.find('</w>')<=-1):\n",
" name = name + '-'\n",
" if(is_trail[index]>0):\n",
" trails = trails + name + \"|\"\n",
" else:\n",
" aheads = aheads + name + \"|\"\n",
" #----#\n",
" sim = sorted[index].item()\n",
" #----#\n",
" if(is_trail[index]>0):\n",
" if sim>max_sim_trail:\n",
" max_sim_trail = sim\n",
" max_name_trail = name\n",
" max_name_trail = max_name_trail.strip()\n",
"\n",
" else:\n",
" if sim>max_sim_ahead:\n",
" max_sim_ahead = sim\n",
" max_name_ahead = name\n",
" #------#\n",
" trails = (trails + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
" aheads = (aheads + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
" #-----#\n",
"\n",
" if(print_Suggestions):\n",
" print(f\"place these items ahead of prompt : {aheads}\")\n",
" print(\"\")\n",
" print(f\"place these items behind the prompt : {trails}\")\n",
" print(\"\")\n",
"\n",
" tmp = must_start_with + ' ' + max_name_ahead + name_B + ' ' + must_end_with\n",
" tmp = tmp.strip().replace('</w>', ' ')\n",
" print(f\"max_similarity_ahead = {round(max_sim_ahead,2)} % when using '{tmp}' \")\n",
" print(\"\")\n",
" tmp = must_start_with + ' ' + name_B + max_name_trail + ' ' + must_end_with\n",
" tmp = tmp.strip().replace('</w>', ' ')\n",
" print(f\"max_similarity_trail = {round(max_sim_trail,2)} % when using '{tmp}' \")\n",
" #-----#\n",
" #STEP 2\n",
" import random\n",
" #-----#\n",
" for index in range(NUM_PERMUTATIONS):\n",
" name_inner = ''\n",
" if index == 0 : name_inner = name_B\n",
" if index == 1: name_inner = max_name_ahead\n",
" if index == 2: name_inner = max_name_trail\n",
" if index == 3: name_inner = name_B + max_name_trail\n",
" if index == 4: name_inner = max_name_ahead + name_B\n",
" if index == 5: name_inner = max_name_ahead + name_B + max_name_trail\n",
" if name_inner == '': name_inner = max_name_ahead + name_B + max_name_trail\n",
"\n",
" name = must_start_with + name_inner + must_end_with\n",
" #----#\n",
" ids = processor.tokenizer(text=name, padding=use_token_padding, return_tensors=\"pt\")\n",
" #----#\n",
" sim = 0\n",
" if(use == '🖼️image_encoding from image'):\n",
" text_features = model.get_text_features(**ids)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" logit_scale = model.logit_scale.exp()\n",
" torch.matmul(text_features, image_features.t()) * logit_scale\n",
" sim = torch.nn.functional.cosine_similarity(text_features, image_features) * logit_scale\n",
" #-----#\n",
" if(use == '📝text_encoding from prompt'):\n",
" text_features = model.get_text_features(**ids)\n",
" text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n",
" sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
" #-----#\n",
" results_name[iter*NUM_PERMUTATIONS + index] = name\n",
" results_sim[iter*NUM_PERMUTATIONS + index] = sim\n",
" results_name_B[iter*NUM_PERMUTATIONS + index] = name_inner.replace('</w>',' ')\n",
" #------#\n",
" #name_B = results_name_B[iter*NUM_PERMUTATIONS + random.randint(0,3)]\n",
" tmp = iter*NUM_PERMUTATIONS\n",
" _name_B=''\n",
" if results_sim[tmp+1]>results_sim[tmp+2]: _name_B = results_name_B[tmp + 3]\n",
" if results_sim[tmp+2]>results_sim[tmp+1]: _name_B = results_name_B[tmp + 4]\n",
"\n",
" if _name_B != name_B:\n",
" name_B=_name_B\n",
" else:\n",
" name_B = results_name_B[tmp + 5]\n",
"\n",
"#--------#\n",
"print('')\n",
"if(use == '🖼️image_encoding from image' and colab_image_path != \"\"):\n",
" from google.colab.patches import cv2_imshow\n",
" cv2_imshow(image_A)\n",
"#-----#\n",
"print('')\n",
"sorted, indices = torch.sort(results_sim,dim=0 , descending=True)\n",
"\n",
"for index in range(ITERS*NUM_PERMUTATIONS):\n",
" name_inner = results_name[indices[index].item()]\n",
" print(must_start_with + name_inner + must_end_with)\n",
" print(f'similarity = {round(sorted[index].item(),2)} %')\n",
" print('------')\n",
"#------#\n",
"db_vocab.close() #close the file"
],
"metadata": {
"collapsed": true,
"id": "fi0jRruI0-tu",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title ⚡ Get similiar tokens (not updated yet)\n",
"import torch\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"\n",
"# @markdown Write name of token to match against\n",
"token_name = \"banana \" # @param {type:'string',\"placeholder\":\"leave empty for random value token\"}\n",
"\n",
"prompt = token_name\n",
"# @markdown (optional) Mix the token with something else\n",
"mix_with = \"\" # @param {\"type\":\"string\",\"placeholder\":\"leave empty for random value token\"}\n",
"mix_method = \"None\" # @param [\"None\" , \"Average\", \"Subtract\"] {allow-input: true}\n",
"w = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
"# @markdown Limit char size of included token\n",
"\n",
"min_char_size = 0 # param {type:\"slider\", min:0, max: 50, step:1}\n",
"char_range = 50 # param {type:\"slider\", min:0, max: 50, step:1}\n",
"\n",
"tokenizer_output = tokenizer(text = prompt)\n",
"input_ids = tokenizer_output['input_ids']\n",
"id_A = input_ids[1]\n",
"A = torch.tensor(token[id_A])\n",
"A = A/A.norm(p=2, dim=-1, keepdim=True)\n",
"#-----#\n",
"tokenizer_output = tokenizer(text = mix_with)\n",
"input_ids = tokenizer_output['input_ids']\n",
"id_C = input_ids[1]\n",
"C = torch.tensor(token[id_C])\n",
"C = C/C.norm(p=2, dim=-1, keepdim=True)\n",
"#-----#\n",
"sim_AC = torch.dot(A,C)\n",
"#-----#\n",
"print(input_ids)\n",
"#-----#\n",
"\n",
"#if no input exists we just randomize the entire thing\n",
"if (prompt == \"\"):\n",
" id_A = -1\n",
" print(\"Tokenized prompt tensor A is a random valued tensor with no ID\")\n",
" R = torch.rand(A.shape)\n",
" R = R/R.norm(p=2, dim=-1, keepdim=True)\n",
" A = R\n",
" name_A = 'random_A'\n",
"\n",
"#if no input exists we just randomize the entire thing\n",
"if (mix_with == \"\"):\n",
" id_C = -1\n",
" print(\"Tokenized prompt 'mix_with' tensor C is a random valued tensor with no ID\")\n",
" R = torch.rand(A.shape)\n",
" R = R/R.norm(p=2, dim=-1, keepdim=True)\n",
" C = R\n",
" name_C = 'random_C'\n",
"\n",
"name_A = \"A of random type\"\n",
"if (id_A>-1):\n",
" name_A = vocab(id_A)\n",
"\n",
"name_C = \"token C of random type\"\n",
"if (id_C>-1):\n",
" name_C = vocab(id_C)\n",
"\n",
"print(f\"The similarity between A '{name_A}' and C '{name_C}' is {round(sim_AC.item()*100,2)} %\")\n",
"\n",
"if (mix_method == \"None\"):\n",
" print(\"No operation\")\n",
"\n",
"if (mix_method == \"Average\"):\n",
" A = w*A + (1-w)*C\n",
" _A = LA.vector_norm(A, ord=2)\n",
" print(f\"Tokenized prompt tensor A '{name_A}' token has been recalculated as A = w*A + (1-w)*C , where C is '{name_C}' token , for w = {w} \")\n",
"\n",
"if (mix_method == \"Subtract\"):\n",
" tmp = w*A - (1-w)*C\n",
" tmp = tmp/tmp.norm(p=2, dim=-1, keepdim=True)\n",
" A = tmp\n",
" #//---//\n",
" print(f\"Tokenized prompt tensor A '{name_A}' token has been recalculated as A = _A*norm(w*A - (1-w)*C) , where C is '{name_C}' token , for w = {w} \")\n",
"\n",
"#OPTIONAL : Add/subtract + normalize above result with another token. Leave field empty to get a random value tensor\n",
"\n",
"dots = torch.zeros(NUM_TOKENS)\n",
"for index in range(NUM_TOKENS):\n",
" id_B = index\n",
" B = torch.tensor(token[id_B])\n",
" B = B/B.norm(p=2, dim=-1, keepdim=True)\n",
" sim_AB = torch.dot(A,B)\n",
" dots[index] = sim_AB\n",
"\n",
"\n",
"sorted, indices = torch.sort(dots,dim=0 , descending=True)\n",
"#----#\n",
"if (mix_method == \"Average\"):\n",
" print(f'Calculated all cosine-similarities between the average of token {name_A} and {name_C} with Id_A = {id_A} and mixed Id_C = {id_C} as a 1x{sorted.shape[0]} tensor')\n",
"if (mix_method == \"Subtract\"):\n",
" print(f'Calculated all cosine-similarities between the subtract of token {name_A} and {name_C} with Id_A = {id_A} and mixed Id_C = {id_C} as a 1x{sorted.shape[0]} tensor')\n",
"if (mix_method == \"None\"):\n",
" print(f'Calculated all cosine-similarities between the token {name_A} with Id_A = {id_A} with the the rest of the {NUM_TOKENS} tokens as a 1x{sorted.shape[0]} tensor')\n",
"\n",
"#Produce a list of IDs that are most similar to the prompt ID at position 1 based on above result\n",
"\n",
"# @markdown Set print options\n",
"list_size = 100 # @param {type:'number'}\n",
"print_ID = False # @param {type:\"boolean\"}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Name = True # @param {type:\"boolean\"}\n",
"print_Divider = True # @param {type:\"boolean\"}\n",
"\n",
"\n",
"if (print_Divider):\n",
" print('//---//')\n",
"\n",
"print('')\n",
"print('Here is the result : ')\n",
"print('')\n",
"\n",
"for index in range(list_size):\n",
" id = indices[index].item()\n",
" if (print_Name):\n",
" print(f'{vocab(id)}') # vocab item\n",
" if (print_ID):\n",
" print(f'ID = {id}') # IDs\n",
" if (print_Similarity):\n",
" print(f'similarity = {round(sorted[index].item()*100,2)} %')\n",
" if (print_Divider):\n",
" print('--------')\n",
"\n",
"#Print the sorted list from above result\n",
"\n",
"#The prompt will be enclosed with the <|start-of-text|> and <|end-of-text|> tokens, which is why output will be [49406, ... , 49407].\n",
"\n",
"#You can leave the 'prompt' field empty to get a random value tensor. Since the tensor is random valued, it will not correspond to any tensor in the vocab.json list , and thus it will have no ID.\n",
"\n",
"# Save results as .db file\n",
"import shelve\n",
"VOCAB_FILENAME = 'tokens_most_similiar_to_' + name_A.replace('</w>','').strip()\n",
"d = shelve.open(VOCAB_FILENAME)\n",
"#NUM TOKENS == 49407\n",
"for index in range(NUM_TOKENS):\n",
" #print(d[f'{index}']) #<-----Use this to read values from the .db file\n",
" d[f'{index}']= vocab(indices[index].item()) #<---- write values to .db file\n",
"#----#\n",
"d.close() #close the file\n",
"# See this link for additional stuff to do with shelve: https://docs.python.org/3/library/shelve.html"
],
"metadata": {
"id": "iWeFnT1gAx6A",
"cellView": "form"
},
"execution_count": null,
"outputs": []
}
]
}