{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"Try this Free online SD 1.5 generator with the results: https://perchance.org/fusion-ai-image-generator\n",
"\n",
" This Notebook is a Stable-diffusion tool which allows you to find similiar prompts to an existing prompt. It uses the Nearest Neighbor decoder method listed here:https://arxiv.org/pdf/2303.03032"
],
"metadata": {
"id": "cRV2YWomjMBU"
}
},
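{
"cell_type": "markdown",
"source": [
"In brief: each vocab item is stored as a unit-normalized CLIP text encoding, and the notebook ranks items by their dot product (cosine similarity) against a reference encoding. Below is a minimal, self-contained sketch of that idea; the three-item vocab and the prompt are made-up examples, while the notebook itself uses the pre-quantized encodings downloaded in the next cell:\n",
"\n",
"```python\n",
"import torch\n",
"from transformers import AutoTokenizer, CLIPModel\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-large-patch14')\n",
"model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14')\n",
"\n",
"def encode(texts):\n",
"  inputs = tokenizer(text=texts, truncation=True, padding=True, return_tensors='pt')\n",
"  feats = model.get_text_features(**inputs)\n",
"  return feats / feats.norm(p=2, dim=-1, keepdim=True) # unit-normalize\n",
"\n",
"vocab = ['oil painting', 'photograph', 'watercolor sketch'] # toy vocab\n",
"ref = encode(['impressionist painting']) # 1 x 768 reference encoding\n",
"sims = (encode(vocab) @ ref.t()).squeeze(1) # cosine similarity per item\n",
"values, order = torch.sort(sims, descending=True)\n",
"for score, i in zip(values, order):\n",
"  print(f'{100*score.item():.1f}% : {vocab[int(i)]}')\n",
"```"
],
"metadata": {
"id": "nnSketchMB"
}
},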
{
"cell_type": "markdown",
"source": [
"THIS IS AN OLD VERSION OF THE CLIP INTERROGATOR.\n",
"\n",
"YOU WILL FIND THE UP TO DATE VERSION HERE:https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data/tree/main/Google%20Colab%20Jupyter%20Notebooks"
],
"metadata": {
"id": "9slWHq0JIX6D"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"\n",
"def fix_bad_symbols(txt):\n",
" result = txt\n",
" for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
" result = result.replace(symbol,'\\\\' + symbol)\n",
" #------#\n",
" return result;\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"#πΈπΉ\n",
"# Load the data if not already loaded\n",
"try:\n",
" loaded\n",
"except:\n",
" from safetensors.torch import load_file , save_file\n",
" import json , torch , requests , math\n",
" import pandas as pd\n",
" from PIL import Image\n",
" #----#\n",
" %cd {home_directory}\n",
" !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
" loaded = True\n",
" %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
" !unzip vocab.zip\n",
" !unzip reference.zip\n",
"#------#\n",
"%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
"with open(f'prompts.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
"#-------#\n",
"%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
"with open(f'reference_prompts.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" target_prompts = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
"#------#\n",
"with open(f'reference_urls.json', 'r') as f:\n",
" data = json.load(f)\n",
" _df = pd.DataFrame({'count': data})['count']\n",
" target_urls = {\n",
" key : value for key, value in _df.items()\n",
" }\n",
"from transformers import AutoTokenizer\n",
"tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
"model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
"logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
"\n",
"index = 0\n",
"%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
"vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
"for key in vocab_encodings:\n",
" index = index + 1;\n",
"#------#\n",
"NUM_VOCAB_ITEMS = index\n",
"\n",
"index = 0\n",
"%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
"for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
" index = index + 1;\n",
"#------#\n",
"NUM_REFERENCE_ITEMS = index\n",
"\n"
],
"metadata": {
"id": "TC5lMJrS1HCC"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \tβ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
"# @markdown Choose a pre-encoded reference\n",
"index = 213 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
"PROMPT_INDEX = index\n",
"prompt = target_prompts[f'{PROMPT_INDEX}']\n",
"url = target_urls[f'{PROMPT_INDEX}']\n",
"if url.find('perchance')>-1:\n",
" image = Image.open(requests.get(url, stream=True).raw)\n",
"#------#\n",
"# @markdown βοΈ πΌοΈ encoding <-----?-----> π encoding
\n",
"C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
"log_strength_1 = 2.17 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"prompt_strength = torch.tensor(math.pow(10 ,log_strength_1-1)).to(dtype = torch.float32)\n",
"reference = torch.zeros(768).to(dtype = torch.float32)\n",
"\n",
"%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
"references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
"reference = torch.add(reference, prompt_strength * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
"reference = torch.add(reference, prompt_strength * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
"references = '' # Clear up memory\n",
"# @markdown -----------\n",
"# @markdown πβ 1st Enhance similarity to prompt(s)\n",
"POS_2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"log_strength_2 = 1.03 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"pos_strength = torch.tensor(math.pow(10 ,log_strength_2-1)).to(dtype = torch.float32)\n",
"for _POS in POS_2.replace('' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
" inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
" text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
" text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
" reference = torch.add(reference, pos_strength * text_features_POS)\n",
"# @markdown -----------\n",
"\n",
"# @markdown -----------\n",
"# @markdown πβ 2nd Enhance similarity to prompt(s)\n",
"POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"log_strength_3 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"pos_strength = torch.tensor(math.pow(10 ,log_strength_3-1)).to(dtype = torch.float32)\n",
"for _POS in POS.replace('' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
" inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
" text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
" text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
" reference = torch.add(reference, pos_strength * text_features_POS)\n",
"# @markdown -----------\n",
"\n",
"# @markdown π« Penalize similarity to prompt(s)\n",
"NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
"log_strength_4 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"neg_strength = torch.tensor(math.pow(10 ,log_strength_4-1)).to(dtype = torch.float32)\n",
"for _NEG in NEG.replace('' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
" inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
" text_features_NEG = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
" text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
" reference = torch.sub(reference, neg_strength * text_features_NEG)\n",
"# @markdown -----------\n",
"# @markdown β© Skip item(s) containing the word(s)\n",
"SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
"\n",
"min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
"\n",
"def isBlacklisted(_txt, _blacklist):\n",
" blacklist = _blacklist.lower().replace('' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
" txt = _txt.lower().strip()\n",
" if len(txt) -1 : return True\n",
" #------#\n",
" found = False\n",
" alphabet = 'abcdefghijklmnopqrstuvxyz'\n",
" for letter in alphabet:\n",
" found = txt.find(letter)>-1\n",
" if found:break\n",
" #------#\n",
" return not found\n",
"\n",
"# @markdown -----------\n",
"# @markdown π How similar should the results be?\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 1 # @param {type:'number'}\n",
"# @markdown -----------\n",
"# @markdown Repeat output N times\n",
"N = 7 # @param {type:\"slider\", min:0, max:20, step:1}\n",
"# @markdown -----------\n",
"# @markdown βοΈ Run the script?\n",
"update_list = True # @param {type:\"boolean\"}\n",
"\n",
"calculate_variance = False # @param {type:\"boolean\"}\n",
"\n",
"ne = update_list\n",
"\n",
"try: first\n",
"except:\n",
" enable = True\n",
" first = True\n",
"\n",
"if (enable):\n",
" reference = reference/reference.norm(p=2, dim=-1, keepdim=True)\n",
" %cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
" sims = torch.matmul(vocab_encodings.dequantize(),reference.t())\n",
" sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"\n",
" if calculate_variance:\n",
" average = torch.zeros(768)\n",
" for key in range(NUM_VOCAB_ITEMS):\n",
" if (key>=start_at_index and key < start_at_index + list_size):\n",
" average = torch.add(average, vocab_encodings[key].dequantize())\n",
" if (key>=start_at_index + list_size) : break\n",
" average = average * (1/max(1, list_size))\n",
" average = average/average.norm(p=2, dim=-1, keepdim=True)\n",
" average = average.clone().detach();\n",
" variance = torch.zeros(1)\n",
" for key in range(NUM_VOCAB_ITEMS):\n",
" if (key>=start_at_index and key < start_at_index + list_size):\n",
" #dot product\n",
" difference_to_average = 100 * (torch.ones(1) - torch.dot(average[0]\n",
" , vocab_encodings[key].dequantize()[0])/average.norm(p=2, dim=-1, keepdim=True))\n",
" variance = torch.add(variance, difference_to_average * difference_to_average)\n",
" if (key>=start_at_index + list_size) : break\n",
" #--------#\n",
" variance = variance * (1/max(1, list_size))\n",
" variance= variance.clone().detach();\n",
" print(f'The variance for the selected range is {math.sqrt(variance.item())} units from average')\n",
" #--------#\n",
"#---#\n",
"output = '{'\n",
"for _index in range(list_size):\n",
" tmp = prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}']\n",
" if isBlacklisted(tmp , SKIP): continue\n",
" tmp = fix_bad_symbols(tmp)\n",
" if output.find(tmp)>-1:continue\n",
" output = output + tmp + '|'\n",
"#---------#\n",
"output = (output + '}').replace('|}' , '} ')\n",
"print('')\n",
"print('')\n",
"for iter in range(N):\n",
" print(output)\n",
"#-------#\n",
"print('')\n",
"print('')\n",
"image or print('No image found')"
],
"metadata": {
"id": "NqL_I3ZSrISq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Check the average value for this set\n",
"sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
"sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"for index in range(10):\n",
" print(prompts[f'{indices[index].item()}'])"
],
"metadata": {
"id": "XNHz0hfhHRUu"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title βοΈπ Print the results (Advanced)\n",
"list_size = 1000 # @param {type:'number'}\n",
"start_at_index = 0 # @param {type:'number'}\n",
"print_Similarity = True # @param {type:\"boolean\"}\n",
"print_Prompts = True # @param {type:\"boolean\"}\n",
"print_Descriptions = True # @param {type:\"boolean\"}\n",
"compact_Output = True # @param {type:\"boolean\"}\n",
"newline_Separator = False # @param {type:\"boolean\"}\n",
"\n",
"import random\n",
"# @markdown -----------\n",
"# @markdown Mix with...\n",
"list_size2 = 1000 # @param {type:'number'}\n",
"start_at_index2 = 10000 # @param {type:'number'}\n",
"rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
"\n",
"# @markdown -----------\n",
"# @markdown Repeat output N times\n",
"N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
"\n",
"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
"RANGE = list_size\n",
"separator = '|'\n",
"if newline_Separator : separator = separator + '\\n'\n",
"\n",
"_prompts = ''\n",
"_sims = ''\n",
"for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index].item()\n",
"\n",
" prompt = prompts[f'{index}']\n",
" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
"\n",
" #Remove duplicates\n",
" if _prompts.find(prompt + separator)<=-1:\n",
" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
" #-------#\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
"#------#\n",
"__prompts = fix_bad_symbols(__prompts)\n",
"__prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
"__sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
"#------#\n",
"\n",
"if(not print_Prompts): __prompts = ''\n",
"if(not print_Similarity): __sims = ''\n",
"\n",
"if(not compact_Output):\n",
" if(print_Descriptions):\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
" for i in range(N) : print(__prompts)\n",
" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
" print('')\n",
" else:\n",
" for i in range(N) : print(__prompts)\n",
"else:\n",
" for i in range(N) : print(__prompts)\n",
"#-------#"
],
"metadata": {
"id": "EdBiAguJO9aX",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"The savefile can be used here : https://perchance.org/fusion-ai-image-generator"
],
"metadata": {
"id": "JldNmWy1iyvK"
}
},
{
"cell_type": "code",
"source": [
"# @title \tβ Create fusion-generator .json savefile from result\n",
"filename = 'blank.json'\n",
"path = '/content/text-to-image-prompts/fusion/'\n",
"\n",
"print(f'reading {filename}....')\n",
"_index = 0\n",
"%cd {path}\n",
"with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
"#------#\n",
"_df = pd.DataFrame({'count': data})['count']\n",
"_savefile = {\n",
" key : value for key, value in _df.items()\n",
"}\n",
"#------#\n",
"from safetensors.torch import load_file\n",
"import json , os , torch\n",
"import pandas as pd\n",
"#----#\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"#------#\n",
"savefile_prompt = ''\n",
"for i in range(N) : savefile_prompt = savefile_prompt + ' ' + __prompts\n",
"_savefile['main'] = savefile_prompt.replace('\\n', ' ').replace(' ', ' ').replace(' ', ' ')\n",
"#------#\n",
"save_filename = f'fusion_C05_X7_1000_{PROMPT_INDEX}.json'\n",
"output_folder = '/content/output/savefiles/'\n",
"my_mkdirs(output_folder)\n",
"#-----#\n",
"%cd {output_folder}\n",
"print(f'Saving segment {save_filename} to {output_folder}...')\n",
"with open(save_filename, 'w') as f:\n",
" json.dump(_savefile, f)\n"
],
"metadata": {
"id": "Q7vpNAXQilbf",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \tβ Create a savefile-set from the entire range of pre-encoded items\n",
"\n",
"# @markdown π₯ Load the data (only required one time)\n",
"load_the_data = True # @param {type:\"boolean\"}\n",
"\n",
"import math\n",
"from safetensors.torch import load_file\n",
"import json , os , torch\n",
"import pandas as pd\n",
"from PIL import Image\n",
"import requests\n",
"\n",
"def my_mkdirs(folder):\n",
" if os.path.exists(folder)==False:\n",
" os.makedirs(folder)\n",
"\n",
"# @markdown βοΈ Set the value for C in the reference
sim = C* text_enc + image_enc*(1-C)
\n",
"\n",
"C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
"\n",
"# @markdown π« Penalize similarity to this prompt(optional)\n",
"if(load_the_data):\n",
" target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
" from transformers import AutoTokenizer\n",
" tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
" from transformers import CLIPProcessor, CLIPModel\n",
" processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
" model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
" logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
"#---------#\n",
"\n",
"filename = 'blank.json'\n",
"path = '/content/text-to-image-prompts/fusion/'\n",
"print(f'reading {filename}....')\n",
"_index = 0\n",
"%cd {path}\n",
"with open(f'{filename}', 'r') as f:\n",
" data = json.load(f)\n",
"#------#\n",
"_df = pd.DataFrame({'count': data})['count']\n",
"_blank = {\n",
" key : value for key, value in _df.items()\n",
"}\n",
"#------#\n",
"\n",
"root_savefile_name = 'fusion_C05_X7'\n",
"\n",
"%cd /content/\n",
"output_folder = '/content/output/savefiles/'\n",
"my_mkdirs(output_folder)\n",
"my_mkdirs('/content/output2/savefiles/')\n",
"my_mkdirs('/content/output3/savefiles/')\n",
"my_mkdirs('/content/output4/savefiles/')\n",
"my_mkdirs('/content/output5/savefiles/')\n",
"my_mkdirs('/content/output6/savefiles/')\n",
"my_mkdirs('/content/output7/savefiles/')\n",
"my_mkdirs('/content/output8/savefiles/')\n",
"my_mkdirs('/content/output9/savefiles/')\n",
"my_mkdirs('/content/output10/savefiles/')\n",
"my_mkdirs('/content/output11/savefiles/')\n",
"my_mkdirs('/content/output12/savefiles/')\n",
"my_mkdirs('/content/output13/savefiles/')\n",
"\n",
"\n",
"NEG = '' # @param {type:'string'}\n",
"strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
"\n",
"for index in range(1667):\n",
"\n",
" PROMPT_INDEX = index\n",
" prompt = target_prompts[f'{index}']\n",
" url = urls[f'{index}']\n",
" if url.find('perchance')>-1:\n",
" image = Image.open(requests.get(url, stream=True).raw)\n",
" else: continue #print(\"(No image for this ID)\")\n",
"\n",
" print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
" text_features_A = target_text_encodings[f'{index}']\n",
" image_features_A = target_image_encodings[f'{index}']\n",
" # text-similarity\n",
" sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
"\n",
" neg_sims = 0*sims\n",
" if(NEG != ''):\n",
" # Get text features for user input\n",
" inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
" text_features_NEG = model.get_text_features(**inputs)\n",
" text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
" # text-similarity\n",
" neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
" #------#\n",
"\n",
" # plus image-similarity\n",
" sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
"\n",
" # minus NEG-similarity\n",
" sims = sims - neg_sims\n",
"\n",
" # Sort the items\n",
" sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"\n",
" # @markdown Repeat output N times\n",
" RANGE = 1000\n",
" NUM_CHUNKS = 10+\n",
" separator = '|'\n",
" _savefiles = {}\n",
" #-----#\n",
" for chunk in range(NUM_CHUNKS):\n",
" if chunk=<10:continue\n",
" start_at_index = chunk * RANGE\n",
" _prompts = ''\n",
" for _index in range(start_at_index + RANGE):\n",
" if _index < start_at_index : continue\n",
" index = indices[_index].item()\n",
" prompt = prompts[f'{index}']\n",
" _prompts = _prompts.replace(prompt + separator,'')\n",
" _prompts = _prompts + prompt + separator\n",
" #------#\n",
" _prompts = fix_bad_symbols(_prompts)\n",
" _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
" _savefiles[f'{chunk}'] = _prompts\n",
" #---------#\n",
" save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
"\n",
"\n",
" if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n",
" if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n",
" if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n",
" if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n",
" if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n",
" if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n",
" if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n",
" if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n",
" if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n",
" if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n",
" if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n",
" if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n",
"\n",
"\n",
" #------#\n",
" print(f'Saving savefile {save_filename} to {output_folder}...')\n",
" with open(save_filename, 'w') as f:\n",
" json.dump(_savefiles, f)\n",
" #---------#\n",
" continue\n",
"#-----------#"
],
"metadata": {
"id": "x1uAVXZEoL0T",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Determine if this notebook is running on Colab or Kaggle\n",
"#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"\n",
"# @title Download the text_encodings as .zip\n",
"import os\n",
"%cd {home_directory}\n",
"#os.remove(f'{home_directory}results.zip')\n",
"root_output_folder = home_directory + 'output/'\n",
"zip_dest = f'/content/results.zip' #drive/MyDrive\n",
"!zip -r {zip_dest} {root_output_folder}"
],
"metadata": {
"id": "zivBNrw9uSVD",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# @title \tβ New code (work in progress)\n",
"\n",
"def get_num_vocab_items(_url):\n",
" num_vocab_items = 0\n",
" for item in _url.split('_'):\n",
" if item.find('safetensors')>-1: num_vocab_items = int(item.replace('.safetensors', ''))\n",
" #------#\n",
" return num_vocab_items-1\n",
"\n",
"\n",
"def get_similiar(_ref , urls, _LIST_SIZE):\n",
" dot_dtype = torch.float16\n",
" _SCALE = torch.tensor(0.0043).to(dot_dtype)\n",
" _DIM = 768\n",
" _vocab = {}\n",
" #----#\n",
" inputs = tokenizer(text = _ref.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
" ref = model.get_text_features(**inputs)[0]\n",
" ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
" #-----#\n",
" num_vocab_items = 0\n",
" for url in urls:\n",
" num_vocab_items = num_vocab_items + get_num_vocab_items(url)\n",
" #------#\n",
" vocab = torch.zeros(num_vocab_items , _DIM).to(torch.uint8)\n",
" prompts = {}\n",
" index = 0\n",
" for url in urls:\n",
" __vocab = load_file(url)\n",
" for key in load_file(url):\n",
" vocab[index] = __vocab[key][1:_DIM+1] - __vocab[key][0]*torch.ones(_DIM).t()\n",
" prompts[f'{index}'] = key\n",
" index = index + 1\n",
" #-------#\n",
" __vocab = {}\n",
" #-------#\n",
" sims = torch.matmul((vocab*_SCALE).to(dot_dtype) , ref.t())\n",
" sorted , indices = torch.sort(sims, dim = 0 , descending = True)\n",
" return indices , prompts , sims\n",
" _prompts = {}\n",
" for index in range(num_vocab_items):\n",
" key = prompts[f'{indices[index]}']\n",
" _prompts[f'{key}'] = sims[key].item()\n",
" index = index + 1\n",
" if index>_LIST_SIZE:break\n",
" #-------#\n",
" return _prompts\n",
"#-------#\n",
"\n"
],
"metadata": {
"cellView": "form",
"id": "uDzsk02CbMFc"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"vocab = {}\n",
"# @title \tβ New code (work in progress)\n",
"ref = 'impressionist painting by luis royo' # @param {type:'string' , placeholder:'type a single prompt to match'}\n",
"LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
"urls = [ '/content/fusion-t2i-generator-data/civitai_vocab_q0043_203663.safetensors' ,]\n",
"\n",
" #'/content/fusion-t2i-generator-data/clip_vocab_q0043_541291.safetensors' , '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors' , '/content/fusion-t2i-generator-data/names_vocab_q0043_162977.safetensors' , '/content/fusion-t2i-generator-data/r34_vocab_q0043_96166.safetensors' ]\n",
"\n",
"indices , prompts , sims = get_similiar(ref , urls , LIST_SIZE)\n",
"\n",
"index = 0\n",
"_prompts = {}\n",
"for index in range(203662):\n",
" try:\n",
" key = prompts[f'{indices[index].item()}']\n",
" print(key)\n",
" except: print('Not found!')\n",
" #_prompts[f'{key}'] = sims[key].item()\n",
" index = index + 1\n",
" if index>LIST_SIZE:break\n",
"\n"
],
"metadata": {
"cellView": "form",
"id": "Azz1kCza6LB3"
},
"execution_count": null,
"outputs": []
}
]
}