{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "markdown", "source": [ "Try this Free online SD 1.5 generator with the results: https://perchance.org/fusion-ai-image-generator\n", "\n", " This Notebook is a Stable-diffusion tool which allows you to find similiar prompts to an existing prompt. It uses the Nearest Neighbor decoder method listed here:https://arxiv.org/pdf/2303.03032" ], "metadata": { "id": "cRV2YWomjMBU" } }, { "cell_type": "code", "source": [ "# @title βš„ Initialize\n", "\n", "import os\n", "home_directory = '/content/'\n", "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n", "if using_Kaggle : home_directory = '/kaggle/working/'\n", "%cd {home_directory}\n", "\n", "def fix_bad_symbols(txt):\n", " result = txt\n", " for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n", " result = result.replace(symbol,'\\\\' + symbol)\n", " #------#\n", " return result;\n", "\n", "def my_mkdirs(folder):\n", " if os.path.exists(folder)==False:\n", " os.makedirs(folder)\n", "\n", "#πŸ”ΈπŸ”Ή\n", "# Load the data if not already loaded\n", "try:\n", " loaded\n", "except:\n", " from safetensors.torch import load_file , save_file\n", " import json , torch , requests , math\n", " import pandas as pd\n", " from PIL import Image\n", " #----#\n", " %cd {home_directory}\n", " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n", " loaded = True\n", "\n", "from transformers import AutoTokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n", "from transformers import CLIPProcessor, CLIPModel\n", "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n", "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n", "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n", "\n", "%cd {home_directory + 'fusion-t2i-generator-data/'}\n", "!unzip reference.zip\n", "#------#\n", "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", "with open(f'reference_prompts.json', 'r') as f:\n", " data = json.load(f)\n", " _df = pd.DataFrame({'count': data})['count']\n", " target_prompts = {\n", " key : value for key, value in _df.items()\n", " }\n", "#------#\n", "with open(f'reference_urls.json', 'r') as f:\n", " data = json.load(f)\n", " _df = pd.DataFrame({'count': data})['count']\n", " target_urls = {\n", " key : value for key, value in _df.items()\n", " }\n", "\n", "#------#\n", "dot_dtype = torch.float32\n", "dim = 768\n", "reference = torch.zeros(dim).to(dtype = dot_dtype)" ], "metadata": { "id": "TC5lMJrS1HCC" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "Feel free to skip these cells if you do not plan on using them\n" ], "metadata": { "id": "Xf9zoq-Za3wi" } }, { "cell_type": "code", "source": [ "# @markdown πŸ–ΌοΈ+πŸ“ Choose a pre-encoded reference (optional)\n", "index = 657 # @param {type:\"slider\", min:0, max:1666, step:1}\n", "PROMPT_INDEX = index\n", "prompt = target_prompts[f'{PROMPT_INDEX}']\n", "url = target_urls[f'{PROMPT_INDEX}']\n", "if url.find('perchance')>-1:\n", " image = Image.open(requests.get(url, stream=True).raw)\n", "#------#\n", "try: reference\n", "except: reference = torch.zeros(dim).to(dtype = dot_dtype)\n", "if reference == '': reference = 
torch.zeros(dim).to(dtype = dot_dtype)\n", "# @markdown βš–οΈ πŸ–ΌοΈ encoding <-----?-----> πŸ“ encoding
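{ "cell_type": "markdown", "source": [
"The cell above mixes the stored image and text encodings as ref = 10^(log_strength-1) * ( C * image_enc + (1-C) * text_enc ). Below is a minimal sketch of that weighting using random stand-in tensors (not part of the pipeline) , showing that the default log_strength = 1 is neutral , since 10^0 = 1.\n"
], "metadata": {} }, { "cell_type": "code", "source": [
"# Minimal sketch of the reference blend above (random stand-ins , not real encodings)\n",
"import math , torch\n",
"_dim = 768\n",
"image_enc = torch.randn(_dim)\n",
"image_enc = image_enc / image_enc.norm(p=2, dim=-1, keepdim=True)\n",
"text_enc = torch.randn(_dim)\n",
"text_enc = text_enc / text_enc.norm(p=2, dim=-1, keepdim=True)\n",
"C = 0.3 # image weight ; (1-C) is the text weight\n",
"log_strength = 1 # 10^(1-1) = 1 , so the default slider value is neutral\n",
"strength = math.pow(10, log_strength - 1)\n",
"ref_sketch = strength * (C * image_enc + (1 - C) * text_enc)\n",
"print(f'blend norm : {ref_sketch.norm().item():.4f}') # below 1 unless the encodings align"
], "metadata": {}, "execution_count": null, "outputs": [] },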
\n", "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n", "reference = torch.add(reference, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", "reference = torch.add(reference, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", "references = '' # Clear up memory\n", "ref = reference.clone().detach()\n", "#------#\n", "print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n", "image" ], "metadata": { "id": "BwrEs5zVB0Sb" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# @markdown πŸ–ΌοΈ Upload your own image for use as reference via URL (optional)\n", "URL = '' # @param {type:'string' ,placeholder:'paste an url here'}\n", "image = Image.open(requests.get(URL, stream=True).raw)\n", "#---------#\n", "# Get image features\n", "inputs = processor(images=image, return_tensors=\"pt\")\n", "image_features = model.get_image_features(**inputs)\n", "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", "#-----#\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "ref = ref + math.pow(10,log_strength-1)*image_features\n", "image" ], "metadata": { "id": "IqUsiQw2HU2C" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# @markdown πŸ–ΌοΈ Upload your own image in the /content/ folder for use as reference (optional)\n", "FILENAME = '' # @param {type:'string' ,placeholder:'IMG_123.png'}\n", "import cv2\n", "image = cv2.imread(FILENAME)\n", "image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", "\n", "#---------#\n", "# Get image features\n", "inputs = processor(images=image, return_tensors=\"pt\")\n", "image_features = model.get_image_features(**inputs)\n", "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", "#-----#\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "ref = ref + math.pow(10,log_strength-1)*image_features\n", "image" ], "metadata": { "id": "I_-GOwFPKkha" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "Save the reference prior to running the Interrogator" ], "metadata": { "id": "zeu6JcM-mk9z" } }, { "cell_type": "code", "source": [ "# @title βš„ Save the reference\n", "try: ref\n", "except: ref = torch.zeros(dim)\n", "_ref = {}\n", "_ref['weights'] = ref.to(dot_dtype)\n", "%cd /content/\n", "save_file(_ref , 'reference.safetensors' )" ], "metadata": { "id": "lOQuTPfBMK82" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# @title βš„ Run the CLIP interrogator on the saved reference\n", "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "# @markdown -----\n", "# @markdown Select vocab\n", "general = False # @param {type:\"boolean\"}\n", "civit9 = True # @param {type:\"boolean\"}\n", "fanfic1 = False # @param {type:\"boolean\"}\n", "fanfic2 = False # @param {type:\"boolean\"}\n", "# @markdown -----\n", "# @title βš„ New interrogator code using quantized text corpus\n", "%cd /content/\n", "_ref = load_file('reference.safetensors' )\n", "ref = 
{ "cell_type": "code", "source": [
"# @title βš„ Run the CLIP interrogator on the saved reference\n",
"LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
"START_AT = 0 # @param {type:'number' , placeholder:'set how far into the sorted list to start'}\n",
"# @markdown -----\n",
"# @markdown Select vocab\n",
"general = False # @param {type:\"boolean\"}\n",
"civit9 = True # @param {type:\"boolean\"}\n",
"fanfic1 = False # @param {type:\"boolean\"}\n",
"fanfic2 = False # @param {type:\"boolean\"}\n",
"# @markdown -----\n",
"# Interrogator code using the quantized text corpus\n",
"%cd {home_directory}\n",
"_ref = load_file('reference.safetensors' )\n",
"ref = _ref['weights'].to(dot_dtype)\n",
"# @markdown πŸ“ Enhance/Penalize Similarity and skip items containing word(s)\n",
"POS1 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"POS2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"NEG = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"SKIP = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
"min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
"def isBlacklisted(_txt):\n",
"  txt = _txt.lower().strip()\n",
"  if len(txt) < 1 : return True\n",
"  if min_wordcount > 0 and len(txt.split()) < min_wordcount : return True\n",
"  blacklist = SKIP.lower().replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
"  if blacklist == '' : return False\n",
"  for item in blacklist.split(','):\n",
"    item = item.strip()\n",
"    if item != '' and txt.find(item)>-1 : return True\n",
"  #------#\n",
"  found = False\n",
"  alphabet = 'abcdefghijklmnopqrstuvwxyz'\n",
"  for letter in alphabet:\n",
"    found = txt.find(letter)>-1\n",
"    if found : break\n",
"  #------#\n",
"  return not found\n",
"# @markdown -----\n",
"# @markdown logarithmic prompt strength x for value 10^(x-1)\n",
"_POS1 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"_POS2 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"_NEG = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
"# @markdown -----\n",
"for _item in POS1.split(','):\n",
"  item = _item.strip()\n",
"  if item == '':continue\n",
"  inputs = tokenizer(text = item, truncation = True , padding=True, return_tensors=\"pt\")\n",
"  ref = ref + math.pow(10,_POS1-1) * model.get_text_features(**inputs)[0]\n",
"#-------#\n",
"for _item in POS2.split(','):\n",
"  item = _item.strip()\n",
"  if item == '':continue\n",
"  inputs = tokenizer(text = item, truncation = True , padding=True, return_tensors=\"pt\")\n",
"  ref = ref + math.pow(10,_POS2-1) * model.get_text_features(**inputs)[0]\n",
"#-------#\n",
"for _item in NEG.split(','):\n",
"  item = _item.strip()\n",
"  if item == '':continue\n",
"  inputs = tokenizer(text = item, truncation = True , padding=True, return_tensors=\"pt\")\n",
"  # NEG items are subtracted so they penalize similarity\n",
"  ref = ref - math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n",
"#------#\n",
"ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
"vocab_to_load = ''\n",
"if (general): vocab_to_load = vocab_to_load + 'general , '\n",
"if (civit9): vocab_to_load = vocab_to_load + 'civit9 , '\n",
"if (fanfic1): vocab_to_load = vocab_to_load + 'fanfic1 , '\n",
"if (fanfic2): vocab_to_load = vocab_to_load + 'fanfic2 , '\n",
"vocab_to_load = vocab_to_load.rstrip(' ,')\n",
"multi = vocab_to_load.find(',')>-1\n",
"\n",
"#-----#\n",
"prompts_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text'\n",
"encodings_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text_encodings'\n",
"#----#\n",
"scale = 0.0043\n",
"size = 0\n",
"#------#\n",
"total_items = 0\n",
"for filename in os.listdir(prompts_folder):\n",
"  if (not general and filename.find('general')>-1):continue\n",
"  if (not civit9 and filename.find('civit9')>-1):continue\n",
"  if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
"  if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
"  size = size + LIST_SIZE\n",
"#-------#\n",
"similiar_sims = torch.zeros(size)\n",
"similiar_prompts = {}\n",
"_index = 0\n",
"#-------#\n",
"similiar_encodings = {}\n",
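"# Each vocab shard is a pair of files : {root}.json maps index -> prompt text\n",
"# (plus a 'num_items' field) , while {root}.safetensors holds the matching\n",
"# uint8-quantized CLIP text encodings , one row per prompt.\n",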
"for filename in os.listdir(prompts_folder):\n",
"  if (not general and filename.find('general')>-1):continue\n",
"  if (not civit9 and filename.find('civit9')>-1):continue\n",
"  if (not fanfic1 and filename.find('fanfic1')>-1):continue\n",
"  if (not fanfic2 and filename.find('fanfic2')>-1):continue\n",
"  #------#\n",
"  root_filename = filename.replace('.json', '')\n",
"  %cd {prompts_folder}\n",
"  prompts = {}\n",
"  with open(f'{root_filename}.json', 'r') as f:\n",
"    data = json.load(f).items()\n",
"    for key,value in data:\n",
"      prompts[key] = value\n",
"  num_items = int(prompts['num_items'])\n",
"  total_items = total_items + num_items\n",
"\n",
"  #------#\n",
"  try:vocab_loaded\n",
"  except:\n",
"    vocab_loaded = 'first'\n",
"  #-----#\n",
"  if vocab_loaded == 'first' or (vocab_loaded != vocab_to_load and not multi):\n",
"    %cd {encodings_folder}\n",
"    _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
"    text_encodings = torch.zeros(num_items , dim)\n",
"    tmp = torch.ones(dim).to(dot_dtype)\n",
"    for index in range(num_items):\n",
"      text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].item())\n",
"    vocab_loaded = vocab_to_load\n",
"  #------#\n",
"  sims = torch.matmul(text_encodings*scale, ref.t())\n",
"  sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
"  #-----#\n",
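"  # Rows of _text_encodings store [zero_point, q_0 .. q_767] as uint8 ; the\n",
"  # subtraction above recovers q - zero_point , and multiplying by `scale`\n",
"  # turns it back into floats. Since `ref` is unit-norm , `sims` ranks the\n",
"  # vocab by (approximate) cosine similarity to the reference.\n",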
"  for index in range(LIST_SIZE + START_AT):\n",
"    if index < START_AT : continue\n",
"    prompt = prompts[f'{indices[index].item()}']\n",
"    if isBlacklisted(prompt) : continue\n",
"    #-------#\n",
"    similiar_sims[_index] = sorted[index]\n",
"    similiar_prompts[f'{_index}'] = prompt\n",
"    _index = _index + 1\n",
"  #-------#\n",
"#-------#\n",
"# Sort everything collected across the shards and print the top items\n",
"sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
"output = ''\n",
"count = 0\n",
"for index in range(size):\n",
"  if count >= LIST_SIZE : break\n",
"  key = f'{indices[index].item()}'\n",
"  if not key in similiar_prompts : continue\n",
"  output = output + similiar_prompts[key] + '|'\n",
"  count = count + 1\n",
"#-------#\n",
"print('{' + output.rstrip('|') + '}')"
], "metadata": { "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [
"# @markdown βš„ Batch-generate savefiles from the pre-encoded references\n",
"# Legacy batch cell : expects the older /content/text-to-image-prompts dataset\n",
"# and its getPromptsAndLinks() helper. Similarity is blended per the formula\n",
"# sim = C * text_enc + image_enc * (1 - C)\n",
\n", "\n", "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "\n", "# @markdown 🚫 Penalize similarity to this prompt(optional)\n", "if(load_the_data):\n", " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n", " from transformers import AutoTokenizer\n", " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n", " from transformers import CLIPProcessor, CLIPModel\n", " processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n", " model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n", " logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n", "#---------#\n", "\n", "filename = 'blank.json'\n", "path = '/content/text-to-image-prompts/fusion/'\n", "print(f'reading {filename}....')\n", "_index = 0\n", "%cd {path}\n", "with open(f'{filename}', 'r') as f:\n", " data = json.load(f)\n", "#------#\n", "_df = pd.DataFrame({'count': data})['count']\n", "_blank = {\n", " key : value for key, value in _df.items()\n", "}\n", "#------#\n", "\n", "root_savefile_name = 'fusion_C05_X7'\n", "\n", "%cd /content/\n", "output_folder = '/content/output/savefiles/'\n", "my_mkdirs(output_folder)\n", "my_mkdirs('/content/output2/savefiles/')\n", "my_mkdirs('/content/output3/savefiles/')\n", "my_mkdirs('/content/output4/savefiles/')\n", "my_mkdirs('/content/output5/savefiles/')\n", "my_mkdirs('/content/output6/savefiles/')\n", "my_mkdirs('/content/output7/savefiles/')\n", "my_mkdirs('/content/output8/savefiles/')\n", "my_mkdirs('/content/output9/savefiles/')\n", "my_mkdirs('/content/output10/savefiles/')\n", "my_mkdirs('/content/output11/savefiles/')\n", "my_mkdirs('/content/output12/savefiles/')\n", "my_mkdirs('/content/output13/savefiles/')\n", "\n", "\n", "NEG = '' # @param {type:'string'}\n", "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n", "\n", "for index in range(1667):\n", "\n", " PROMPT_INDEX = index\n", " prompt = target_prompts[f'{index}']\n", " url = urls[f'{index}']\n", " if url.find('perchance')>-1:\n", " image = Image.open(requests.get(url, stream=True).raw)\n", " else: continue #print(\"(No image for this ID)\")\n", "\n", " print(f\"no. 
"  print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
"  text_features_A = target_text_encodings[f'{index}']\n",
"  image_features_A = target_image_encodings[f'{index}']\n",
"  # text-similarity\n",
"  sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
"\n",
"  neg_sims = 0*sims\n",
"  if(NEG != ''):\n",
"    # Get text features for user input\n",
"    inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
"    text_features_NEG = model.get_text_features(**inputs)\n",
"    text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
"    # text-similarity\n",
"    neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
"  #------#\n",
"\n",
"  # plus image-similarity\n",
"  sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
"\n",
"  # minus NEG-similarity\n",
"  sims = sims - neg_sims\n",
"\n",
"  # Sort the items\n",
"  sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"\n",
"  # Write the ranked output in chunks of RANGE items\n",
"  RANGE = 1000\n",
"  NUM_CHUNKS = 130\n",
"  separator = '|'\n",
"  _savefiles = {}\n",
"  #-----#\n",
"  for chunk in range(NUM_CHUNKS):\n",
"    if chunk <= 10 : continue\n",
"    start_at_index = chunk * RANGE\n",
"    _prompts = ''\n",
"    for _index in range(start_at_index + RANGE):\n",
"      if _index < start_at_index : continue\n",
"      index = indices[_index].item()\n",
"      prompt = prompts[f'{index}']\n",
"      _prompts = _prompts.replace(prompt + separator,'')\n",
"      _prompts = _prompts + prompt + separator\n",
"    #------#\n",
"    _prompts = fix_bad_symbols(_prompts)\n",
"    _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
"    _savefiles[f'{chunk}'] = _prompts\n",
"    #---------#\n",
"    save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
"    # Route chunks 11-20 to output2 , 21-30 to output3 , ... , 121-130 to output13\n",
"    save_folder = f'/content/output{(chunk - 1) // 10 + 1}/savefiles/'\n",
"    %cd {save_folder}\n",
"    #------#\n",
"    print(f'Saving savefile {save_filename} to {save_folder}...')\n",
"    with open(save_filename, 'w') as f:\n",
"      json.dump(_savefiles, f)\n",
"  #---------#\n",
"#-----------#"
], "metadata": { "id": "x1uAVXZEoL0T", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [
"# @title Download the output folder as .zip\n",
"import os\n",
"# Determine if this notebook is running on Colab or Kaggle\n",
"# Use https://www.kaggle.com/ if the Google Colab GPU is busy\n",
"home_directory = '/content/'\n",
"using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
"if using_Kaggle : home_directory = '/kaggle/working/'\n",
"%cd {home_directory}\n",
"#-------#\n",
"#os.remove(f'{home_directory}results.zip')\n",
"root_output_folder = home_directory + 'output/'\n",
"zip_dest = f'{home_directory}results.zip' #drive/MyDrive\n",
"!zip -r {zip_dest} {root_output_folder}"
], "metadata": { "id": "zivBNrw9uSVD", "cellView": "form" }, "execution_count": null, "outputs": [] },
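{ "cell_type": "markdown", "source": [
"The quick-fix cell below re-normalizes and re-quantizes the corpus shards. For reference , here is a self-contained toy round-trip of the uint8 scheme it assumes (made-up values , not a real shard) : each row stores [zero_point , round(value/scale + zero_point)] , and decoding is scale * (q - zero_point).\n"
], "metadata": {} }, { "cell_type": "code", "source": [
"# Toy round-trip of the uint8 quantization used for the text encodings\n",
"import torch\n",
"scale = 0.0043\n",
"values = torch.randn(768)\n",
"values = values / values.norm(p=2, dim=-1, keepdim=True) / scale # unit norm , pre-scaled\n",
"zero_point = int(-values.min().round().item()) # shift so every stored value is >= 0\n",
"q = torch.round(values + zero_point).clamp(0, 255)\n",
"row = torch.cat([torch.tensor([zero_point] , dtype=torch.float32), q]).to(torch.uint8) # [zero_point, q_0 .. q_767]\n",
"decoded = scale * (row[1:].to(torch.float32) - row[0].to(torch.float32))\n",
"print(f'max abs error : {(decoded - scale*values).abs().max().item():.6f}')"
], "metadata": {}, "execution_count": null, "outputs": [] },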
{ "cell_type": "code", "source": [
"# @title βš„ Quick fix for normalizing encoded text corpus tensors\n",
"\n",
"import os\n",
"my_mkdirs('/content/output')\n",
"my_mkdirs('/content/output/text_encodings')\n",
"\n",
"for filename in os.listdir(f'{prompts_folder}'):\n",
"  %cd {prompts_folder}\n",
"  prompts = {}\n",
"  with open(f'{filename}', 'r') as f:\n",
"    data = json.load(f).items()\n",
"    for key,value in data:\n",
"      prompts[key] = value\n",
"  #------#\n",
"  num_items = int(prompts['num_items'])\n",
"\n",
"  %cd {encodings_folder}\n",
"  enc_filename = filename.replace('json', 'safetensors')\n",
"  _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n",
"  text_encodings = torch.zeros(num_items , dim)\n",
"  tmp = torch.ones(dim)\n",
"  tmp2 = torch.tensor(1/0.0043)\n",
"  zero_point = 0\n",
"  for index in range(num_items):\n",
"    # Dequantize in float32 first : subtracting in uint8 would wrap below zero\n",
"    text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1].to(torch.float32) , tmp , alpha= _text_encodings[index][0].item())\n",
"    text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n",
"    test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
"    less_than_zero = test<0\n",
"    while(torch.any(less_than_zero).item()):\n",
"      zero_point = zero_point + 1\n",
"      test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n",
"      less_than_zero = test<0\n",
"    #------#\n",
"    _text_encodings[index][0] = zero_point\n",
"    _text_encodings[index][1:dim+1] = test\n",
"  #-------#\n",
"  %cd /content/output/text_encodings\n",
"\n",
"  tmp = {}\n",
"  tmp['weights'] = _text_encodings.to(torch.uint8)\n",
"  tmp['num_items'] = torch.tensor(num_items).to(torch.int32) # int32 : num_items can exceed 255\n",
"  tmp['scale'] = torch.tensor(0.0043)\n",
"  save_file(tmp , f'{enc_filename}')\n",
"#------#"
], "metadata": { "cellView": "form", "id": "9qgHW1Wr7kZn" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [
"# Check the average value for this set\n",
"# (assumes vocab_encodings , average and prompts are defined from an earlier run)\n",
"sims = torch.matmul(vocab_encodings.dequantize(),average.t())\n",
"sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
"for index in range(10):\n",
"  print(prompts[f'{indices[index].item()}'])"
], "metadata": { "id": "XNHz0hfhHRUu" }, "execution_count": null, "outputs": [] } ] }