{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "markdown", "source": [ "Try this Free online SD 1.5 generator with the results: https://perchance.org/fusion-ai-image-generator\n", "\n", " This Notebook is a Stable-diffusion tool which allows you to find similiar prompts to an existing prompt. It uses the Nearest Neighbor decoder method listed here:https://arxiv.org/pdf/2303.03032" ], "metadata": { "id": "cRV2YWomjMBU" } }, { "cell_type": "code", "source": [ "# @title βš„ Initialize\n", "\n", "import os\n", "home_directory = '/content/'\n", "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n", "if using_Kaggle : home_directory = '/kaggle/working/'\n", "%cd {home_directory}\n", "\n", "def fix_bad_symbols(txt):\n", " result = txt\n", " for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n", " result = result.replace(symbol,'\\\\' + symbol)\n", " #------#\n", " return result;\n", "\n", "def my_mkdirs(folder):\n", " if os.path.exists(folder)==False:\n", " os.makedirs(folder)\n", "\n", "#πŸ”ΈπŸ”Ή\n", "# Load the data if not already loaded\n", "try:\n", " loaded\n", "except:\n", " from safetensors.torch import load_file , save_file\n", " import json , torch , requests , math\n", " import pandas as pd\n", " from PIL import Image\n", " import cv2\n", " from matplotlib import pyplot as plt\n", " #----#\n", " %cd {home_directory}\n", " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n", " loaded = True\n", " %cd {home_directory + 'fusion-t2i-generator-data/'}\n", " !unzip reference.zip\n", "\n", "from transformers import AutoTokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n", "from transformers import CLIPProcessor, CLIPModel\n", "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n", "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n", "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n", "\n", "#------#\n", "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", "with open(f'reference_prompts.json', 'r') as f:\n", " data = json.load(f)\n", " _df = pd.DataFrame({'count': data})['count']\n", " target_prompts = {\n", " key : value for key, value in _df.items()\n", " }\n", "#------#\n", "with open(f'reference_urls.json', 'r') as f:\n", " data = json.load(f)\n", " _df = pd.DataFrame({'count': data})['count']\n", " target_urls = {\n", " key : value for key, value in _df.items()\n", " }\n", "\n", "#------#\n", "dot_dtype = torch.float32\n", "dim = 768\n", "ref = torch.zeros(dim).to(dtype = dot_dtype)" ], "metadata": { "id": "TC5lMJrS1HCC", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "The visualization has no effect on the output. It will only be used if you enable the 'Show encoding' checkbox" ], "metadata": { "id": "OpOoRmaP3u2H" } }, { "cell_type": "code", "source": [ "# @title βš„ Define parameters for visalizing the reference in a 16x16 grid
(the visualization settings has no effect on output)\n", "from PIL import Image, ImageDraw\n", "SCALE = 0.0002 # @param {type:\"slider\", min:0.0001, max:0.001, step:0.00001}\n", "ZERO_POINT = 100 # @param {type:\"slider\", min:0, max:300, step:1}\n", "CELL_SIZE = 16\n", "\n", "BORDER_THICKNESS = 4\n", "\n", "def visualize(_ref):\n", " RGB_tensor = (torch.round(_ref/SCALE)+torch.ones(dim)*ZERO_POINT)\n", " cellsize = CELL_SIZE\n", " tick = round(cellsize/2)\n", " border_offset = round(BORDER_THICKNESS/2)\n", " width = 16*cellsize + BORDER_THICKNESS\n", " height = 16*cellsize + BORDER_THICKNESS\n", " image = Image.new('RGB', (width, height), (0, 0, 0))\n", " draw = ImageDraw.Draw(image)\n", " for row in range(16):\n", " for col in range(16):\n", " tmp = 3*row*col\n", " r = max(0,min(255,int(RGB_tensor[tmp].item())))\n", " g = max(0,min(255,int(RGB_tensor[tmp+1].item())))\n", " b = max(0,min(255,int(RGB_tensor[tmp+2].item())))\n", " fillColor = (r,g,b)\n", " x0 = row*cellsize +border_offset\n", " y0 = (15-col)*cellsize +border_offset\n", " x1 = row*cellsize + 2*tick + border_offset\n", " y1 = (15-col)*cellsize + 2*tick + border_offset\n", " shape = [(x0, y0), (x1, y1)]\n", " draw.rectangle(shape, fill=fillColor, outline=(0,0,0))\n", " return (image)\n", "\n", "num_plots = 1\n", "try:\n", " %cd /content/\n", " _ref = load_file('reference.safetensors' )\n", " num_plots = num_plots+1\n", "except: _ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "#-----#\n", "try: ref\n", "except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "\n", "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "show_encoding = True # @param {type:\"boolean\"}\n", "#------#\n", "if show_encoding:\n", " # create figure\n", " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", " fig.patch.set_facecolor((56/255,56/255,56/255))\n", " rows = 1\n", " columns = num_plots\n", " fig.add_subplot(rows, columns, 1)\n", " plt.imshow( visualize(ref))\n", " plt.axis('off')\n", " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n", " if num_plots>1:\n", " fig.add_subplot(rows, columns, 2)\n", " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n", " plt.axis('off')\n", " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n", " #------#\n", "\n", "print(f'Using settings SCALE = {SCALE} and ZERO_POINT = {ZERO_POINT} for visualizing the text_encoding')" ], "metadata": { "id": "YDu8XlehhWID", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Paste a prompt in the cell below to create an encoding**\n", "\n" ], "metadata": { "id": "Xf9zoq-Za3wi" } }, { "cell_type": "code", "source": [ "\n", "# @markdown πŸ“ Write a text prompt (this will overwrite any savefile already stored)\n", "NEW_ENCODING = '' # @param {type:'string' ,placeholder:'write a prompt'}\n", "enable = True # @param {type:\"boolean\"}\n", "# @markdown -----\n", "# @markdown πŸ“ Enhance/Penalize Similarity and skip items containing word(s)\n", "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "# @markdown -----\n", "# @markdown logarithmic prompt strength x for value 10^(x-1)\n", "_POS = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "_NEG = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "# @markdown -----\n", "# @markdown Check similiarity for this encoding against any written prompt(s)\n", "# 
@title βš„ Evaluate saved reference similarity to select items (optional)\n", "EVAL = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "\n", "show_local_reference = True # @param {type:\"boolean\"}\n", "show_encoding = True # @param {type:\"boolean\"}\n", "\n", "try:\n", "  %cd /content/\n", "  _ref = load_file('reference.safetensors' )\n", "  ref = _ref['weights'].to(dot_dtype)\n", "except:\n", "  ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "  _ref = {}\n", "  _ref['weights'] = ref\n", "  %cd /content/\n", "  save_file(_ref, 'reference.safetensors')\n", "#-----#\n", "\n", "if NEW_ENCODING.strip() != '':\n", "  item = NEW_ENCODING.strip()\n", "  inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", "  ref = model.get_text_features(**inputs)[0]\n", "  ref = ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n", "#------#\n", "\n", "try: ref\n", "except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "\n", "if EVAL.strip() != '':\n", "  print(\"Saved Reference:\\n\")\n", "  for item in EVAL.split(','):\n", "    if item.strip()=='':continue\n", "    inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", "    test = model.get_text_features(**inputs)[0]\n", "    test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n", "    ref = ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n", "    eval = torch.dot(ref , test)\n", "    print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n", "  #-----#\n", "  if(show_local_reference):\n", "    print(\"\\n---------\\nLocal Reference with enhancements added:\\n\")\n", "\n", "    for _item in POS.split(','):\n", "      item = _item.strip()\n", "      if item == '':continue\n", "      inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", "      ref = ref + math.pow(10,_POS-1) * model.get_text_features(**inputs)[0]\n", "    #-------#\n", "\n", "    for _item in NEG.split(','):\n", "      item = _item.strip()\n", "      if item == '':continue\n", "      inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", "      ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n", "    #-------#\n", "\n", "    ref = ref/ref.norm(p=2 , dim=-1 , keepdim=True)\n", "    for item in EVAL.split(','):\n", "      if item.strip()=='':continue\n", "      inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", "      test = model.get_text_features(**inputs)[0]\n", "      test = test/test.norm(p=2 , dim = -1 , keepdim = True)\n", "      eval = torch.dot(ref , test)\n", "      print(f'{item.strip()} : {round(eval.item()*100, 2)}%')\n", "    #-----#\n", "\n", "  if show_encoding:\n", "    # create figure\n", "    fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", "    fig.patch.set_facecolor((56/255,56/255,56/255))\n", "    rows = 1\n", "    columns = 3\n", "    fig.add_subplot(rows, columns, 1)\n", "    plt.imshow( visualize(ref))\n", "    plt.axis('off')\n", "    plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n", "    if num_plots>1:\n", "      fig.add_subplot(rows, columns, 2)\n", "      plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n", "      plt.axis('off')\n", "      plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n", "\n", "      fig.add_subplot(rows, columns, 3)\n", "      plt.imshow( visualize(ref - _ref['weights'].to(dot_dtype)))\n", "      plt.axis('off')\n", "      plt.title(\"Changes\", color='white', fontsize=round(20*image_size))\n", "    #------#\n", "\n", "\n" ], "metadata": { "id": "Oxi6nOyrUTAe" }, "execution_count":
null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Use a pre-encoded image+prompt pair as reference (optional)**" ], "metadata": { "id": "f9_AcquM7AYZ" } }, { "cell_type": "code", "source": [ "\n", "loaded_ref = False\n", "try:\n", " ref\n", " loaded_ref = True\n", "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "if loaded_ref : prev_ref = ref.clone().detach()\n", "\n", "try:prompt\n", "except: prompt = ''\n", "\n", "# @markdown πŸ–ΌοΈ+πŸ“ Choose a pre-encoded reference (note: some results are NSFW!)\n", "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n", "PROMPT_INDEX = index\n", "prompt = target_prompts[f'{PROMPT_INDEX}']\n", "url = target_urls[f'{PROMPT_INDEX}']\n", "if url.find('perchance')>-1:\n", " image = Image.open(requests.get(url, stream=True).raw)\n", "#------#\n", "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n", "# @markdown βš–οΈ πŸ–ΌοΈ encoding <-----?-----> πŸ“ encoding
\n", "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", "image_size = 0.57 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "show_encoding = True # @param {type:\"boolean\"}\n", "\n", "if(not method == 'Do nothing'):\n", " if method == 'Refresh': ref = torch.zeros(dim).to(dtype = dot_dtype)\n", " if method == 'Subtract from existing ref':\n", " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", " else:\n", " ref = torch.add(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", " ref = torch.add(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", " #---------#\n", " references = '' # Clear up memory\n", " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n", " ref = ref.clone().detach()\n", " #------#\n", " # create figure\n", " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", " fig.patch.set_facecolor((56/255,56/255,56/255))\n", " rows = 1\n", " columns = 1\n", " if show_encoding: columns = columns+1\n", " if show_encoding and loaded_ref : columns = columns+1\n", " fig.add_subplot(rows, columns, 1)\n", " plt.imshow(image)\n", " plt.axis('off')\n", " plt.title(f\"Reference image at index={index}\" , color='white' , fontsize=round(20*image_size))\n", " #-----#\n", " if show_encoding and loaded_ref:\n", " fig.add_subplot(rows, columns, columns-1)\n", " plt.imshow( visualize(prev_ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n", " print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n", "\n", " if show_encoding:\n", " fig.add_subplot(rows, columns, columns)\n", " plt.imshow( visualize(ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n", " #------#\n" ], "metadata": { "id": "BwrEs5zVB0Sb" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Use an image as a reference via URL (optional)**" ], "metadata": { "id": "KI9Ho6CG7m3Z" } }, { "cell_type": "code", "source": [ "\n", "loaded_ref = False\n", "try:\n", " ref\n", " loaded_ref = True\n", "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "if loaded_ref : prev_ref = ref.clone().detach()\n", "\n", "try:prompt\n", "except: prompt = ''\n", "\n", "# @markdown πŸ–ΌοΈ Upload your own image for use as reference via URL (optional)\n", "URL = '' # @param {type:'string' ,placeholder:'paste an url here'}\n", "if URL.strip() != '':\n", " image = Image.open(requests.get(URL, stream=True).raw)\n", " log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", " method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", " image_size = 0.79 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", " show_encoding = True # @param {type:\"boolean\"}\n", " #---------#\n", " if(not method == 'Do nothing'):\n", " # Get image features\n", " inputs = processor(images=image, return_tensors=\"pt\")\n", " image_features = model.get_image_features(**inputs)\n", " 
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", " #-------#\n", " if method == 'Refresh':\n", " ref = torch.zeros(dim).to(dtype = dot_dtype)\n", " if method == 'Subtract from existing ref':\n", " ref = ref - math.pow(10,log_strength-1)*image_features\n", " else: ref = ref + math.pow(10,log_strength-1)*image_features\n", " #-----#\n", " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n", " ref = ref[0]\n", " ref = ref.clone().detach()\n", " #------#\n", " # create figure\n", " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", " fig.patch.set_facecolor((56/255,56/255,56/255))\n", " rows = 1\n", " columns = 1\n", " if show_encoding: columns = 2\n", " if show_encoding and loaded_ref : columns = 3\n", " fig.add_subplot(rows, columns, 1)\n", " plt.imshow(image)\n", " plt.axis('off')\n", " plt.title(\"Reference image from URL\" , color='white' , fontsize=round(20*image_size))\n", " #-----#\n", " if show_encoding and loaded_ref:\n", " fig.add_subplot(rows, columns, columns-1)\n", " plt.imshow( visualize(prev_ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n", " if show_encoding:\n", " fig.add_subplot(rows, columns, columns)\n", " plt.imshow( visualize(ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n", " #------#" ], "metadata": { "id": "IqUsiQw2HU2C" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Use an image as a reference via uploading it to the /content/ folder (optional)**" ], "metadata": { "id": "MBPi7F8S7tg3" } }, { "cell_type": "code", "source": [ "\n", "loaded_ref = False\n", "try:\n", " ref\n", " loaded_ref = True\n", "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "if loaded_ref : prev_ref = ref.clone().detach()\n", "\n", "try:prompt\n", "except: prompt = ''\n", "\n", "# @markdown πŸ–ΌοΈ Upload your own image for use as reference via URL (optional)\n", "FILENAME = '' # @param {type:'string' ,placeholder:'IMG_123.png'}\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "show_encoding = True # @param {type:\"boolean\"}\n", "\n", "if FILENAME.strip() != '':\n", " %cd /content/\n", " image = cv2.imread(FILENAME)\n", " b,g,r = cv2.split(image)\n", " image = cv2.merge([r,g,b])\n", " #---------#\n", " if(not method == 'Do nothing'):\n", " # Get image features\n", " inputs = processor(images=image, return_tensors=\"pt\")\n", " image_features = model.get_image_features(**inputs)\n", " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", " #-------#\n", " if method == 'Refresh':\n", " ref = torch.zeros(dim).to(dtype = dot_dtype)\n", " if method == 'Subtract from existing ref':\n", " ref = ref - math.pow(10,log_strength-1)*image_features\n", " else: ref = ref + math.pow(10,log_strength-1)*image_features\n", " #-----#\n", " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n", " ref = ref[0]\n", " ref = ref.clone().detach()\n", " #------#\n", " # create figure\n", " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", " fig.patch.set_facecolor((56/255,56/255,56/255))\n", " rows = 1\n", " columns = 1\n", " if show_encoding: columns = 2\n", " if show_encoding and loaded_ref : columns = 3\n", " 
fig.add_subplot(rows, columns, 1)\n", " plt.imshow(image)\n", " plt.axis('off')\n", " plt.title(f\"Reference image from uploaded image {FILENAME}\" , color='white' , fontsize=round(20*image_size))\n", " #-----#\n", " if show_encoding and loaded_ref:\n", " fig.add_subplot(rows, columns, columns-1)\n", " plt.imshow( visualize(prev_ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n", " if show_encoding:\n", " fig.add_subplot(rows, columns, columns)\n", " plt.imshow( visualize(ref))\n", " plt.axis('off')\n", " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n", " #------#" ], "metadata": { "id": "I_-GOwFPKkha" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Save the reference prior to running the Interrogator**" ], "metadata": { "id": "zeu6JcM-mk9z" } }, { "cell_type": "code", "source": [ "# @title βš„ Save the reference\n", "\n", "loaded_ref = False\n", "try:\n", " ref\n", " loaded_ref = True\n", "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "if loaded_ref : prev_ref = ref.clone().detach()\n", "\n", "try:prompt\n", "except: prompt = ''\n", "\n", "reset_everything = False # @param {type:\"boolean\"}\n", "_ref = {}\n", "ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n", "if (reset_everything) : ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "_ref['weights'] = ref.to(dot_dtype)\n", "%cd /content/\n", "save_file(_ref , 'reference.safetensors' )\n", "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "show_encoding = True # @param {type:\"boolean\"}\n", "#------#\n", "print(\"Saved local encoding to reference.safetensors\")\n", "if show_encoding:\n", " # create figure\n", " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n", " fig.patch.set_facecolor((56/255,56/255,56/255))\n", " rows = 1\n", " columns = num_plots\n", " fig.add_subplot(rows, columns, 1)\n", " plt.imshow( visualize(ref))\n", " plt.axis('off')\n", " plt.title( \"Encoding (local variable)\", color='white', fontsize=round(20*image_size))\n", " if num_plots>1:\n", " fig.add_subplot(rows, columns, 2)\n", " plt.imshow( visualize( _ref['weights'].to(dot_dtype)))\n", " plt.axis('off')\n", " plt.title(\"Encoding (saved file)\", color='white', fontsize=round(20*image_size))\n", " #------#" ], "metadata": { "id": "lOQuTPfBMK82", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "**Run the interrogator**\n", "\n", " Since the list of items is large (>1 million items) you will need to select a range within the sorted results to print." 
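, "\n", "\n", "A minimal sketch of how that window is applied (hypothetical snippet, reusing the notebook's own names; it assumes `sims` holds one similarity score per vocab item and `prompts` maps index strings to prompt text):\n", "\n", "```python\n", "sorted_sims , indices = torch.sort(sims , dim=0 , descending = True)  # highest similarity first\n", "for rank in range(START_AT , START_AT + LIST_SIZE):  # print only the selected window of the sorted list\n", "    print(prompts[f'{indices[rank].item()}'] , sorted_sims[rank].item())\n", "```"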
], "metadata": { "id": "ROKsoZrt7zMe" } }, { "cell_type": "code", "source": [ "# @title βš„ CLIP Interrogator\n", "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "_START_AT = '0' # @param [\"0\", \"10000\", \"50000\"] {allow-input: true}\n", "START_AT = 0\n", "if _START_AT.isnumeric(): START_AT = int(_START_AT)\n", "\n", "output_folder = home_directory + 'results/'\n", "output_folder_sims = home_directory + 'results/sims/'\n", "my_mkdirs(output_folder)\n", "my_mkdirs(output_folder_sims)\n", "\n", "\n", "\n", "# @markdown -----\n", "# @markdown Select vocab\n", "general = True # @param {type:\"boolean\"}\n", "civit9 = True # @param {type:\"boolean\"}\n", "fanfic1 = False # @param {type:\"boolean\"}\n", "fanfic2 = False # @param {type:\"boolean\"}\n", "# @markdown -----\n", "# @title βš„ New interrogator code using quantized text corpus\n", "%cd /content/\n", "_ref = load_file('reference.safetensors' )\n", "ref = _ref['weights'].to(dot_dtype)\n", "# @markdown πŸ“ Enhance/Penalize Similarity and skip items containing word(s)\n", "POS1 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "POS2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "NEG = ''# @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "SKIP = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n", "min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n", "def isBlacklisted(_txt):\n", " blacklist = SKIP.lower().replace('' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n", " if blacklist == '': return False\n", " txt = _txt.lower().strip()\n", " if len(txt) -1 : return True\n", " #------#\n", " found = False\n", " alphabet = 'abcdefghijklmnopqrstuvxyz'\n", " for letter in alphabet:\n", " found = txt.find(letter)>-1\n", " if found:break\n", " #------#\n", " return not found\n", "# @markdown -----\n", "# @markdown logarithmic prompt strength x for value 10^(x-1)\n", "_POS1 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "_POS2 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "_NEG = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", "# @markdown -----\n", "# @markdown Save similarity as a list for later review (this will slow down the code)\n", "save_similiarity = True # @param {type:\"boolean\"}\n", "# @markdown -----\n", "include_similiarity = False # @param {type:\"boolean\"}\n", "print_as_list = False # @param {type:\"boolean\"}\n", "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n", "#-----#\n", "for _item in POS1.split(','):\n", " item = _item.strip()\n", " if item == '':continue\n", " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", " ref = ref + math.pow(10,_POS1-1) * model.get_text_features(**inputs)[0]\n", "#-------#\n", "for _item in POS2.split(','):\n", " item = _item.strip()\n", " if item == '':continue\n", " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", " ref = ref + math.pow(10,_POS2-1) * model.get_text_features(**inputs)[0]\n", "#-------#\n", "for _item in NEG.split(','):\n", " item = _item.strip()\n", " if item == '':continue\n", " inputs = tokenizer(text = item.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n", " ref = ref + math.pow(10,_NEG-1) * model.get_text_features(**inputs)[0]\n", "#------#\n", "ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n", "vocab_to_load = ''\n", "if 
(general): vocab_to_load = vocab_to_load + 'general , '\n", "if (civit9): vocab_to_load = vocab_to_load + 'civit9 , '\n", "if (fanfic1): vocab_to_load = vocab_to_load + 'fanfic1 , '\n", "if (fanfic2): vocab_to_load = vocab_to_load + 'fanfic2 , '\n", "vocab_to_load = (vocab_to_load +'}').replace(' , }' , '')\n", "multi = vocab_to_load.find(',')>-1\n", "#-----#\n", "prompts_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text'\n", "encodings_folder = f'{home_directory}fusion-t2i-generator-data/vocab-v2/text_encodings'\n", "#----#\n", "scale = 0.0043\n", "size = 0\n", "#------#\n", "total_items = 0\n", "for filename in os.listdir(prompts_folder):\n", " if (not general and filename.find('general')>-1):continue\n", " if (not civit9 and filename.find('civit9')>-1):continue\n", " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n", " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n", " size = size + LIST_SIZE\n", "#-------#\n", "similiar_sims = torch.zeros(size)\n", "similiar_prompts = {}\n", "_index = 0\n", "#-------#\n", "similiar_encodings = {}\n", "for filename in os.listdir(prompts_folder):\n", " if (not general and filename.find('general')>-1):continue\n", " if (not civit9 and filename.find('civit9')>-1):continue\n", " if (not fanfic1 and filename.find('fanfic1')>-1):continue\n", " if (not fanfic2 and filename.find('fanfic2')>-1):continue\n", " #------#\n", " root_filename = filename.replace('.json', '')\n", " %cd {prompts_folder}\n", " prompts = {}\n", " with open(f'{root_filename}.json', 'r') as f:\n", " data = json.load(f).items()\n", " for key,value in data:\n", " prompts[key] = value\n", " num_items = int(prompts['num_items'])\n", " total_items = total_items + num_items\n", " #------#\n", " try:vocab_loaded\n", " except:\n", " vocab_loaded = 'first'\n", " #-----#\n", " if vocab_loaded == 'first' or (vocab_loaded != vocab_to_load and not multi):\n", " %cd {encodings_folder}\n", " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n", " text_encodings = torch.zeros(num_items , dim)\n", " tmp = torch.ones(dim).to(dot_dtype)\n", " for index in range(num_items):\n", " text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].to(dot_dtype))\n", " vocab_loaded = vocab_to_load\n", " #------#\n", " sims = torch.matmul(text_encodings*scale, ref.t())\n", " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n", " tmp = {}\n", " tmp['weights'] = sorted\n", " %cd {output_folder_sims}\n", " save_file(tmp, root_filename + '_sims.safetensors')\n", " tmp={}\n", " #-----#\n", " for index in range(LIST_SIZE + START_AT):\n", " if index0:break\n", " index = index + 1\n", "#----#\n", "positive_bound = index\n", "ss =list(xx)\n", "tmp = 0\n", "chunk = 1\n", "CHUNK_SIZE = 1000\n", "index = 0\n", "for num in reversed(yy):\n", " tmp = tmp + num\n", " if(tmp>CHUNK_SIZE):\n", " _tmp = math.floor(tmp/CHUNK_SIZE)\n", " chunk = chunk + _tmp\n", " tmp = tmp - CHUNK_SIZE * _tmp\n", " ss[num_coords - index] = chunk\n", " index = index + 1\n", "#------#\n", "fig, ax = plt.subplots()\n", "fig.canvas.draw()\n", "plt.plot(ss[positive_bound:], xx[positive_bound:])\n", "plt.xlabel ('Search depth')\n", "plt.ylabel ('Similarity')\n", "plt.title ('Similarity to index')\n", "plt.grid()\n", "indices_depth = [item.get_text() for item in ax.get_xticklabels()]\n", "sim_pcnts = [item.get_text() for item in ax.get_yticklabels()]\n", "\n", "index = 0\n", "for index_depth in indices_depth:\n", 
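"  # label x-ticks in thousands: each unit of 'ss' is one chunk of CHUNK_SIZE = 1000 items, hence the 'K' suffix\n",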
" indices_depth[index] = index_depth + 'K'\n", " index = index + 1\n", "#-------#\n", "\n", "index = 0\n", "for sim_pcnt in sim_pcnts:\n", " sim_pcnts[index] = sim_pcnt + '%'\n", " index = index + 1\n", "#-------#\n", "ax.set_xticklabels(indices_depth)\n", "ax.set_yticklabels(sim_pcnts)\n", "plt.show()" ], "metadata": { "id": "ln6DsZPG99ez" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# @title βš„ Save the results\n", "\n", "def mkdir(folder):\n", " if os.path.exists(folder)==False:\n", " os.makedirs(folder)\n", "#-----#\n", "output_folder = home_directory + 'results'\n", "mkdir(output_folder)\n", "#-----#\n", "try: similiar_prompts\n", "except:similiar_prompts = {}\n", "%cd {output_folder}\n", "print(f'Saving similiar_prompts.json to {output_folder}...')\n", "with open('similiar_prompts.json', 'w') as f:\n", " json.dump(similiar_prompts, f)\n", "#-----#\n", "try: similiar_sims\n", "except: similiar_sims = torch.zeros(dim).to(dot_dtype)\n", "#-------#\n", "_similiar_sims = {}\n", "_similiar_sims['weights'] = similiar_sims.to(dot_dtype)\n", "%cd {output_folder}\n", "print(f'Saving similiar_sims.safetensors to {output_folder}...')\n", "save_file(_similiar_sims, 'similiar_sims.safetensors')\n" ], "metadata": { "id": "m-N553nXz9Jd", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "\n", "# @title βš„ Print results\n", "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n", "include_similiarity = False # @param {type:\"boolean\"}\n", "print_as_list = False # @param {type:\"boolean\"}\n", "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n", "FILENAME = '' # @param {type:'string' ,placeholder:'write .json file to load (optional)'}\n", "_FILENAME = FILENAME.replace('.json' , '')\n", "if _FILENAME.strip() == '': _FILENAME = 'similiar_prompts'\n", "#------#\n", "%cd {output_folder}\n", "with open(f'{_FILENAME}.json', 'r') as f:\n", " data = json.load(f)\n", " _df = pd.DataFrame({'count': data})['count']\n", " similiar_prompts = {\n", " key : value for key, value in _df.items()\n", " }\n", "#-------#\n", "_similiar_sims = load_file('similiar_sims.safetensors')\n", "similiar_sims = _similiar_sims['weights'].to(dot_dtype)\n", "\n", "# @title βš„ Run the CLIP interrogator on the saved reference\n", "\n", "# @markdown Select which values within the saved list to print\n", "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "\n", "if(print_as_list):\n", " for index in range(LIST_SIZE + START_AT):\n", " if index
<START_AT : continue\n", "# @markdown sim = C* text_enc + image_enc*(1-C)
\n", "\n", "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "\n", "# @markdown 🚫 Penalize similarity to this prompt(optional)\n", "if(load_the_data):\n", " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n", " from transformers import AutoTokenizer\n", " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n", " from transformers import CLIPProcessor, CLIPModel\n", " processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n", " model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n", " logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n", "#---------#\n", "\n", "filename = 'blank.json'\n", "path = '/content/text-to-image-prompts/fusion/'\n", "print(f'reading {filename}....')\n", "_index = 0\n", "%cd {path}\n", "with open(f'{filename}', 'r') as f:\n", " data = json.load(f)\n", "#------#\n", "_df = pd.DataFrame({'count': data})['count']\n", "_blank = {\n", " key : value for key, value in _df.items()\n", "}\n", "#------#\n", "\n", "root_savefile_name = 'fusion_C05_X7'\n", "\n", "%cd /content/\n", "output_folder = '/content/output/savefiles/'\n", "my_mkdirs(output_folder)\n", "my_mkdirs('/content/output2/savefiles/')\n", "my_mkdirs('/content/output3/savefiles/')\n", "my_mkdirs('/content/output4/savefiles/')\n", "my_mkdirs('/content/output5/savefiles/')\n", "my_mkdirs('/content/output6/savefiles/')\n", "my_mkdirs('/content/output7/savefiles/')\n", "my_mkdirs('/content/output8/savefiles/')\n", "my_mkdirs('/content/output9/savefiles/')\n", "my_mkdirs('/content/output10/savefiles/')\n", "my_mkdirs('/content/output11/savefiles/')\n", "my_mkdirs('/content/output12/savefiles/')\n", "my_mkdirs('/content/output13/savefiles/')\n", "\n", "\n", "NEG = '' # @param {type:'string'}\n", "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n", "\n", "for index in range(1667):\n", "\n", " PROMPT_INDEX = index\n", " prompt = target_prompts[f'{index}']\n", " url = urls[f'{index}']\n", " if url.find('perchance')>-1:\n", " image = Image.open(requests.get(url, stream=True).raw)\n", " else: continue #print(\"(No image for this ID)\")\n", "\n", " print(f\"no. 
{PROMPT_INDEX} : '{prompt}'\")\n", " text_features_A = target_text_encodings[f'{index}']\n", " image_features_A = target_image_encodings[f'{index}']\n", " # text-similarity\n", " sims = C * torch.matmul(text_tensor, text_features_A.t())\n", "\n", " neg_sims = 0*sims\n", " if(NEG != ''):\n", " # Get text features for user input\n", " inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n", " text_features_NEG = model.get_text_features(**inputs)\n", " text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n", " # text-similarity\n", " neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n", " #------#\n", "\n", " # plus image-similarity\n", " sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n", "\n", " # minus NEG-similarity\n", " sims = sims - neg_sims\n", "\n", " # Sort the items\n", " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n", "\n", " # @markdown Repeat output N times\n", " RANGE = 1000\n", " NUM_CHUNKS = 10+\n", " separator = '|'\n", " _savefiles = {}\n", " #-----#\n", " for chunk in range(NUM_CHUNKS):\n", " if chunk=<10:continue\n", " start_at_index = chunk * RANGE\n", " _prompts = ''\n", " for _index in range(start_at_index + RANGE):\n", " if _index < start_at_index : continue\n", " index = indices[_index].item()\n", " prompt = prompts[f'{index}']\n", " _prompts = _prompts.replace(prompt + separator,'')\n", " _prompts = _prompts + prompt + separator\n", " #------#\n", " _prompts = fix_bad_symbols(_prompts)\n", " _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n", " _savefiles[f'{chunk}'] = _prompts\n", " #---------#\n", " save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n", "\n", "\n", " if (chunk=<20 && chunk>10): %cd '/content/output2/savefiles/'\n", " if (chunk<=30 && chunk>20): %cd '/content/output3/savefiles/'\n", " if (chunk=<40 && chunk>30): %cd '/content/output4/savefiles/'\n", " if (chunk<=50 && chunk>40): %cd '/content/output5/savefiles/'\n", " if (chunk=<60 && chunk>50): %cd '/content/output6/savefiles/'\n", " if (chunk<=70 && chunk>60): %cd '/content/output7/savefiles/'\n", " if (chunk=<80 && chunk>70): %cd '/content/output8/savefiles/'\n", " if (chunk<=90 && chunk>80): %cd '/content/output9/savefiles/'\n", " if (chunk=<100 && chunk>90): %cd '/content/output10/savefiles/'\n", " if (chunk<=110 && chunk>100): %cd '/content/output11/savefiles/'\n", " if (chunk=<120 && chunk>110): %cd '/content/output12/savefiles/'\n", " if (chunk<=130 && chunk>120): %cd '/content/output13/savefiles/'\n", "\n", "\n", " #------#\n", " print(f'Saving savefile {save_filename} to {output_folder}...')\n", " with open(save_filename, 'w') as f:\n", " json.dump(_savefiles, f)\n", " #---------#\n", " continue\n", "#-----------#" ], "metadata": { "id": "x1uAVXZEoL0T", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# Determine if this notebook is running on Colab or Kaggle\n", "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n", "home_directory = '/content/'\n", "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n", "if using_Kaggle : home_directory = '/kaggle/working/'\n", "%cd {home_directory}\n", "#-------#\n", "\n", "# @title Download the text_encodings as .zip\n", "import os\n", "%cd {home_directory}\n", "#os.remove(f'{home_directory}results.zip')\n", "root_output_folder = home_directory + 'output/'\n", "zip_dest = f'/content/results.zip' #drive/MyDrive\n", "!zip -r 
{zip_dest} {root_output_folder}" ], "metadata": { "id": "zivBNrw9uSVD", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# @title \tβš„ Quick fix for normalizing encoded text corpus tensors\n", "\n", "import os\n", "my_mkdirs('/content/output')\n", "my_mkdirs('/content/output/text_encodings')\n", "\n", "for filename in os.listdir(f'{prompts_folder}'):\n", " %cd {prompts_folder}\n", " prompts = {}\n", " with open(f'{filename}', 'r') as f:\n", " data = json.load(f).items()\n", " for key,value in data:\n", " prompts[key] = value\n", " #------#\n", " num_items = int(prompts['num_items'])\n", "\n", " %cd {encodings_folder}\n", " enc_filename = filename.replace('json', 'safetensors')\n", " _text_encodings = load_file(f'{enc_filename}')['weights'].to(torch.uint8)\n", " text_encodings = torch.zeros(num_items , dim)\n", " tmp = torch.ones(dim)\n", " tmp2 = torch.tensor(1/0.0043)\n", " zero_point = 0\n", " for index in range(num_items):\n", " text_encodings[index] = torch.tensor(0.0043) * torch.sub(_text_encodings[index][1:dim+1] , tmp , alpha= _text_encodings[index][0]).to(torch.float32)\n", " text_encodings[index] = tmp2*text_encodings[index]/text_encodings[index].norm(p=2, dim=-1, keepdim = True)\n", " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n", " less_than_zero = test<0\n", " while(torch.any(less_than_zero).item()):\n", " zero_point = zero_point + 1\n", " test = torch.round( torch.add(text_encodings[index],tmp*zero_point))\n", " less_than_zero = test<0\n", " #------#\n", " _text_encodings[index][0] = zero_point\n", " _text_encodings[index][1:dim+1] = test\n", " #-------#\n", " %cd /content/output/text_encodings\n", "\n", " tmp = {}\n", " tmp['weights'] = _text_encodings.to(torch.uint8)\n", " tmp['num_items'] = torch.tensor(num_items).to(torch.uint8)\n", " tmp['scale'] = torch.tensor(0.0043)\n", " save_file(tmp , f'{enc_filename}')\n", "#------#" ], "metadata": { "cellView": "form", "id": "9qgHW1Wr7kZn" }, "execution_count": null, "outputs": [] } ] }