{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VjYy0F2gZIPR"
      },
      "outputs": [],
      "source": [
        "!pip install gradio bitsandbytes transformers==4.43.3\n",
        "\n",
        "!wget https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/resolve/main/wpkklhc6/image_adapter.pt -O /content/image_adapter.pt\n",
        "!wget https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/raw/main/wpkklhc6/config.yaml -O /content/config.yaml\n",
        "\n",
        "import gradio as gr\n",
        "from huggingface_hub import InferenceClient\n",
        "from torch import nn\n",
        "from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n",
        "from pathlib import Path\n",
        "import torch\n",
        "import torch.amp.autocast_mode\n",
        "from PIL import Image\n",
        "import os\n",
        "\n",
        "CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n",
        "VLM_PROMPT = \"A descriptive caption for this image:\\n\"\n",
        "# MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B\"\n",
        "MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n",
        "CHECKPOINT_PATH = Path(\"wpkklhc6\")\n",
        "\n",
        "class ImageAdapter(nn.Module):\n",
        "\tdef __init__(self, input_features: int, output_features: int):\n",
        "\t\tsuper().__init__()\n",
        "\t\tself.linear1 = nn.Linear(input_features, output_features)\n",
        "\t\tself.activation = nn.GELU()\n",
        "\t\tself.linear2 = nn.Linear(output_features, output_features)\n",
        "\n",
        "\tdef forward(self, vision_outputs: torch.Tensor):\n",
        "\t\tx = self.linear1(vision_outputs)\n",
        "\t\tx = self.activation(x)\n",
        "\t\tx = self.linear2(x)\n",
        "\t\treturn x\n",
        "\n",
        "# Load CLIP\n",
        "print(\"Loading CLIP\")\n",
        "clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n",
        "clip_model = AutoModel.from_pretrained(CLIP_PATH)\n",
        "clip_model = clip_model.vision_model\n",
        "clip_model.eval()\n",
        "clip_model.requires_grad_(False)\n",
        "clip_model.to(\"cuda\")\n",
        "\n",
        "# Tokenizer\n",
        "print(\"Loading tokenizer\")\n",
        "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, load_in_4bit=True, use_fast=False)\n",
        "assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n",
        "\n",
        "# LLM\n",
        "print(\"Loading LLM\")\n",
        "text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_4bit=True, device_map=\"auto\", torch_dtype=torch.float16)\n",
        "text_model.eval()\n",
        "\n",
        "# Image Adapter\n",
        "print(\"Loading image adapter\")\n",
        "image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size)\n",
        "image_adapter.load_state_dict(torch.load(\"/content/image_adapter.pt\", map_location=\"cpu\"))\n",
        "image_adapter.eval()\n",
        "image_adapter.to(\"cuda\")\n",
        "\n",
        "@torch.inference_mode()\n",
        "def stream_chat(input_image: Image.Image):\n",
        "\ttorch.cuda.empty_cache()\n",
        "\n",
        "\t# Preprocess image\n",
        "\timage = clip_processor(images=input_image, return_tensors='pt').pixel_values\n",
        "\timage = image.to('cuda')\n",
        "\n",
        "\t# Tokenize the prompt\n",
        "\tprompt = tokenizer.encode(VLM_PROMPT, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n",
        "\n",
        "\t# Embed image\n",
        "\twith torch.amp.autocast_mode.autocast('cuda', enabled=True):\n",
        "\t\tvision_outputs = clip_model(pixel_values=image, output_hidden_states=True)\n",
        "\t\timage_features = vision_outputs.hidden_states[-2]\n",
        "\t\tembedded_images = image_adapter(image_features)\n",
        "\t\tembedded_images = embedded_images.to('cuda')\n",
        "\n",
        "\t# Embed prompt\n",
        "\tprompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n",
        "\tassert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n",
        "\tembedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n",
        "\n",
        "\t# Construct prompts\n",
        "\tinputs_embeds = torch.cat([\n",
        "\t\tembedded_bos.expand(embedded_images.shape[0], -1, -1),\n",
        "\t\tembedded_images.to(dtype=embedded_bos.dtype),\n",
        "\t\tprompt_embeds.expand(embedded_images.shape[0], -1, -1),\n",
        "\t], dim=1)\n",
        "\n",
        "\tinput_ids = torch.cat([\n",
        "\t\ttorch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n",
        "\t\ttorch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n",
        "\t\tprompt,\n",
        "\t], dim=1).to('cuda')\n",
        "\tattention_mask = torch.ones_like(input_ids)\n",
        "\n",
        "\t#generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=False, suppress_tokens=None)\n",
        "\tgenerate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, top_k=10, temperature=0.5, suppress_tokens=None)\n",
        "\n",
        "\t# Trim off the prompt\n",
        "\tgenerate_ids = generate_ids[:, input_ids.shape[1]:]\n",
        "\tif generate_ids[0][-1] == tokenizer.eos_token_id:\n",
        "\t\tgenerate_ids = generate_ids[:, :-1]\n",
        "\n",
        "\tcaption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n",
        "\n",
        "\treturn caption.strip()\n",
        "\n",
        "\n",
        "with gr.Blocks(css=\".gradio-container {max-width: 544px !important}\", analytics_enabled=False) as demo:\n",
        "\twith gr.Row():\n",
        "\t\twith gr.Column():\n",
        "\t\t\tinput_image = gr.Image(type=\"pil\", label=\"Input Image\")\n",
        "\t\t\trun_button = gr.Button(\"Caption\")\n",
        "\t\t\toutput_caption = gr.Textbox(label=\"Caption\")\n",
        "\trun_button.click(fn=stream_chat, inputs=[input_image], outputs=[output_caption])\n",
        "\n",
        "demo.queue().launch(share=True, inline=False, debug=True)"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "gpuType": "T4",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}