#!/usr/bin/env python

from __future__ import annotations

import os
from typing import Union

import gradio as gr
import PIL.Image
import spaces
import torch
from transformers import AutoProcessor, BlipForConditionalGeneration

DESCRIPTION = "# Image Captioning with LongCap"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# Load the LongCap BLIP captioning model and its processor.
model_id = "unography/blip-long-cap"
processor = AutoProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)

# Download sample images used by the Gradio examples gallery.
torch.hub.download_url_to_file(
    "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg", "demo.jpg"
)
torch.hub.download_url_to_file(
    "https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png", "stop_sign.png"
)
torch.hub.download_url_to_file(
    "https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg", "astronaut.jpg"
)


@spaces.GPU()
def run(image: Union[str, PIL.Image.Image]) -> str:
    # Accept either a file path or an already-loaded PIL image.
    if isinstance(image, str):
        image = PIL.Image.open(image)
    inputs = processor(images=image, return_tensors="pt").to(device)
    out = model.generate(
        pixel_values=inputs.pixel_values,
        num_beams=3,
        repetition_penalty=2.5,
        max_length=300,
    )
    generated_caption = processor.decode(out[0], skip_special_tokens=True)
    return generated_caption


with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    input_image = gr.Image(type="pil")
    run_button = gr.Button("Caption")
    output = gr.Textbox(label="Result")
    gr.Examples(
        examples=[
            "demo.jpg",
            "stop_sign.png",
            "astronaut.jpg",
        ],
        inputs=input_image,
        outputs=output,
        fn=run,
        cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
    )
    run_button.click(
        fn=run,
        inputs=input_image,
        outputs=output,
        api_name="caption",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()