import torch
import gradio as gr
from PIL import Image

from transformers import pipeline

# Run on GPU when available; BLIP captioning is much slower on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Image-captioning pipeline backed by the BLIP large checkpoint.
caption_image = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-large",
    device=device,
)
def caption_my_image(pil_image: Image.Image) -> str:
    # The pipeline returns a list of dicts such as
    # [{"generated_text": "a dog sitting on a bench"}]; keep the first caption.
    semantics = caption_image(images=pil_image)[0]["generated_text"]
    return semantics
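# A minimal sketch of exercising the captioner without launching the UI;
# "example.jpg" is a hypothetical local file used purely for illustration:
#
#     img = Image.open("example.jpg")
#     print(caption_my_image(img))  # e.g. "a dog playing in the grass"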
demo = gr.Interface(
    fn=caption_my_image,
    inputs=[gr.Image(label="Select Image", type="pil")],
    outputs=[gr.Textbox(label="Image Caption")],
    title="Image Captioning",
    description="Upload an image and the model will generate a caption for it.",
)

demo.launch()