import gradio as gr
import cv2
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from models.image_text_transformation import ImageTextTransformation
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt4'], default='gpt-3.5-turbo',
                    help='GPT version used to generate the final paragraph')
# Note: action='store_true' together with default=True means the three flags
# below are always on; see the toggleable sketch after the device setup.
parser.add_argument('--image_caption', action='store_true', default=True,
                    help='Set this flag to True if you want to use BLIP2 image captioning')
parser.add_argument('--dense_caption', action='store_true', default=True,
                    help='Set this flag to True if you want to use dense captioning')
parser.add_argument('--semantic_segment', action='store_true', default=True,
                    help='Set this flag to True if you want to use semantic segmentation')
parser.add_argument('--sam_arch', choices=['vit_b', 'vit_l', 'vit_h'], default='vit_b',
                    help='vit_b is the default model (fast but less accurate); vit_l and vit_h are larger, more accurate models')
parser.add_argument('--captioner_base_model', choices=['blip', 'blip2'], default='blip',
                    help='blip2 requires about 15 GB of GPU memory; blip requires about 6 GB')
parser.add_argument('--region_classify_model', choices=['ssa', 'edit_anything'], default='edit_anything',
                    help='Region classification model: edit_anything is about ten times faster than ssa, but less accurate')
parser.add_argument('--image_caption_device', choices=['cuda', 'cpu'], default='cuda',
                    help='Device for image captioning; a GPU with more than 14 GB of memory is recommended')
parser.add_argument('--dense_caption_device', choices=['cuda', 'cpu'], default='cuda',
                    help='Device for dense captioning; GPUs with less than 6 GB of memory are not recommended')
parser.add_argument('--semantic_segment_device', choices=['cuda', 'cpu'], default='cuda',
                    help='Device for semantic segmentation; a GPU with more than 14 GB of memory is recommended. '
                         'Make sure this model and the image_caption model are on the same device.')
parser.add_argument('--contolnet_device', choices=['cuda', 'cpu'], default='cpu',
                    help='Device for ControlNet; GPUs with less than 6 GB of memory are not recommended')
args = parser.parse_args()

# Route every sub-model to the same device; fall back to CPU when CUDA is unavailable.
device = "cuda" if torch.cuda.is_available() else "cpu"
# device = "cpu"  # uncomment to force CPU inference
args.image_caption_device = device
args.dense_caption_device = device
args.semantic_segment_device = device
args.contolnet_device = device  # the 'contolnet' spelling matches the flag above and downstream code
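# The three feature flags above can never actually be turned off from the
# command line: argparse's store_true action only ever sets True, and the
# default is already True. A minimal sketch of a toggleable alternative,
# assuming Python 3.9+ for argparse.BooleanOptionalAction ('toggle_parser'
# is illustrative only, not part of this repo, and is unused below):
toggle_parser = argparse.ArgumentParser()
toggle_parser.add_argument('--image_caption', action=argparse.BooleanOptionalAction,
                           default=True)  # accepts both --image_caption and --no-image_caption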
def pil_image_to_base64(image):
    # Serialize a PIL image to a base64 string so it can be inlined as an HTML data URI.
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return img_str

def add_logo():
    # Read the project logo and return it as a base64 string for embedding in the UI.
    with open("examples/logo.png", "rb") as f:
        logo_base64 = base64.b64encode(f.read()).decode()
    return logo_base64

def process_image(image_src, options=None, processor=None):
    print(options)
    if options is None:
        options = []
    processor.args.semantic_segment = "Semantic Segment" in options
    image_generation_status = "Image Generation" in options
    image_caption, dense_caption, region_semantic, gen_text = processor.image_to_text(image_src)
    # Combine the outputs into a single HTML output
    custom_output = f'''
    <h2>Image to Text</h2>
    <p><strong>Image Caption:</strong> {image_caption}</p>
    <p><strong>Dense Caption:</strong> {dense_caption}</p>
    <p><strong>Region Semantic:</strong> {region_semantic}</p>
    <p><strong>GPT Reasoning:</strong> {gen_text}</p>
    '''
    if image_generation_status:
        gen_image = processor.text_to_image(gen_text)
        gen_image_str = pil_image_to_base64(gen_image)
        custom_output += f'''
    <h2>Text to Image</h2>
    <img src="data:image/jpeg;base64,{gen_image_str}" alt="Generated image">
    '''
    return custom_output
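# --- The section above ends before the UI is constructed. A minimal sketch of
# how process_image could be wired into a Gradio interface; the widget choices,
# labels, and the assumption that ImageTextTransformation takes the parsed args
# are illustrative, not the repo's verbatim UI:
if __name__ == '__main__':
    processor = ImageTextTransformation(args)
    interface = gr.Interface(
        fn=lambda image, options: process_image(image, options, processor),
        inputs=[
            gr.Image(type="filepath", label="Input Image"),
            gr.CheckboxGroup(choices=["Image Generation", "Semantic Segment"],
                             label="Options"),
        ],
        outputs=gr.HTML(label="Result"),
        title="Image to Paragraph",
    )
    interface.launch()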