import os

import numpy as np
import pandas as pd
import gradio as gr
from PIL import Image
import torch
import torchvision.transforms as transforms
from transformers import pipeline

# Load the Rao's Virtual Try-On model from Hugging Face.
model_id = "gouthaml/raos-virtual-try-on-model"
tryon_pipeline = pipeline("image-to-image", model=model_id)

# Sample product catalogue: one row per garment, with the filename of its
# reference image (expected in the current working directory).
product_data = pd.DataFrame({
    'Product': ['Dress 1', 'Dress 2', 'Dress 3'],
    'Size': ['S', 'M', 'L'],
    'Color': ['Red', 'Blue', 'Green'],
    'Image': ['sample_dress1.jpg', 'sample_dress2.jpg', 'sample_dress3.jpg']
})


def _load_garment_image(product):
    """Return the garment image for *product* as an RGB PIL image.

    Raises:
        ValueError: if *product* is not in the catalogue.
        FileNotFoundError: if the garment image file is missing on disk.
    """
    matches = product_data.loc[product_data['Product'] == product, 'Image'].values
    if len(matches) == 0:
        # Previously this was an unguarded .values[0] that raised a bare
        # IndexError on an unknown product name.
        raise ValueError(f"Unknown product: {product}")
    garment_path = os.path.join(os.getcwd(), matches[0])
    if not os.path.exists(garment_path):
        raise FileNotFoundError(f"File not found: {garment_path}")
    return Image.open(garment_path).convert("RGB")


def process_image(image, product):
    """Run the virtual try-on model for an uploaded photo and a product.

    Args:
        image: numpy array from the Gradio image component (H x W x C).
        product: product name selected in the dropdown; must match a row
            in ``product_data``.

    Returns:
        A tuple ``(result_array, product_details)`` where ``result_array``
        is the try-on output as a numpy array and ``product_details`` is
        the matching catalogue row as a dict.

    Raises:
        ValueError: if *product* is not in the catalogue.
        FileNotFoundError: if the garment image file is missing.
    """
    # Convert the uploaded array to a PIL image for the pipeline.
    person_image = Image.fromarray(image).convert("RGB")
    garment_image = _load_garment_image(product)

    # NOTE(review): the transformers "image-to-image" pipeline normally takes
    # an image (plus keyword arguments), not a dict of named images — confirm
    # this input format against the model card for this model.
    inputs = {"image": person_image, "garment": garment_image}
    result = tryon_pipeline(inputs)

    # The pipeline may return either a list of outputs or a single image;
    # the original code assumed a list and crashed otherwise.
    output_image = result[0] if isinstance(result, list) else result

    # Convert back to a numpy array for the Gradio image output.
    result_array = np.array(output_image)

    # Catalogue row for the selected product, as a plain dict for gr.JSON.
    product_details = product_data[product_data['Product'] == product].iloc[0].to_dict()

    return result_array, product_details


# Gradio UI: upload a photo, pick a product, and see the try-on result
# alongside the product's catalogue details.
iface = gr.Interface(
    fn=process_image,
    inputs=[
        gr.Image(type="numpy", label="Upload Your Image"),
        gr.Dropdown(choices=product_data['Product'].tolist(), label="Select Product")
    ],
    outputs=[
        gr.Image(type="numpy", label="Output Image"),
        gr.JSON(label="Product Details")
    ],
    title="Virtual Dress Fitting",
    description="Upload an image and select a product to see how it fits on you."
)

if __name__ == "__main__":
    iface.launch()