import os

import gradio as gr
import numpy as np
import pandas as pd
from PIL import Image
from transformers import pipeline
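
# Load the virtual try-on model as a Hugging Face pipeline. The "image-to-image"
# task comes from the original snippet; check the model card for
# gouthaml/raos-virtual-try-on-model to confirm the exact task and input format.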
model_id = "gouthaml/raos-virtual-try-on-model"
tryon_pipeline = pipeline("image-to-image", model=model_id)
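
# Small product catalogue: each row maps a dress to its size, color, and the
# garment image file expected in the current working directory.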
product_data = pd.DataFrame({
    'Product': ['Dress 1', 'Dress 2', 'Dress 3'],
    'Size': ['S', 'M', 'L'],
    'Color': ['Red', 'Blue', 'Green'],
    'Image': ['sample_dress1.jpg', 'sample_dress2.jpg', 'sample_dress3.jpg']
})


def process_image(image, product):
    # Gradio passes the upload as a numpy array; convert it to a PIL image.
    person_image = Image.fromarray(image).convert("RGB")

    # Look up the garment image file for the selected product.
    garment_filename = product_data[product_data['Product'] == product]['Image'].values[0]
    garment_path = os.path.join(os.getcwd(), garment_filename)

    if not os.path.exists(garment_path):
        raise FileNotFoundError(f"File not found: {garment_path}")

    garment_image = Image.open(garment_path).convert("RGB")

    # Run the try-on pipeline. Passing the person and garment images as a dict
    # assumes the model's pipeline accepts a "garment" key; adjust this to match
    # whatever input format the model card specifies.
    inputs = {"image": person_image, "garment": garment_image}
    result = tryon_pipeline(inputs)

    # The pipeline may return a single image or a list of images.
    output_image = result[0] if isinstance(result, list) else result

    # Convert back to a numpy array so Gradio can display it.
    result_array = np.array(output_image)

    # Return the rendered image together with the selected product's metadata.
    product_details = product_data[product_data['Product'] == product].iloc[0].to_dict()
    return result_array, product_details
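

# Gradio interface: a person photo and a product selection go in; the rendered
# try-on image and the product details come out.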
iface = gr.Interface(
    fn=process_image,
    inputs=[
        gr.Image(type="numpy", label="Upload Your Image"),
        gr.Dropdown(choices=product_data['Product'].tolist(), label="Select Product")
    ],
    outputs=[
        gr.Image(type="numpy", label="Output Image"),
        gr.JSON(label="Product Details")
    ],
    title="Virtual Dress Fitting",
    description="Upload an image and select a product to see how it fits on you."
)


if __name__ == "__main__":
    iface.launch()
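
# To try this out, run the script (e.g. `python app.py`, assuming that filename)
# with the sample_dress*.jpg files in the working directory, and install the
# dependencies first, e.g. `pip install transformers torch gradio pandas pillow`.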