handler.py
import base64
from io import BytesIO

import torch
from diffusers import StableDiffusionXLPipeline


class InferenceHandler:
    def __init__(self):
        # Run on GPU when available; fall back to CPU otherwise.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        model_name = "colt12/maxcushion"
        # The fp16 weight variant is only appropriate on GPU; CPU inference
        # needs full-precision weights, so pick dtype and variant accordingly.
        dtype = torch.float16 if self.device == "cuda" else torch.float32
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            model_name,
            torch_dtype=dtype,
            use_safetensors=True,
            variant="fp16" if self.device == "cuda" else None,
        )
        self.pipe = self.pipe.to(self.device)

    def __call__(self, inputs):
        prompt = inputs.get("prompt", "")
        if not prompt:
            raise ValueError("A prompt must be provided")
        negative_prompt = inputs.get("negative_prompt", "")

        # Generate a single image with fixed sampling settings.
        image = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
        ).images[0]

        # Serialize the PIL image as a base64-encoded PNG so the result
        # can be returned in a JSON response.
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return {"image_base64": image_base64}


handler = InferenceHandler()
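
# The block below is a minimal local smoke test, not part of the original
# handler: a sketch assuming this file is run directly on a machine that can
# download and load the model. The prompt text and output filename are
# illustrative assumptions, not values from the source.
if __name__ == "__main__":
    result = handler({"prompt": "a plush seat cushion, studio product photo"})
    # Decode the base64 payload back into PNG bytes and write it to disk,
    # reversing the encoding done in __call__.
    with open("output.png", "wb") as f:
        f.write(base64.b64decode(result["image_base64"]))
    print("Saved generated image to output.png")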