from collections import namedtuple
from typing import List

ModelInfo = namedtuple("ModelInfo", ["simple_name", "link", "description"])

model_info = {}


def register_model_info(
    full_names: List[str], simple_name: str, link: str, description: str
):
    info = ModelInfo(simple_name, link, description)
    for full_name in full_names:
        model_info[full_name] = info


def get_model_info(name: str) -> ModelInfo:
    if name in model_info:
        return model_info[name]
    else:
        # To fix this, please use `register_model_info` to register your model
        return ModelInfo(
            name, "", "Register the description at fastchat/model/model_registry.py"
        )


def get_model_description_md(model_list):
    model_description_md = """
| | | |
| ---- | ---- | ---- |
"""
    ct = 0
    visited = set()
    for i, name in enumerate(model_list):
        minfo = get_model_info(name)
        if minfo.simple_name in visited:
            continue
        visited.add(minfo.simple_name)
        one_model_md = f"[{minfo.simple_name}]({minfo.link}): {minfo.description}"
        if ct % 3 == 0:
            model_description_md += "|"
        model_description_md += f" {one_model_md} |"
        if ct % 3 == 2:
            model_description_md += "\n"
        ct += 1
    return model_description_md
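

# Illustrative sketch (not part of the original registry; the name "Example"
# and its link are hypothetical): for a model registered as
#   register_model_info(["example_model"], "Example", "https://example.com", "An example model.")
# a call to get_model_description_md(["example_model"]) would return:
#
#   | | | |
#   | ---- | ---- | ---- |
#   | [Example](https://example.com): An example model. |
#
# Up to three models are packed into each table row; a row is only terminated
# with a newline once it holds three entries.
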
# Register image generation models
register_model_info(
    ["imagenhub_LCM_generation", "fal_LCM_text2image"],
    "LCM",
    "https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7",
    "Latent Consistency Models.",
)
register_model_info(
    ["fal_LCM(v1.5/XL)_text2image"],
    "LCM(v1.5/XL)",
    "https://fal.ai/models/fast-lcm-diffusion-turbo",
    "Latent Consistency Models (v1.5/XL).",
)
register_model_info(
    ["imagenhub_PlayGroundV2_generation", "playground_PlayGroundV2_generation"],
    "Playground v2",
    "https://huggingface.co/playgroundai/playground-v2-1024px-aesthetic",
    "Playground v2 – 1024px Aesthetic Model.",
)
register_model_info(
    ["imagenhub_PlayGroundV2.5_generation", "playground_PlayGroundV2.5_generation"],
    "Playground v2.5",
    "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic",
    "Playground v2.5 is the state-of-the-art open-source model in aesthetic quality.",
)
register_model_info(
    ["imagenhub_OpenJourney_generation"],
    "Openjourney",
    "https://huggingface.co/prompthero/openjourney",
    "Openjourney is an open-source Stable Diffusion model fine-tuned on Midjourney images, by PromptHero.",
)
register_model_info(
    ["imagenhub_SDXLTurbo_generation", "fal_SDXLTurbo_text2image"],
    "SDXLTurbo",
    "https://huggingface.co/stabilityai/sdxl-turbo",
    "SDXL-Turbo is a fast generative text-to-image model.",
)
register_model_info(
    ["imagenhub_SDXL_generation", "fal_SDXL_text2image"],
    "SDXL",
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0",
    "SDXL is a Latent Diffusion Model that uses two fixed, pretrained text encoders.",
)
register_model_info(
    ["imagenhub_PixArtAlpha_generation"],
    "PixArtAlpha",
    "https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS",
    "Pixart-α consists of pure transformer blocks for latent diffusion.",
)
register_model_info(
    ["imagenhub_PixArtSigma_generation", "fal_PixArtSigma_text2image"],
    "PixArtSigma",
    "https://github.com/PixArt-alpha/PixArt-sigma",
    "Improved version of Pixart-α.",
)
register_model_info(
    ["imagenhub_SDXLLightning_generation", "fal_SDXLLightning_text2image"],
    "SDXL-Lightning",
    "https://huggingface.co/ByteDance/SDXL-Lightning",
    "SDXL-Lightning is a lightning-fast text-to-image generation model.",
)
register_model_info(
    ["imagenhub_StableCascade_generation", "fal_StableCascade_text2image"],
    "StableCascade",
    "https://huggingface.co/stabilityai/stable-cascade",
    "StableCascade is built upon the Würstchen architecture and works in a much smaller latent space.",
)
# Register image editing models
register_model_info(
    ["imagenhub_CycleDiffusion_edition"],
    "CycleDiffusion",
    "https://github.com/ChenWu98/cycle-diffusion?tab=readme-ov-file",
    "A latent space for stochastic diffusion models.",
)
register_model_info(
    ["imagenhub_Pix2PixZero_edition"],
    "Pix2PixZero",
    "https://pix2pixzero.github.io/",
    "A zero-shot Image-to-Image translation model.",
)
register_model_info(
    ["imagenhub_Prompt2prompt_edition"],
    "Prompt2prompt",
    "https://prompt-to-prompt.github.io/",
    "Image Editing with Cross-Attention Control.",
)
register_model_info(
    ["imagenhub_InstructPix2Pix_edition"],
    "InstructPix2Pix",
    "https://www.timothybrooks.com/instruct-pix2pix",
    "An instruction-based image editing model.",
)
register_model_info(
    ["imagenhub_MagicBrush_edition"],
    "MagicBrush",
    "https://osu-nlp-group.github.io/MagicBrush/",
    "Manually Annotated Dataset for Instruction-Guided Image Editing.",
)
register_model_info(
    ["imagenhub_PNP_edition"],
    "PNP",
    "https://github.com/MichalGeyer/plug-and-play",
    "Plug-and-Play Diffusion Features for Text-Driven Image-to-Image Translation.",
)
register_model_info(
    ["imagenhub_InfEdit_edition"],
    "InfEdit",
    "https://sled-group.github.io/InfEdit/",
    "Inversion-Free Image Editing with Natural Language.",
)
register_model_info(
    ["imagenhub_CosXLEdit_edition"],
    "CosXLEdit",
    "https://huggingface.co/stabilityai/cosxl",
    "An instruction-based image editing model from SDXL.",
)
register_model_info(
    ["fal_stable-cascade_text2image"],
    "StableCascade",
    "https://fal.ai/models/stable-cascade/api",
    "StableCascade is a generative model that produces high-quality images from text prompts.",
)
register_model_info(
    ["fal_AnimateDiff_text2video"],
    "AnimateDiff",
    "https://fal.ai/models/fast-animatediff-t2v",
    "AnimateDiff is a text-driven model that produces diverse and personalized animated images.",
)
register_model_info(
    ["fal_AnimateDiffTurbo_text2video"],
    "AnimateDiff Turbo",
    "https://fal.ai/models/fast-animatediff-t2v-turbo",
    "AnimateDiff Turbo is a lightning version of AnimateDiff.",
)
register_model_info(
    ["videogenhub_LaVie_generation"],
    "LaVie",
    "https://github.com/Vchitect/LaVie",
    "LaVie is a video generation model with cascaded latent diffusion models.",
)
register_model_info(
    ["videogenhub_VideoCrafter2_generation"],
    "VideoCrafter2",
    "https://ailab-cvc.github.io/videocrafter2/",
    "VideoCrafter2 is a T2V model that disentangles motion from appearance.",
)
register_model_info(
    ["videogenhub_ModelScope_generation"],
    "ModelScope",
    "https://arxiv.org/abs/2308.06571",
    "ModelScope is a T2V synthesis model that evolves from a T2I synthesis model.",
)
register_model_info(
    ["videogenhub_OpenSora_generation"],
    "OpenSora",
    "https://github.com/hpcaitech/Open-Sora",
    "A community-driven open-source implementation of Sora.",
)
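

# Minimal usage sketch (an assumption, not part of the original file): running
# this module directly prints the Markdown description table for a few of the
# model names registered above, plus the fallback ModelInfo returned for an
# unregistered name.
if __name__ == "__main__":
    sample_models = [
        "imagenhub_LCM_generation",
        "imagenhub_SDXL_generation",
        "videogenhub_LaVie_generation",
    ]
    print(get_model_description_md(sample_models))
    # Unregistered names ("some_unregistered_model" is hypothetical) fall back
    # to a placeholder ModelInfo rather than raising an error.
    print(get_model_info("some_unregistered_model"))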