import spaces
import json
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download

# hf_hub_download(
#     repo_id="bartowski/gemma-2-9b-it-GGUF",
#     filename="gemma-2-9b-it-Q6_K_L.gguf",
#     local_dir="./models"
# )

# Download the model weights once at startup.
hf_hub_download(
    repo_id="bartowski/gemma-2-27b-it-GGUF",
    filename="gemma-2-27b-it-Q8_0.gguf",
    local_dir="./models"
)

llm = None
llm_model = None


@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    chat_template = MessagesFormatterType.GEMMA_2

    global llm
    global llm_model

    # Load the model only when it changes, so repeated calls reuse the cached instance.
    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=True,
            n_gpu_layers=81,
            n_batch=1024,
            n_ctx=8192,
        )
        llm_model = model

    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=system_message,
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Rebuild the chat history from Gradio's (user, assistant) tuples.
    messages = BasicChatHistory()
    for msn in history:
        user = {
            'role': Roles.user,
            'content': msn[0]
        }
        assistant = {
            'role': Roles.assistant,
            'content': msn[1]
        }
        messages.add_message(user)
        messages.add_message(assistant)

    # Append the instruction prompt to the user's message
    # ("Translate into Japanese. Reply with nothing but the translation.").
    message_with_prompt = message + "\n\n日本語に翻訳してください。翻訳文以外の返答はしないでください。"

    stream = agent.get_chat_response(
        message_with_prompt,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )

    # Stream partial output back to the UI as tokens arrive.
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs


description = """
English to Japanese