import os

from langchain.chains import LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq
from groq import Groq

def test_prompt(prompt, question):
    """Send one system prompt + user question to Groq and return the reply text."""
    client = Groq(api_key=os.getenv("api_key"))
    completion = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=[
            {
                "role": "system",
                # The appended Japanese suffix instructs the model to
                # "always answer in Japanese".
                "content": prompt + " 毎回日本語で答える事",
            },
            {
                "role": "user",
                "content": question,
            },
        ],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=False,
        stop=None,
    )

    print(completion.choices[0].message)
    return completion.choices[0].message.content
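
# Illustrative call for test_prompt (a sketch, not part of the original file:
# it assumes the "api_key" environment variable holds a valid Groq API key and
# that llama3-8b-8192 is still served; both strings are made-up placeholders):
#
#   reply = test_prompt(
#       "You are a helpful assistant.",
#       "Summarize what ConversationBufferWindowMemory does.",
#   )
#   print(reply)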


def prompt_genalate(word, sys_prompt="あなたはプロンプト作成の優秀なアシスタントです。答えは日本語で答えます"):
    """Refine a rough keyword into a fuller prompt via a memory-backed chain.

    The default system prompt is Japanese for: "You are an excellent
    prompt-writing assistant. Answers are given in Japanese."
    """
    # Get the Groq API key from the environment
    groq_api_key = os.getenv("api_key")
    groq_chat = ChatGroq(groq_api_key=groq_api_key, model_name="llama3-70b-8192")

    system_prompt = sys_prompt
    conversational_memory_length = 50

    memory = ConversationBufferWindowMemory(
        k=conversational_memory_length, memory_key="chat_history", return_messages=True
    )
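    # Note: k=50 keeps the last 50 human/AI exchanges in the window, and
    # return_messages=True yields them as message objects, which is the form
    # MessagesPlaceholder below expects.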

    # This originally ran as a REPL loop (reading input() until the user
    # typed "exit"); it now handles the single question passed in as `word`.
    user_question = word

    if user_question:
        # Build the chat prompt from its components
        prompt = ChatPromptTemplate.from_messages(
            [
                # System prompt included in every request
                SystemMessage(content=system_prompt),
                # Conversation history injected from ConversationBufferWindowMemory
                MessagesPlaceholder(variable_name="chat_history"),
                # The user's input
                HumanMessagePromptTemplate.from_template("{human_input}"),
            ]
        )

        # Debugging aid (disabled): format the prompt as a string and print it.
        # Note that load_memory_variables takes an inputs dict, e.g. {}.
        # formatted_prompt = prompt.format(
        #     chat_history=memory.load_memory_variables({})["chat_history"],
        #     human_input=user_question,
        # )
        # print("Formatted Prompt:\n", formatted_prompt)


        conversation = LLMChain(
            llm=groq_chat,
            prompt=prompt,
            verbose=False,
            memory=memory,
        )
        response = conversation.predict(human_input=user_question)

        print("User: ", user_question)
        print("Assistant:", response)

        # "[役割]" means "[Role]"; it labels the generated text inside the
        # combined string returned to the caller.
        return user_question, user_question + "\r\n[役割]\r\n" + response
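

# Minimal end-to-end sketch (an assumption-laden example, not original code:
# it presumes the "api_key" environment variable is set and the llama3 models
# above are available; the sample string is a made-up placeholder).
if __name__ == "__main__":
    # First refine a rough idea into a fuller prompt...
    question, generated_prompt = prompt_genalate("a prompt for writing a self-introduction")
    # ...then try the generated prompt against the smaller model.
    answer = test_prompt(generated_prompt, question)
    print(answer)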