File size: 8,721 Bytes
5cfd8a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
917b084
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5cfd8a9
 
 
 
 
 
 
 
 
8b3be6a
5cfd8a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
917b084
5cfd8a9
 
 
 
 
917b084
 
 
 
 
5cfd8a9
917b084
 
5cfd8a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8b3be6a
5cfd8a9
 
 
 
2638861
 
 
 
 
 
5cfd8a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8b3be6a
5cfd8a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
from openai import OpenAI
# from unsloth import FastLanguageModel

class AI_Songwriter:
    """Conversational songwriting assistant backed by the OpenAI chat API.

    Each public method assembles an Alpaca-style prompt (instruction /
    input / empty response slot) and sends it to the chat model as a
    single user message via :meth:`_complete`.
    """

    # Chat model used for every completion request.
    MODEL = "gpt-4o"

    def __init__(self, client_key):
        """Initialize the OpenAI client and the shared prompt template.

        Args:
            client_key: OpenAI API key used to construct the client.
        """
        self.oai_client = OpenAI(api_key=client_key)

        # NOTE(review): a commented-out local fine-tuned model path
        # (unsloth FastLanguageModel) previously lived here; all
        # completions currently go through the OpenAI client above.

        # Alpaca-style template: instruction, context input, and an empty
        # response slot the model is expected to fill.
        self.alpaca_prompt = """Below is an instruction that describes a songwriting task, paired with an input that provides further context. Write a response that appropriately completes the request.
        ### Instruction:
        {}

        ### Input:
        {}

        ### Response:
        {}"""

    def _complete(self, prompt):
        """Send *prompt* as a single user message and return the reply text."""
        response = self.oai_client.chat.completions.create(
            model=self.MODEL,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
        )
        return response.choices[0].message.content

    def ask_question(self, messages):
        """Acknowledge the user's latest reply and ask a short follow-up.

        Args:
            messages: chat history as dicts with ``role`` and ``content``
                keys. The final entry is excluded from the prompt —
                presumably the pending assistant turn; TODO confirm with
                the caller.

        Returns:
            The model's acknowledgement-plus-question text.
        """
        convo = messages[:-1]

        instruction = "Based on this conversation history, respond to the user acknowledging their most recent response and ask a concise question to further learn more about the user's story."

        # Flatten the history into "role: content" lines, ending with an
        # open "Assistant:" cue for the model to continue.
        convo_str = "".join(f"{msg['role']}: {msg['content']}\n" for msg in convo)
        convo_str += "Assistant:"

        prompt = f"{instruction}\nConversation History:\n{convo_str}"
        return self._complete(prompt)

    def write_section(self, section_name, section_description, relevant_ideas, section_length, sections_written=None, overall_song_description=None):
        """Write one song section and wrap it in a hand-back message.

        Args:
            section_name: e.g. "verse 1" or "chorus".
            section_description: what this section should convey.
            relevant_ideas: idea(s) to incorporate (string or list of strings).
            section_length: desired length of the section.
            sections_written: previously written sections to complement, or None.
            overall_song_description: one-line description of the whole song.

        Returns:
            A string prefixed with an instruction to relay the lyrics back
            to the user before moving on.
        """
        # FIX: "that that" typo and missing spaces between concatenated
        # sentence fragments.
        instruction = f"Write a {section_name} of length {section_length} that incorporates the following ideas"
        if sections_written is not None:
            instruction += " and complements the sections provided."
        else:
            instruction += "."
        instruction += " You are also given a section description, genre, era, and overall description of the song."

        # Few-shot example loaded from disk.
        # FIX: original used a plain string, so "{convo}" was sent
        # literally and the example was never interpolated.
        with open("prompts/write_section_ex.txt", "r") as f:
            example = f.read()
        instruction += f" Here's an example:\n{example}\nNow do it for this input:"

        # Accept either a pre-formatted string or a list of ideas.
        ideas = ", ".join(relevant_ideas) if isinstance(relevant_ideas, list) else relevant_ideas
        task_input = f"""Ideas to use:
                  - {ideas}
                  Section Description: {section_description}
                  Genre: Songwriter Pop
                  Era: 2010s
                  Overall song description: {overall_song_description}
                  """
        if sections_written is not None:
            written_sections = "\n".join(sections_written)
            task_input += f"Sections provided:\n{written_sections}\nLyrics:"
        else:
            task_input += "\nLyrics:"

        prompt = self.alpaca_prompt.format(instruction, task_input, "")
        lyrics = self._complete(prompt)
        return "Pass this back to the user and ask if they would like to receive an audio snippet or make any revisions before moving to the next section: \n" + lyrics

    def revise_section_lyrics(self, section_name, current_section, lines_to_revise, relevant_ideas=None, relevant_words=None):
        """Blank out the requested lines of a section and ask the model to infill them.

        Args:
            section_name: name of the section being revised.
            current_section: newline-separated lyrics of the section.
            lines_to_revise: 1-based line numbers to replace.
            relevant_ideas: optional list of ideas to work in.
            relevant_words: optional list of words to work in.

        Returns:
            The model's infilled lyrics.
        """
        section_lines = current_section.strip("\n ").split("\n")

        # Pad the section so every requested line number has a slot.
        max_line_num = max(lines_to_revise)
        if max_line_num > len(section_lines):
            section_lines.extend([''] * (max_line_num - len(section_lines)))

        # Replace each targeted line with an infill marker.
        for line_num in lines_to_revise:
            if line_num <= len(section_lines):
                section_lines[line_num - 1] = '___'

        # FIX: the original measured len() of the *joined string*
        # (", ".join(...)), i.e. a character count, so the prompt reported
        # a wrong number of lines. Count the requested lines instead.
        n_lines = len(lines_to_revise)
        line_phrase = f"{n_lines} " + ("lines" if n_lines > 1 else "line")

        instruction = f"Infill the remaining {line_phrase} into {section_name}"

        # FIX: missing spaces ("ideasand words", "words.You are...").
        if relevant_ideas is not None or relevant_words is not None:
            instruction += " while incorporating the following "
            if relevant_ideas is not None and relevant_words is not None:
                instruction += "ideas and words."
            elif relevant_ideas is not None:
                instruction += "ideas."
            else:
                instruction += "words."
        else:
            instruction += "."

        instruction += " You are also given a genre, era, and the rest of the section."

        # FIX: same broken interpolation as write_section — use an f-string
        # so the example actually reaches the prompt.
        with open("prompts/revise_section_ex.txt", "r") as f:
            example = f.read()
        instruction += f" Here's an example:\n{example}\nNow do it for this input:"

        task_input = ""
        if relevant_ideas is not None and isinstance(relevant_ideas, list):
            task_input += f"Ideas to use: {', '.join(relevant_ideas)}\n"
        if relevant_words is not None and isinstance(relevant_words, list):
            task_input += f"Words to use: {', '.join(relevant_words)}\n"

        # FIX: the original interpolated the Python list repr
        # (['line', '___', ...]) into the prompt; join it back into text.
        section_text = "\n".join(section_lines)
        task_input += f"Genre: Songwriter Pop\nEra: 2010s\nCurrent section:\n{section_text}\n\nLyrics:"

        prompt = self.alpaca_prompt.format(instruction, task_input, "")
        return self._complete(prompt)

    def revise_instrumental_tags(self, current_instrumental_tags, user_instrumental_feedback):
        """Revise instrumental tags to reflect the user's feedback.

        Args:
            current_instrumental_tags: the tags currently in use.
            user_instrumental_feedback: the user's feedback on the instrumental.

        Returns:
            The revised tag string, with any echoed "New tags:" prefix and
            surrounding newlines stripped.
        """
        instruction = "Revise the current instrumental tags to better match the feedback provided:"
        task_input = f"""Current instrumental tags: {current_instrumental_tags}\ninstrumental feedback: {user_instrumental_feedback}\nNew tags:"""
        prompt = self.alpaca_prompt.format(instruction, task_input, "")

        reply = self._complete(prompt)
        return reply.split("New tags:")[-1].strip("\n ")

    def write_all_lyrics(self, sections_to_be_written, sections_written, overall_song_description):
        """Write every remaining section of the song in one pass.

        Args:
            sections_to_be_written: dicts with ``section_name`` and
                ``section_description`` keys, one per section to write.
            sections_written: already-written sections (list of strings),
                or None if starting from scratch.
            overall_song_description: one-line description of the whole song.

        Returns:
            The model's lyrics for the remaining sections.
        """
        instruction = "Write the remainder of this full song given an overall description of the song, genre, era, and a description of the sections to complete:"

        # FIX: same broken interpolation as write_section — use an f-string
        # so the example actually reaches the prompt.
        with open("prompts/write_full_song_ex.txt", "r") as f:
            example = f.read()
        instruction += f" Here's an example:\n{example}\nNow do it for this input:"

        names = [x['section_name'] for x in sections_to_be_written]
        descriptions = [x['section_description'] for x in sections_to_be_written]
        full_meanings = "\n".join(
            f"{name}: {desc}" for name, desc in zip(names, descriptions)
        )
        task_input = f"Sections to write: {', '.join(names)}\nOverall song description: {overall_song_description}\nGenre: Songwriter Pop\nEra: 2010s\nSection Descriptions:\n{full_meanings}"

        if sections_written is not None:
            written_sections = "\n".join(sections_written)
            # FIX: missing newline — "Sections provided" ran straight into
            # the last section description.
            task_input += f"\nSections provided:\n{written_sections}\n\nLyrics:"
        else:
            task_input += "\n\nLyrics:"

        prompt = self.alpaca_prompt.format(instruction, task_input, "")
        return self._complete(prompt)