ajayarora1235 commited on
Commit
8b3be6a
β€’
1 Parent(s): 3cc7c63

massive cleanup, reorg, documentation

Browse files
app.py CHANGED
@@ -1,454 +1,12 @@
1
- import os
2
  import gradio as gr
3
- from typing import List, Optional, Tuple, Dict
4
- import time
5
- import datetime
6
-
7
- def get_current_time() -> str:
8
- """
9
- Returns the current time as a formatted string.
10
- """
11
- now = datetime.datetime.now()
12
- return now.strftime("%Y-%m-%d %H:%M:%S")
13
-
14
-
15
- from gpt_calls import AI_Songwriter
16
 
 
 
17
  from openai import OpenAI
18
- oai_client = OpenAI(
19
- api_key=os.getenv("OPEN_AI_KEY"),
20
- )
21
- client_key = os.getenv("OPEN_AI_KEY")
22
- client = OpenAI(
23
- api_key=os.getenv("OPEN_AI_KEY"),
24
- )
25
-
26
- import time
27
- import os
28
- import json
29
- import random
30
-
31
- from suno import generate_song, concat_snippets
32
-
33
-
34
- History = List[Tuple[str, str]] # pairs of (query, response), where query is user input and response is system output
35
- Messages = List[Dict[str, str]] # list of messages with role and content
36
-
37
- '''
38
- Genre list
39
- Preset dropdown: Missing Home, Heartbroken, Getting Turnt, Childhood Nostalgia, (Custom) How are you?
40
- - tags based on preset
41
- Artist identity dropdown: A 15-year old boy who dreams of being a broadway actor, A 23-year old soft but bombastic woman who loves to rap, dance, and take over the streets, A 30-year old man who has plans to take over the world as a villain
42
-
43
- male tenor, dramatic, emotional, strings
44
-
45
- pass artist identity in starting prompt to gpt-4 conversation.
46
- pass preset dropdown to gpt-4 conversation to inspire the questions that Lupe asks the user.
47
-
48
- -Ask every 4 back-and-forths do you want to talk more? Or are you ready for your song? (add button for song in assistant's message)
49
-
50
- -Mention lyrics
51
- -Mention every 4 back-and-forths lyrics that you’ll include in the song [calling gpt-4 to generate the lyrics and identify one line that's most relevant to the last message]
52
- '''
53
-
54
-
55
- def clear_session() -> History:
56
- return '', []
57
-
58
- def remove_quotes(s):
59
- if s[0] == '"' and s[-1] == '"' or s[0] == "'" and s[-1] == "'":
60
- return s[1:-1]
61
- return s
62
-
63
-
64
-
65
- def generate_song_seed(baseline_seed):
66
- song_details_prompt = "Analyze this description of how someone is feeling and provide a suggestion of a interesting song concept to base a song off of. Here are three examples, now provide a song concept for this fourth:\n\n"
67
-
68
- song_seed_prompt ='prompt_song_seed.txt'
69
- with open(song_seed_prompt, 'r', encoding='utf-8') as file:
70
- content_2 = file.read()
71
-
72
- song_details_prompt += "\n\n" + content_2 + baseline_seed + "\nSuggested Song Concept: "
73
-
74
- convo = [
75
- {
76
- "role": "user",
77
- "content": song_details_prompt,
78
- },
79
- ]
80
-
81
- gen = oai_client.chat.completions.create(
82
- model="gpt-4o",
83
- messages=convo,
84
- stream=True
85
- )
86
-
87
- current_response = ""
88
- for chunk in gen:
89
- if chunk.choices[0].delta.content is not None:
90
- # print ("chunk", chunk.choices[0].delta.content)
91
- current_response += chunk.choices[0].delta.content
92
- yield current_response
93
-
94
- def clean_song_seed(song_seed):
95
- if "Suggested Song Concept:" in song_seed:
96
- song_seed = song_seed.split("Suggested Song Concept:")[1].strip()
97
- return song_seed
98
-
99
- def make_song(snippet_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from=None, continue_at=None):
100
- os.makedirs("audio_clips", exist_ok=True)
101
- song_name = f"SG_{int(time.time())}"
102
- suno_song_path = f"./audio_clips/suno_{song_name}.wav"
103
- full_tags = f"{snippet_instrumental_tags}"
104
- print("Passing to generate_song:", full_tags, snippet_lyrics, suno_song_path)
105
-
106
- if snippet_clip_to_continue_from is not None and snippet_clip_to_continue_from != "":
107
- song_link = generate_song(full_tags, snippet_lyrics, suno_song_path, snippet_clip_to_continue_from, continue_at)
108
- else:
109
- song_link = generate_song(full_tags, snippet_lyrics, suno_song_path)
110
-
111
- return song_link
112
-
113
- def messages_to_history(messages: Messages) -> Tuple[str, History]:
114
- assert messages[0]['role'] == 'system', messages[1]['role'] == 'user'
115
- messages_for_parsing = messages[:1] + [{'role': 'user', 'content': ''}] + messages[2:]
116
- print("OLD MESSAGES FOR PARSING", messages_for_parsing)
117
- messages_for_parsing = [x for x in messages_for_parsing if x['role'] != 'tool' and 'tool_calls' not in x]
118
-
119
- messages_for_parsing = [
120
- {'role': x['role'], 'content': x['content'].split(" Use write_section")[0]} if x['role'] == 'user' else x
121
- for x in messages_for_parsing
122
- ]
123
- print("NEW MESSAGES FOR PARSING", messages_for_parsing)
124
- history = []
125
- for q, r in zip(messages_for_parsing[1::2], messages_for_parsing[2::2]):
126
- history.append([q['content'], r['content']])
127
- # print("made history:\n", history, "from messages\n", messages, "messages for parsing", messages_for_parsing)
128
- return history
129
-
130
-
131
- def model_chat(genre_input, query: Optional[str], history: Optional[History], messages: Optional [Messages], auto=False) -> Tuple[str, str, History, Messages]:
132
- if query is None:
133
- query = ''
134
-
135
- if not query.endswith('?'):
136
- query += " Use write_section when you have a large amount of story to pull from to write the next section! Alternatively ask me a follow up before moving to write."
137
-
138
- with open('ai_tools.json') as f:
139
- ai_tools = json.load(f)
140
-
141
- songwriterAssistant = AI_Songwriter(client_key=client_key)
142
-
143
- if auto:
144
- messages = messages[:-1] + [{'role': 'user', 'content': query}] #should this be a -1? for non-auto. why does the chatbot history get messed up?
145
- else:
146
- messages = messages + [{'role': 'user', 'content': query}]
147
-
148
-
149
-
150
- messages_filtered = messages
151
- response_message = oai_client.chat.completions.create(
152
- model="gpt-4o",
153
- messages=messages_filtered,
154
- tools = ai_tools,
155
- tool_choice="auto",
156
- )
157
- print(response_message, "model chat response")
158
- current_response = ""
159
- # Step 2: determine if the response from the model includes a tool call.
160
- tool_calls = response_message.choices[0].message.tool_calls
161
- if tool_calls:
162
- messages.append({
163
- "role": response_message.choices[0].message.role,
164
- "content": response_message.choices[0].message.content,
165
- "tool_calls": tool_calls,
166
- "function_call": response_message.choices[0].message.function_call
167
- })
168
- # If true the model will return the name of the tool / function to call and the argument(s)
169
- for tool_call in tool_calls:
170
- print(tool_call)
171
- tool_call_id = tool_call.id
172
- tool_function_name = tool_call.function.name
173
- tool_query_args = eval(tool_call.function.arguments)
174
-
175
- # Step 3: Call the function and retrieve results. Append the results to the messages list.
176
- if tool_function_name == 'write_section':
177
- snippet_instrumental_tags = tool_query_args.pop('snippet_instrumental_tags', None)
178
- snippet_clip_to_continue_from = tool_query_args.pop('snippet_clip_to_continue_from', None)
179
- suggested_lyrics = songwriterAssistant.write_section(**tool_query_args)
180
-
181
- ## yield suggested lyrics in tool and assistant message
182
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': suggested_lyrics}
183
- # audio_message = {'role': 'assistant', 'content': "Here's what I've come up with:\n" + suggested_lyrics + "\n\nGenerating audio snippet..."}
184
- new_messages = messages + [tool_message] #, audio_message
185
-
186
- model_response_with_function_call = client.chat.completions.create(
187
- model="gpt-4o",
188
- messages=new_messages,
189
- ) # get a new response from the model where it can see the function response
190
- current_response = model_response_with_function_call.choices[0].message.content
191
-
192
- role = "assistant"
193
- new_messages = new_messages + [{'role': role, 'content': current_response}]
194
- # new_messages = [msg for msg in new_messages if msg['content'] is not None and msg['role'] in ['user', 'assistant']]
195
- history = messages_to_history(new_messages)
196
- yield '', history, new_messages, '[...]'
197
-
198
-
199
- # new_history = messages_to_history(new_messages)
200
- # yield '', new_history, new_messages, '[...]'
201
-
202
- # ### call make_song here with the snippet_lyrics, snippet_instrumental_tags, and snippet_clip_to_continue
203
- # song_link = make_song(suggested_lyrics, snippet_instrumental_tags, snippet_clip_to_continue)
204
-
205
- # ## filter out suno link from tool query arg
206
- # clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
207
-
208
- # ## add song link to tool and audio message
209
- # tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': suggested_lyrics + '\nclip id: ' + clip_id}
210
- # audio_message = {'role': 'assistant', 'content': "Here's what I've come up with:\n" + suggested_lyrics + '\n\n' + f'<audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>clip id: {clip_id}</p><p>instrumental tags: {snippet_instrumental_tags}</p>'}
211
- # audio_message['content'] += f'<p>continued from clip: {snippet_clip_to_continue}</p>'
212
- # audio_message['content'] += f'\n\nWhat do you think?'
213
- # new_messages = messages + [tool_message, audio_message]
214
- # new_history = messages_to_history(new_messages)
215
- # yield '', new_history, new_messages, '[...]'
216
-
217
- elif tool_function_name == 'revise_section_lyrics':
218
- snippet_instrumental_tags = tool_query_args.pop('snippet_instrumental_tags', None)
219
- snippet_clip_to_continue_from = tool_query_args.pop('snippet_clip_to_continue_from', None)
220
- revised_lyrics = songwriterAssistant.revise_section_lyrics(**tool_query_args)
221
-
222
- # ## yield revised lyrics in tool and assistant message
223
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': revised_lyrics}
224
- # audio_message = {'role': 'assistant', 'content': "Here's my revised lyrics:\n" + revised_lyrics + "\n\nGenerating audio snippet..."}
225
- new_messages = messages + [tool_message] #, audio_message]
226
-
227
- model_response_with_function_call = client.chat.completions.create(
228
- model="gpt-4o",
229
- messages=new_messages,
230
- ) # get a new response from the model where it can see the function response
231
- current_response = model_response_with_function_call.choices[0].message.content
232
-
233
- role = "assistant"
234
- new_messages = new_messages + [{'role': role, 'content': current_response}]
235
- # new_messages = [msg for msg in new_messages if msg['content'] is not None and msg['role'] in ['user', 'assistant']]
236
- history = messages_to_history(new_messages)
237
- yield '', history, new_messages, '[...]'
238
- # new_history = messages_to_history(new_messages)
239
- # yield '', new_history, new_messages, '[...]'
240
-
241
- # ### call make_song here with the snippet_lyrics, snippet_instrumental_tags, and snippet_clip_to_continue
242
- # song_link = make_song(revised_lyrics, snippet_instrumental_tags, snippet_clip_to_continue)
243
-
244
- # ## filter out suno link from tool query arg
245
- # clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
246
-
247
- # ## add song link to tool and audio message
248
- # tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': revised_lyrics + '\nclip id: ' + clip_id}
249
- # audio_message = {'role': 'assistant', 'content': "Here's what I've come up with:\n" + revised_lyrics + '\n\n' + f'<audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>clip id: {clip_id}</p><p>instrumental tags: {snippet_instrumental_tags}</p>'}
250
- # audio_message['content'] += f'<p>continued from clip: {snippet_clip_to_continue}</p>'
251
- # audio_message['content'] += f'\n\nWhat do you think?'
252
- # new_messages = messages + [tool_message, audio_message]
253
- # new_history = messages_to_history(new_messages)
254
- # yield '', new_history, new_messages, '[...]'
255
-
256
- elif tool_function_name == 'revise_instrumental_tags':
257
- #detangle tool_query_args dict
258
- #snippet_lyrics = tool_query_args['snippet_lyrics'] + "\n[End]"
259
- snippet_instrumental_tags = tool_query_args['current_instrumental_tags']
260
- user_instrumental_feedback = tool_query_args['user_instrumental_feedback']
261
- # if 'snippet_clip_to_continue_from' not in tool_query_args:
262
- # tool_query_args['snippet_clip_to_continue_from'] = None
263
- # snippet_clip_to_continue_from = tool_query_args['snippet_clip_to_continue_from']
264
-
265
- new_instrumental_tags = songwriterAssistant.revise_instrumental_tags(snippet_instrumental_tags, user_instrumental_feedback)
266
- # yield new_instrumental_tags in tool and assistant message
267
- # tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'new instrumental tags: {new_instrumental_tags}'}
268
- # audio_message = {'role': 'assistant', 'content': f'Sure! I\'ve revised the instrumental tags: {new_instrumental_tags}\n\n Generating audio snippet...'}
269
- # new_messages = messages + [tool_message, audio_message]
270
- # new_history = messages_to_history(new_messages)
271
- # yield '', new_history, new_messages, '[...]'
272
 
273
- if isinstance(tool_query_args['sections_written'], str):
274
- current_lyrics = tool_query_args['sections_written']
275
- elif isinstance(tool_query_args['sections_written'], list):
276
- current_lyrics = "\n".join(tool_query_args['sections_written'])
277
- else:
278
- current_lyrics = ""
279
-
280
- #current_lyrics = "\n".join(tool_query_args['sections_written'])
281
- song_link = make_song(current_lyrics, new_instrumental_tags)
282
- ## filter out suno link from tool query arg
283
- clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
284
-
285
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'new instrumental tags: {new_instrumental_tags}, clip id: {clip_id}'}
286
- audio_message = {'role': 'assistant', 'content': f'Sure! I\'ve revised the instrumental tags: {new_instrumental_tags}\nCurrent lyrics: {current_lyrics}\n\n <audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>clip id: {clip_id}</p>'}
287
- audio_message['content'] += f'\n\nWhat do you think?'
288
- new_messages = messages + [tool_message, audio_message]
289
- new_history = messages_to_history(new_messages)
290
- yield '', new_history, new_messages, '[...]'
291
- elif tool_function_name == 'merge_all_snippets':
292
- updated_clip_url, updated_lyrics, clips_list = concat_snippets(tool_query_args['last_snippet_id'])
293
- updated_clip_id = updated_clip_url.split("https://audiopipe.suno.ai/?item_id=")[1]
294
-
295
- #pass this info in new tool and assistant message
296
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'updated clip id: {updated_clip_id}\nupdated lyrics: {updated_lyrics}\nupdated clips path: {clips_list}'}
297
- audio_message = {'role': 'assistant', 'content': f'Sure! All the clips are now merged. <p>updated lyrics: {updated_lyrics}</p><audio controls autoplay><source src="{updated_clip_url}" type="audio/mp3"></audio><p>updated clip id: {updated_clip_id}</p><p>updated clips path: {clips_list}</p>'}
298
-
299
- new_messages = messages + [tool_message, audio_message]
300
- new_history = messages_to_history(new_messages)
301
- yield '', new_history, new_messages, '[...]'
302
- elif tool_function_name == 'finish_full_song':
303
- ## args are sections_to_be_written, relevant_ideas, last_snippet_id, sni
304
-
305
- ## STEP 0: POP out instrumental args
306
- snippet_instrumental_tags = tool_query_args.pop('snippet_instrumental_tags', None)
307
- snippet_clip_to_continue_from = tool_query_args.pop('snippet_clip_to_continue_from', None)
308
-
309
- if isinstance(tool_query_args['sections_written'], str):
310
- current_lyrics = tool_query_args['sections_written']
311
- elif isinstance(tool_query_args['sections_written'], list):
312
- current_lyrics = "\n".join(tool_query_args['sections_written'])
313
- else:
314
- current_lyrics = ""
315
-
316
- ## STEP 1: WRITE ALL LYRICS using songwriterAssistant
317
- remaining_lyrics = songwriterAssistant.write_all_lyrics(**tool_query_args)
318
- full_lyrics = current_lyrics + remaining_lyrics + "\n[End]"
319
- yield '', history, messages, full_lyrics
320
-
321
- ## STEP 2: MAKE SONG FOR REMAINING LYRICS
322
- song_link = make_song(remaining_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from)
323
-
324
- #tool and assistant message
325
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'{full_lyrics}'}
326
- audio_message = {'role': 'assistant', 'content': f'New snippet: \n <audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio>'}
327
-
328
- new_messages = messages + [tool_message, audio_message]
329
- new_history = messages_to_history(new_messages)
330
- yield '', new_history, new_messages, full_lyrics
331
-
332
- ## STEP 3: MERGE FULL SONG
333
- if snippet_clip_to_continue_from not in [None, ""]:
334
- updated_clip_url, updated_lyrics, clips_list = concat_snippets(song_link.split("https://audiopipe.suno.ai/?item_id=")[1])
335
- else:
336
- updated_clip_url, updated_lyrics, clips_list = song_link, remaining_lyrics, []
337
- ## YIELD UPDATED CLIP URL, LYRICS, AND CLIPS LIST
338
- updated_clip_id = updated_clip_url.split("https://audiopipe.suno.ai/?item_id=")[1]
339
-
340
- #tool and assistant message
341
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'updated clip id: {updated_clip_id}\nupdated lyrics: {updated_lyrics}\nupdated clips path: {clips_list}'}
342
- audio_message = {'role': 'assistant', 'content': f'All done! Thank you for participating :) \nFinal Lyrics: {full_lyrics} \nFinal song: <audio controls autoplay><source src="{updated_clip_url}" type="audio/mp3"></audio><p>clip id: {updated_clip_id}</p>'}
343
-
344
- new_messages = messages + [tool_message, audio_message]
345
- new_history = messages_to_history(new_messages)
346
- yield '', new_history, new_messages, '[...]'
347
-
348
- elif tool_function_name == 'get_audio_snippet':
349
- #detangle tool_query_args dict
350
- snippet_lyrics = tool_query_args['snippet_lyrics'] + "\n[End]"
351
- snippet_instrumental_tags = tool_query_args['snippet_instrumental_tags']
352
- if 'snippet_clip_to_continue_from' not in tool_query_args:
353
- tool_query_args['snippet_clip_to_continue_from'] = None
354
- snippet_clip_to_continue_from = tool_query_args['snippet_clip_to_continue_from']
355
- song_link = make_song(snippet_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from)
356
- ## filter out suno link from tool query arg
357
- clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
358
-
359
- tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'instrumental tags: {tool_query_args["snippet_instrumental_tags"]}, clip id: {clip_id}'}
360
- audio_message_content = "Here's what I've come up with:\n" + snippet_lyrics + '\n\n' + f'<audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>instrumental tags: {tool_query_args["snippet_instrumental_tags"]}</p><p>clip id: {clip_id}</p>'
361
- audio_message_content += f'<p>continued from clip: {snippet_clip_to_continue_from}</p>'
362
- audio_message = {'role': 'assistant', 'content': audio_message_content}
363
- new_messages = messages + [tool_message, audio_message]
364
- new_history = messages_to_history(new_messages)
365
- yield '', new_history, new_messages
366
- else:
367
- print(f"Error: function {tool_function_name} does not exist")
368
-
369
- # messages.append({
370
- # "role":"tool",
371
- # "tool_call_id":tool_call_id,
372
- # "name": tool_function_name,
373
- # "content":results
374
- # })
375
-
376
- # Step 4: Invoke the chat completions API with the function response appended to the messages list
377
- # Note that messages with role 'tool' must be a response to a preceding message with 'tool_calls'
378
-
379
- else:
380
- # Model did not identify a function to call, result can be returned to the user
381
- current_response = response_message.choices[0].message.content
382
-
383
- role = "assistant"
384
- new_messages = messages + [{'role': role, 'content': current_response}]
385
- # new_messages = [msg for msg in new_messages if msg['content'] is not None and msg['role'] in ['user', 'assistant']]
386
- history = messages_to_history(new_messages)
387
- yield '', history, new_messages, '[...]'
388
-
389
- def get_sections(overall_meaning, section_list):
390
- section_list = section_list.split("\n")
391
- filepath_2='prompt_section_writer.txt'
392
- with open(filepath_2, 'r', encoding='utf-8') as file:
393
- content_2 = file.read()
394
-
395
- response = oai_client.chat.completions.create(
396
- model="gpt-4o",
397
- messages=[
398
- {
399
- "role": "user",
400
- "content": content_2 + f"\n\nOverall meaning: {overall_meaning}\nSection list: {', '.join(section_list)}\nSection meanings:",
401
- },
402
- ],
403
- )
404
-
405
- text_response = response.choices[0].message.content
406
- return text_response
407
-
408
-
409
- def get_starting_messages(song_lengths, song_title, song_blurb, song_genre, init_sections):
410
- system_prompt = "You are an expert at writing songs. You are with an everyday person, and you will write the lyrics of the song based on this person's life has by asking questions about a story of theirs. Design your questions on your own, without using your tools, to help you understand the user's story, so you can write a song about the user's experience that resonates with them. We have equipped you with a set of tools to help you write this story; please use them. You are very good at making the user feel comfortable, understood, and ready to share their feelings and story. Occasionally (every 2 messages or so) you will suggest some lyrics, one section at a time, and see what the user thinks of them. Do not suggest or ask for thoughts on more than one section at a time. Be concise and youthful."
411
-
412
- user_prompt = f"I have a story that could make this concept work well. The title is {song_title}, its about {song_blurb} with a genre {song_genre} and I think this should be the structure: {init_sections}\n{song_lengths}"
413
-
414
-
415
-
416
- first_msg_res = oai_client.chat.completions.create(
417
- model="gpt-4o",
418
- messages=[
419
- {"role": "system", "content": system_prompt},
420
- {"role": "user", "content": "The user has stated the following:\n " + user_prompt + "\n Introduce yourself and kick-off the songwriting process with a question."},
421
- ],
422
- )
423
-
424
- # if "Section meanings:\n" in init_sections:
425
- # init_sections = init_sections.split("Section meanings:\n")[1]
426
- # else:
427
- # if "[" in init_sections:
428
- # init_sections = init_sections[init_sections.index("["):]
429
-
430
- # first_message = init_sections + "\n\n" + first_message
431
-
432
- first_message = first_msg_res.choices[0].message.content
433
-
434
- starting_messages = [
435
- {'role': 'system', 'content': system_prompt},
436
- {'role': 'user', 'content': user_prompt},
437
- {'role': 'assistant', 'content': first_message},
438
- ]
439
-
440
- return starting_messages, messages_to_history(starting_messages)
441
-
442
- # def update_messages_with_lyrics(messages, lyrics):
443
- # text_to_append = "\n\nHere are the lyrics I came up with!\n\n" + lyrics
444
- # if messages[-1]['role'] == 'assistant':
445
- # messages[-1]['content'] += text_to_append
446
- # elif messages[-1]['role'] == 'user':
447
- # messages.append({'role': 'assistant', 'content': text_to_append})
448
- # return messages, messages_to_history(messages)
449
-
450
- def change_tab(id):
451
- return gr.Tabs(selected=id)
452
 
453
  with gr.Blocks() as demo:
454
  gr.Markdown("""<center><font size=8>AI Songwriter (alpha)</center>""")
@@ -488,44 +46,27 @@ with gr.Blocks() as demo:
488
  with gr.Row():
489
  continue_btn = gr.Button("Continue to Next Step", interactive=False)
490
 
 
 
 
 
 
491
  generate_seed_button.click(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output]).then(clean_song_seed, inputs=[instrumental_output], outputs=[instrumental_output])
 
492
  def make_row_visible(x):
493
  return gr.Row(visible=True), gr.Markdown("""<center><font size=4>Here it is! Hit 'Approve' to confirm this concept. Edit the concept directly or hit 'Try Again' to get another suggestion.</font></center>""", visible=True)
494
  def enable_button(x):
495
  return gr.Button("Continue to Next Step", interactive=True)
496
  generate_seed_button.click(make_row_visible, inputs=[generate_seed_button], outputs=[concept_row, concept_desc])
497
  approve_button.click(enable_button, inputs=[approve_button], outputs=[continue_btn])
498
-
499
- def update_song_details(instrumental_output):
500
- song_details_prompt = "Analyze this assessment and suggestion of a song concept to extract the genre, one sentence blurb of what the song is about. Based on this, also suggest a song title. Output exactly three lines, in the format of 'genre: [genre]', 'title: [title]', 'blurb: [blurb]'. "
501
-
502
- song_details_prompt += "\n\n" + instrumental_output
503
-
504
- convo = [
505
- {
506
- "role": "user",
507
- "content": song_details_prompt,
508
- },
509
- ]
510
-
511
- response = oai_client.chat.completions.create(
512
- model="gpt-4o",
513
- messages=convo
514
- )
515
- response_lines = response.choices[0].message.content.split('\n')
516
- genre = next((line.split(": ")[1] for line in response_lines if "genre: " in line.lower()), None)
517
- title = next((line.split(": ")[1] for line in response_lines if "title: " in line.lower()), None)
518
- blurb = next((line.split(": ")[1] for line in response_lines if "blurb: " in line.lower()), None)
519
- return genre, title, blurb
520
-
521
-
522
- section_meanings = gr.State(value="")
523
 
524
  try_again_button.click(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output])
 
 
 
525
  continue_btn.click(change_tab, gr.Number(1, visible=False), tabs)
526
 
527
 
528
-
529
  with gr.TabItem("Generation", id=1): #index is 1
530
  start_song_gen = gr.State(value=False)
531
  gr.Markdown("""<center><font size=4>Now, chat with an AI songwriter to make your song! Tip: get and tune an audio snippet well first and then put effort into the story. Hit finish when ready to hear full song.</font></center>""")
@@ -545,22 +86,12 @@ with gr.Blocks() as demo:
545
  songwriter_creativity = gr.Slider(label="Songwriter LLM Temperature", minimum=0, maximum=1, step=0.01, value=1)
546
  lyrics_display = gr.TextArea("[...]", label="Generated Lyrics", show_copy_button=True, container=True)
547
 
 
548
  approve_button.click(update_song_details, inputs=[instrumental_output], outputs=[genre_input, title_input, blurb_input]).then(get_sections, inputs=[blurb_input, instrumental_output], outputs=[section_meanings])
549
  continue_btn.click(get_starting_messages, inputs=[instrumental_textbox, title_input, blurb_input, genre_input, section_meanings], outputs=[messages, chatbot_history])
550
 
551
  with gr.Row():
552
  textbox = gr.Textbox(lines=1, label='Send a message', show_label=False, placeholder='Send a message', scale=4)
553
- # melody_recorder = gr.Audio(
554
- # sources=["microphone"],
555
- # label="Record Melody to suggest",
556
- # waveform_options=gr.WaveformOptions(
557
- # waveform_color="#01C6FF",
558
- # waveform_progress_color="#0066B4",
559
- # skip_length=2,
560
- # show_controls=False,
561
- # ),
562
- # )
563
- # clear_history = gr.Button("🧹 Clear history", visible=False)
564
  submit = gr.Button("Send", scale=2)
565
 
566
  with gr.Row():
@@ -576,88 +107,27 @@ with gr.Blocks() as demo:
576
  return messages, chatbot_history, ''
577
 
578
  reset_button.click(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, lyrics_display])
579
-
580
-
581
- # generate_seed_button.click(get_starting_messages, inputs=[character, title_input, blurb_input, preset, genre_input, instrumental_textbox], outputs=[messages, chatbot_history])
582
-
583
- # def get_conversation(story_textbox, chatbot_history, messages):
584
- # curr_chatbot_value = chatbot_history.copy()
585
- # curr_messages_value = messages.copy()
586
- # for i in range(3):
587
- # for journal_response_value, chatbot_history_value, messages_value in get_journal_response(story_textbox, curr_chatbot_value, curr_messages_value):
588
- # curr_chatbot_value = chatbot_history_value
589
- # curr_messages_value = messages_value
590
- # journal_response.value = journal_response_value
591
- # yield chatbot_history_value, messages_value
592
-
593
- # for _, chatbot_history_value, messages_value in model_chat(journal_response_value, curr_chatbot_value, curr_messages_value, auto=True):
594
- # # Update the gr.State objects
595
- # curr_chatbot_value = chatbot_history_value
596
- # curr_messages_value = messages_value
597
- # yield chatbot_history_value, messages_value
598
 
599
 
600
  with gr.Row():
601
  song_link = gr.State(value="")
602
  song = gr.HTML()
603
 
604
- download_btn = gr.Button("Download Conversation")
605
-
606
- def download_conversation(messages):
607
- #get time
608
- now = get_current_time()
609
- # write messages to JSON file
610
- with open(f'conversation_{now}.json', 'w') as f:
611
- json.dump(messages, f)
612
 
 
 
 
 
 
 
613
 
614
- # with gr.Row():
615
- # song = gr.Audio(label='Song', format="bytes", streaming=True) # type='filepath', sources=[])
616
- # with gr.Accordion("Show Lyrics", open=True):
617
- # lyrics_display = gr.Markdown("[...]")
618
- # song_tags = gr.Markdown("")
619
 
620
  # with gr.Accordion("Advanced", open=False):
621
  # suno_tags = gr.Textbox(value="ballad, male, dramatic, emotional, strings", label="Gen input tags")
622
  # story_textbox = gr.TextArea(label="Story to provide context to songwriter", value="", max_lines=3)
623
 
624
- # genre_input.blur(get_starting_messages, inputs=[character, preset, genre_input], outputs=[messages, chatbot_history])
625
-
626
- def lyrics_from_convo(self, messages, character_preset, section_list, temperature=1.0):
627
- conversation_text = ""
628
- for m in messages[1:]:
629
- name = "Lupe" if m['role'] == 'assistant' else "User"
630
- conversation_text += f"{name}: {m['content']}\n"
631
 
632
- section_list = [x[:x.index(':')] + " (" + x[x.index(':')+2:] + ")" for x in section_list.split("\n")]
633
-
634
- filepath='./prompt_lyrics_from_convo.txt'
635
- with open(filepath, 'r', encoding='utf-8') as file:
636
- prompt = file.read()
637
- prompt = prompt.replace("{conversation_text}", conversation_text).replace("{a songwriter from NYC}", character_preset)
638
- prompt += "\nSections: " + ", ".join(section_list)
639
- convo = [
640
- {
641
- "role": "user",
642
- "content": prompt,
643
- },
644
- ]
645
- response = self.oai_client.chat.completions.create(
646
- model="gpt-4o",
647
- messages=convo,
648
- stream=True,
649
- temperature=temperature
650
- )
651
-
652
- current_response = ""
653
- for chunk in response:
654
- if chunk.choices[0].delta.content is not None:
655
- # print ("chunk", chunk.choices[0].delta.content)
656
- current_response += chunk.choices[0].delta.content
657
- yield "\n".join(current_response.split("\n")[1:])
658
-
659
- # generate_lyrics.click(get_conversation, inputs=[story_textbox, chatbot_history, messages], outputs=[chatbot_history, messages]).then(lyrics_from_convo, inputs=[messages, character, instrumental_textbox, songwriter_creativity], outputs=[lyrics_display])
660
-
661
  def reset_textbox(textbox):
662
  return ""
663
  def set_snippet_query(textbox):
@@ -681,15 +151,10 @@ with gr.Blocks() as demo:
681
  inputs=[genre_input, textbox, chatbot_history, messages],
682
  outputs=[textbox, chatbot_history, messages]).then(reset_textbox, inputs=[textbox], outputs=[textbox])
683
 
684
- # start.click(make_song,
685
- # inputs=[genre_input, lyrics_display, suno_tags], outputs=[song])
686
-
687
  done.click(set_finish_query, inputs=[textbox], outputs=[textbox]).then(model_chat,
688
  inputs=[genre_input, textbox, chatbot_history, messages],
689
  outputs=[textbox, chatbot_history, messages, lyrics_display]).then(
690
  set_lyrics_song_displays, inputs=[messages], outputs=[lyrics_display, song]).then(reset_textbox, inputs=[textbox], outputs=[textbox])
691
-
692
-
693
 
694
 
695
  demo.queue(api_open=False)
 
1
+ from typing import List, Tuple, Dict
2
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
+ from utils.song_utils import generate_song_seed, get_starting_messages, messages_to_history, update_song_details, get_sections
5
+ from chat import model_chat
6
  from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
+ History = List[Tuple[str, str]] # a type: pairs of (query, response), where query is user input and response is system output
9
+ Messages = List[Dict[str, str]] # a type: list of messages with role and content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  with gr.Blocks() as demo:
12
  gr.Markdown("""<center><font size=8>AI Songwriter (alpha)</center>""")
 
46
  with gr.Row():
47
  continue_btn = gr.Button("Continue to Next Step", interactive=False)
48
 
49
+
50
def clean_song_seed(song_seed):
    """Strip the model's 'Suggested Song Concept:' header, if present,
    and return only the concept text that follows it."""
    marker = "Suggested Song Concept:"
    if marker not in song_seed:
        return song_seed
    # Keep the text between the first and (any) second occurrence of the
    # marker, exactly as split()[1] does.
    return song_seed.split(marker)[1].strip()
54
  generate_seed_button.click(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output]).then(clean_song_seed, inputs=[instrumental_output], outputs=[instrumental_output])
55
+
56
def make_row_visible(x):
    """Reveal the concept row together with its instruction banner.

    The input value is unused; Gradio just needs a callable to wire the
    click event. Returns updated (Row, Markdown) components.
    """
    banner = gr.Markdown(
        """<center><font size=4>Here it is! Hit 'Approve' to confirm this concept. Edit the concept directly or hit 'Try Again' to get another suggestion.</font></center>""",
        visible=True,
    )
    return gr.Row(visible=True), banner
58
def enable_button(x):
    """Return an interactive 'Continue to Next Step' button (input value unused)."""
    label = "Continue to Next Step"
    return gr.Button(label, interactive=True)
60
  generate_seed_button.click(make_row_visible, inputs=[generate_seed_button], outputs=[concept_row, concept_desc])
61
  approve_button.click(enable_button, inputs=[approve_button], outputs=[continue_btn])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  try_again_button.click(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output])
64
+
65
def change_tab(id):
    """Switch the Tabs container to the tab whose id matches `id`."""
    target = gr.Tabs(selected=id)
    return target
67
  continue_btn.click(change_tab, gr.Number(1, visible=False), tabs)
68
 
69
 
 
70
  with gr.TabItem("Generation", id=1): #index is 1
71
  start_song_gen = gr.State(value=False)
72
  gr.Markdown("""<center><font size=4>Now, chat with an AI songwriter to make your song! Tip: get and tune an audio snippet well first and then put effort into the story. Hit finish when ready to hear full song.</font></center>""")
 
86
  songwriter_creativity = gr.Slider(label="Songwriter LLM Temperature", minimum=0, maximum=1, step=0.01, value=1)
87
  lyrics_display = gr.TextArea("[...]", label="Generated Lyrics", show_copy_button=True, container=True)
88
 
89
+ section_meanings = gr.State(value="")
90
  approve_button.click(update_song_details, inputs=[instrumental_output], outputs=[genre_input, title_input, blurb_input]).then(get_sections, inputs=[blurb_input, instrumental_output], outputs=[section_meanings])
91
  continue_btn.click(get_starting_messages, inputs=[instrumental_textbox, title_input, blurb_input, genre_input, section_meanings], outputs=[messages, chatbot_history])
92
 
93
  with gr.Row():
94
  textbox = gr.Textbox(lines=1, label='Send a message', show_label=False, placeholder='Send a message', scale=4)
 
 
 
 
 
 
 
 
 
 
 
95
  submit = gr.Button("Send", scale=2)
96
 
97
  with gr.Row():
 
107
  return messages, chatbot_history, ''
108
 
109
  reset_button.click(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, lyrics_display])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
 
112
  with gr.Row():
113
  song_link = gr.State(value="")
114
  song = gr.HTML()
115
 
116
+ # download_btn = gr.Button("Download Conversation")
 
 
 
 
 
 
 
117
 
118
+ # def download_conversation(messages):
119
+ # #get time
120
+ # now = get_current_time()
121
+ # # write messages to JSON file
122
+ # with open(f'conversation_{now}.json', 'w') as f:
123
+ # json.dump(messages, f)
124
 
 
 
 
 
 
125
 
126
  # with gr.Accordion("Advanced", open=False):
127
  # suno_tags = gr.Textbox(value="ballad, male, dramatic, emotional, strings", label="Gen input tags")
128
  # story_textbox = gr.TextArea(label="Story to provide context to songwriter", value="", max_lines=3)
129
 
 
 
 
 
 
 
 
130
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
def reset_textbox(textbox):
    """Clear the message textbox; the previous value is deliberately ignored."""
    return ""
133
  def set_snippet_query(textbox):
 
151
  inputs=[genre_input, textbox, chatbot_history, messages],
152
  outputs=[textbox, chatbot_history, messages]).then(reset_textbox, inputs=[textbox], outputs=[textbox])
153
 
 
 
 
154
  done.click(set_finish_query, inputs=[textbox], outputs=[textbox]).then(model_chat,
155
  inputs=[genre_input, textbox, chatbot_history, messages],
156
  outputs=[textbox, chatbot_history, messages, lyrics_display]).then(
157
  set_lyrics_song_displays, inputs=[messages], outputs=[lyrics_display, song]).then(reset_textbox, inputs=[textbox], outputs=[textbox])
 
 
158
 
159
 
160
  demo.queue(api_open=False)
assistant.py DELETED
@@ -1,53 +0,0 @@
1
- import requests
2
- import json
3
- import time
4
- import os
5
- from typing import Optional, Tuple, List, Dict
6
- from typing_extensions import override
7
- from openai import AssistantEventHandler, OpenAI
8
- from openai.types.beta.threads import Text, TextDelta
9
-
10
- client = OpenAI(
11
- api_key=os.getenv("OPEN_AI_KEY"),
12
- )
13
-
14
class EventHandler(AssistantEventHandler):
    """Streams an OpenAI Assistants run.

    Accumulates streamed text deltas and answers the run's tool calls
    (demo weather functions) whenever the run requires action.
    """

    def __init__(self):
        # BUG FIX: the base AssistantEventHandler keeps per-run state
        # (e.g. the current_run used in submit_tool_outputs below); it
        # must be initialized or streaming callbacks will misbehave.
        super().__init__()
        self.current_response = ""  # full text accumulated so far
        self.text_deltas = []       # individual delta strings, in arrival order

    @override
    def on_event(self, event):
        """Route raw stream events to the tool-call / text-delta handlers."""
        if event.event == 'thread.run.requires_action':
            run_id = event.data.id
            self.handle_requires_action(event.data, run_id)
        elif event.event == "thread.message.delta" and event.data.delta.content:
            self.on_text_delta(event.data.delta, event.data.snapshot)

    def handle_requires_action(self, data, run_id):
        """Answer the run's pending tool calls with canned demo outputs."""
        tool_outputs = []
        for tool in data.required_action.submit_tool_outputs.tool_calls:
            if tool.function.name == "get_current_temperature":
                tool_outputs.append({"tool_call_id": tool.id, "output": "57"})
            elif tool.function.name == "get_rain_probability":
                tool_outputs.append({"tool_call_id": tool.id, "output": "0.06"})
        self.submit_tool_outputs(tool_outputs, run_id)

    def submit_tool_outputs(self, tool_outputs, run_id):
        """Send tool outputs back and stream the continued run to stdout."""
        with client.beta.threads.runs.submit_tool_outputs_stream(
            thread_id=self.current_run.thread_id,
            run_id=self.current_run.id,
            tool_outputs=tool_outputs,
            event_handler=EventHandler(),
        ) as stream:
            for text in stream.text_deltas:
                print(text, end="", flush=True)
            print()

    @override
    def on_text_delta(self, delta: TextDelta, snapshot: Text):
        """Record one streamed text fragment and echo it to stdout."""
        if delta.value:
            self.current_response += delta.value
            self.text_deltas.append(delta.value)
            print(delta.value, end="", flush=True)
53
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chat.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Tuple, Dict
2
+ import os
3
+ import json
4
+ from openai import OpenAI
5
+
6
+ from suno import make_song, concat_snippets
7
+ from gpt_calls import AI_Songwriter
8
+ from utils.song_utils import messages_to_history
9
+
10
+ History = List[Tuple[str, str]] # a type: pairs of (query, response), where query is user input and response is system output
11
+ Messages = List[Dict[str, str]] # a type: list of messages with role and content
12
+
13
+ client_key = os.getenv("OPEN_AI_KEY")
14
+ oai_client = OpenAI(
15
+ api_key=client_key,
16
+ )
17
+
18
# Suno audio links embed the clip id after this prefix.
SUNO_CLIP_PREFIX = "https://audiopipe.suno.ai/?item_id="


def _extract_clip_id(song_link: str) -> str:
    """Return the Suno clip id embedded in an audio-pipe link."""
    return song_link.split(SUNO_CLIP_PREFIX)[1]


def _joined_lyrics(sections_written) -> str:
    """Normalize the 'sections_written' tool argument (str, list, or other)
    into a single newline-joined lyrics string."""
    if isinstance(sections_written, str):
        return sections_written
    if isinstance(sections_written, list):
        return "\n".join(sections_written)
    return ""


def _assistant_followup(msgs: Messages) -> str:
    """Get a plain assistant reply that can see the preceding tool output."""
    response = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=msgs,
    )
    return response.choices[0].message.content


def model_chat(genre_input, query: Optional[str], history: Optional[History],
               messages: Optional[Messages], auto=False) -> Tuple[str, str, History, Messages]:
    """Run one turn of the songwriter chat, dispatching any tool calls.

    Generator: yields (textbox_text, chat_history, messages, lyrics_display)
    4-tuples so the Gradio UI can stream intermediate states. With
    ``auto=True`` the last message is replaced instead of appended
    (used for scripted turns).
    """
    if query is None:
        query = ''

    # Nudge the model toward the write_section tool unless the user asked a question.
    if not query.endswith('?'):
        query += " Use write_section when you have a large amount of story to pull from to write the next section! Alternatively ask me a follow up before moving to write."

    with open('ai_tools.json') as f:
        ai_tools = json.load(f)

    songwriterAssistant = AI_Songwriter(client_key=client_key)

    if auto:
        # Scripted turn: overwrite the last message rather than appending.
        messages = messages[:-1] + [{'role': 'user', 'content': query}]
    else:
        messages = messages + [{'role': 'user', 'content': query}]

    response_message = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=ai_tools,
        tool_choice="auto",
    )
    print(response_message, "model chat response")

    tool_calls = response_message.choices[0].message.tool_calls
    if not tool_calls:
        # No tool requested: return the plain assistant reply.
        current_response = response_message.choices[0].message.content
        new_messages = messages + [{'role': 'assistant', 'content': current_response}]
        history = messages_to_history(new_messages)
        yield '', history, new_messages, '[...]'
        return

    # Record the assistant message carrying the tool call(s): every 'tool'
    # role reply below must follow a message with matching tool_calls.
    messages.append({
        "role": response_message.choices[0].message.role,
        "content": response_message.choices[0].message.content,
        "tool_calls": tool_calls,
        "function_call": response_message.choices[0].message.function_call,
    })

    for tool_call in tool_calls:
        print(tool_call)
        tool_call_id = tool_call.id
        tool_function_name = tool_call.function.name
        # BUG FIX: the API delivers arguments as a JSON string; parse it
        # with json.loads instead of eval()-ing model-controlled text.
        tool_query_args = json.loads(tool_call.function.arguments)

        if tool_function_name in ('write_section', 'revise_section_lyrics'):
            # Instrumental args belong to a later audio step, not the lyric writer.
            tool_query_args.pop('snippet_instrumental_tags', None)
            tool_query_args.pop('snippet_clip_to_continue_from', None)
            if tool_function_name == 'write_section':
                lyrics = songwriterAssistant.write_section(**tool_query_args)
            else:
                lyrics = songwriterAssistant.revise_section_lyrics(**tool_query_args)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id,
                            'name': tool_function_name, 'content': lyrics}
            new_messages = messages + [tool_message]
            current_response = _assistant_followup(new_messages)
            new_messages = new_messages + [{'role': 'assistant', 'content': current_response}]
            history = messages_to_history(new_messages)
            yield '', history, new_messages, '[...]'

        elif tool_function_name == 'revise_instrumental_tags':
            snippet_instrumental_tags = tool_query_args['current_instrumental_tags']
            user_instrumental_feedback = tool_query_args['user_instrumental_feedback']
            new_instrumental_tags = songwriterAssistant.revise_instrumental_tags(
                snippet_instrumental_tags, user_instrumental_feedback)

            current_lyrics = _joined_lyrics(tool_query_args['sections_written'])
            song_link = make_song(current_lyrics, new_instrumental_tags)
            clip_id = _extract_clip_id(song_link)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name,
                            'content': f'new instrumental tags: {new_instrumental_tags}, clip id: {clip_id}'}
            audio_message = {'role': 'assistant',
                             'content': f'Sure! I\'ve revised the instrumental tags: {new_instrumental_tags}\nCurrent lyrics: {current_lyrics}\n\n <audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>clip id: {clip_id}</p>'}
            audio_message['content'] += f'\n\nWhat do you think?'
            new_messages = messages + [tool_message, audio_message]
            new_history = messages_to_history(new_messages)
            yield '', new_history, new_messages, '[...]'

        elif tool_function_name == 'merge_all_snippets':
            updated_clip_url, updated_lyrics, clips_list = concat_snippets(tool_query_args['last_snippet_id'])
            updated_clip_id = _extract_clip_id(updated_clip_url)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name,
                            'content': f'updated clip id: {updated_clip_id}\nupdated lyrics: {updated_lyrics}\nupdated clips path: {clips_list}'}
            audio_message = {'role': 'assistant',
                             'content': f'Sure! All the clips are now merged. <p>updated lyrics: {updated_lyrics}</p><audio controls autoplay><source src="{updated_clip_url}" type="audio/mp3"></audio><p>updated clip id: {updated_clip_id}</p><p>updated clips path: {clips_list}</p>'}
            new_messages = messages + [tool_message, audio_message]
            new_history = messages_to_history(new_messages)
            yield '', new_history, new_messages, '[...]'

        elif tool_function_name == 'finish_full_song':
            # Instrumental args are consumed here, not by write_all_lyrics.
            snippet_instrumental_tags = tool_query_args.pop('snippet_instrumental_tags', None)
            snippet_clip_to_continue_from = tool_query_args.pop('snippet_clip_to_continue_from', None)

            current_lyrics = _joined_lyrics(tool_query_args['sections_written'])

            # Step 1: write the remaining lyrics and show them immediately.
            remaining_lyrics = songwriterAssistant.write_all_lyrics(**tool_query_args)
            full_lyrics = current_lyrics + remaining_lyrics + "\n[End]"
            yield '', history, messages, full_lyrics

            # Step 2: render audio for the remaining lyrics.
            song_link = make_song(remaining_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id,
                            'name': tool_function_name, 'content': f'{full_lyrics}'}
            audio_message = {'role': 'assistant',
                             'content': f'New snippet: \n <audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio>'}
            new_messages = messages + [tool_message, audio_message]
            new_history = messages_to_history(new_messages)
            yield '', new_history, new_messages, full_lyrics

            # Step 3: merge with earlier clips when continuing from one.
            if snippet_clip_to_continue_from not in [None, ""]:
                updated_clip_url, updated_lyrics, clips_list = concat_snippets(_extract_clip_id(song_link))
            else:
                updated_clip_url, updated_lyrics, clips_list = song_link, remaining_lyrics, []
            updated_clip_id = _extract_clip_id(updated_clip_url)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name,
                            'content': f'updated clip id: {updated_clip_id}\nupdated lyrics: {updated_lyrics}\nupdated clips path: {clips_list}'}
            audio_message = {'role': 'assistant',
                             'content': f'All done! Thank you for participating :) \nFinal Lyrics: {full_lyrics} \nFinal song: <audio controls autoplay><source src="{updated_clip_url}" type="audio/mp3"></audio><p>clip id: {updated_clip_id}</p>'}
            new_messages = messages + [tool_message, audio_message]
            new_history = messages_to_history(new_messages)
            yield '', new_history, new_messages, '[...]'

        elif tool_function_name == 'get_audio_snippet':
            snippet_lyrics = tool_query_args['snippet_lyrics'] + "\n[End]"
            snippet_instrumental_tags = tool_query_args['snippet_instrumental_tags']
            snippet_clip_to_continue_from = tool_query_args.get('snippet_clip_to_continue_from')
            song_link = make_song(snippet_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from)
            clip_id = _extract_clip_id(song_link)

            tool_message = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name,
                            'content': f'instrumental tags: {tool_query_args["snippet_instrumental_tags"]}, clip id: {clip_id}'}
            audio_message_content = "Here's what I've come up with:\n" + snippet_lyrics + '\n\n' + f'<audio controls autoplay><source src="{song_link}" type="audio/mp3"></audio><p>instrumental tags: {tool_query_args["snippet_instrumental_tags"]}</p><p>clip id: {clip_id}</p>'
            audio_message_content += f'<p>continued from clip: {snippet_clip_to_continue_from}</p>'
            audio_message = {'role': 'assistant', 'content': audio_message_content}
            new_messages = messages + [tool_message, audio_message]
            new_history = messages_to_history(new_messages)
            # BUG FIX: yield the full 4-tuple; every other path (and the
            # Gradio wiring of 4 outputs) expects a lyrics slot as well.
            yield '', new_history, new_messages, '[...]'

        else:
            print(f"Error: function {tool_function_name} does not exist")
data/emotions.json DELETED
@@ -1,28 +0,0 @@
1
- [
2
- "Inspired",
3
- "Lonely",
4
- "Fearful",
5
- "Bitter",
6
- "Nostalgic",
7
- "Defiant",
8
- "Stressed",
9
- "Overwhelmed",
10
- "Motivated",
11
- "Content",
12
- "Confident",
13
- "Happy",
14
- "Bitter",
15
- "Cursed",
16
- "Depressed",
17
- "Excited",
18
- "Determined",
19
- "Angry",
20
- "Lost",
21
- "Regretful",
22
- "Humble",
23
- "Heartbroken",
24
- "Hopeful",
25
- "Anxious",
26
- "Grateful",
27
- "Longful"
28
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/genres.json DELETED
@@ -1,34 +0,0 @@
1
- [
2
- "R&B",
3
- "Disco Pop",
4
- "Hyperpop",
5
- "Pitched up R&B",
6
- "Nostalgic 2000s Hip Hop",
7
- "H&M Pop",
8
- "Cinematic Pop",
9
- "Drill",
10
- "Feel good House",
11
- "R&B House",
12
- "Piano ballad Pop",
13
- "Piano ballad R&B",
14
- "Carti Indie Pop",
15
- "Mountaintop Guitar",
16
- "Arpeggiated Guitar",
17
- "Indie Rock",
18
- "EDM",
19
- "Sped up funk house",
20
- "Alternative Hip Hop",
21
- "Reggaeton",
22
- "Trap",
23
- "Country Pop",
24
- "Synthwave",
25
- "Ambient",
26
- "Folk Rock",
27
- "K-Pop",
28
- "Jazz Fusion",
29
- "Classical Crossover",
30
- "Latin Pop",
31
- "Funk",
32
- "Psychedelic Rock",
33
- "Liquid DnB"
34
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/keywords.json DELETED
@@ -1,7 +0,0 @@
1
- [
2
- "Balcony",
3
- "Teasing",
4
- "Club",
5
- "Detached",
6
- "Responding"
7
- ]
 
 
 
 
 
 
 
 
data/topics.json DELETED
@@ -1,34 +0,0 @@
1
- [
2
- "Ambition",
3
- "Breakup",
4
- "Longing",
5
- "Growing up",
6
- "Growth",
7
- "Acceptance",
8
- "Opportunity",
9
- "Passion",
10
- "Longing",
11
- "Romance",
12
- "House Party",
13
- "Confidence",
14
- "Support",
15
- "Hope",
16
- "Suicide",
17
- "Love",
18
- "Sex",
19
- "Drugs",
20
- "Life Changes",
21
- "Rehab",
22
- "Being Bipolar",
23
- "Feeling free",
24
- "Alcoholism",
25
- "Post-breakup depression",
26
- "Getting attached",
27
- "Catching feeling too fast",
28
- "Missing Home",
29
- "Heartbroken",
30
- "Getting Turnt",
31
- "Childhood Nostalgia",
32
- "Falling in Love",
33
- "Self-questioning"
34
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
extra_app_ideas.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Genre list
3
+ Preset dropdown: Missing Home, Heartbroken, Getting Turnt, Childhood Nostalgia, (Custom) How are you?
4
+ - tags based on preset
5
+ Artist identity dropdown: A 15-year old boy who dreams of being a broadway actor, A 23-year old soft but bombastic woman who loves to rap, dance, and take over the streets, A 30-year old man who has plans to take over the world as a villain
6
+
7
+ male tenor, dramatic, emotional, strings
8
+
9
+ pass artist identity in starting prompt to gpt-4 conversation.
10
+ pass preset dropdown to gpt-4 conversation to inspire the questions that Lupe asks the user.
11
+
12
+ -Ask every 4 back-and-forths do you want to talk more? Or are you ready for your song? (add button for song in assistant's message)
13
+
14
+ -Mention lyrics
15
+ -Mention every 4 back-and-forths lyrics that you’ll include in the song [calling gpt-4 to generate the lyrics and identify one line that's most relevant to the last message]
16
+ '''
gpt_calls.py CHANGED
@@ -40,7 +40,7 @@ class AI_Songwriter:
40
  instruction += "You are also given a section description, genre, era, and overall description of the song."
41
 
42
  ## read in prompt lyrics from convo .txt and add it to instruction
43
- with open("write_section_ex.txt", "r") as f:
44
  convo = f.read()
45
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
46
 
@@ -99,7 +99,7 @@ class AI_Songwriter:
99
 
100
  instruction += "You are also given a genre, era, and the rest of the section."
101
 
102
- with open("revise_section_ex.txt", "r") as f:
103
  convo = f.read()
104
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
105
 
@@ -142,7 +142,7 @@ class AI_Songwriter:
142
  def write_all_lyrics(self, sections_to_be_written, sections_written, overall_song_description):
143
  instruction = "Write the remainder of this full song given an overall description of the song, genre, era, and a description of the sections to complete:"
144
 
145
- with open("write_full_song_ex.txt", "r") as f:
146
  convo = f.read()
147
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
148
 
 
40
  instruction += "You are also given a section description, genre, era, and overall description of the song."
41
 
42
  ## read in prompt lyrics from convo .txt and add it to instruction
43
+ with open("prompts/write_section_ex.txt", "r") as f:
44
  convo = f.read()
45
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
46
 
 
99
 
100
  instruction += "You are also given a genre, era, and the rest of the section."
101
 
102
+ with open("prompts/revise_section_ex.txt", "r") as f:
103
  convo = f.read()
104
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
105
 
 
142
  def write_all_lyrics(self, sections_to_be_written, sections_written, overall_song_description):
143
  instruction = "Write the remainder of this full song given an overall description of the song, genre, era, and a description of the sections to complete:"
144
 
145
+ with open("prompts/write_full_song_ex.txt", "r") as f:
146
  convo = f.read()
147
  instruction += "Here's an example:\n{convo}\nNow do it for this input:"
148
 
old_code.py DELETED
@@ -1,67 +0,0 @@
1
- # open_step_two = gr.Button("STEP 2: Pick a story (REQUIRED FOR AUTOGPT)")
2
- # journal_entries_visible = gr.State(value=False)
3
-
4
-
5
- # # Preset dropdown: Missing Home, Heartbroken, Getting Turnt, Childhood Nostalgia, (Custom) How are you?
6
-
7
- # story_choices = [
8
- # "ENTER YOUR OWN",
9
- # "Missing Home after a lonely night",
10
- # "Heartbroken after the fourth date",
11
- # "Getting Turnt after making it big",
12
- # "Childhood Nostalgia",
13
- # "Falling in Love on the train",
14
- # "Self-questioning after my first big song failure",
15
- # "The night in Spain with the crazy Manchester girl",
16
- # "Blacking out my last night in NOLA",
17
- # "My first concert: the Off-Season tour",
18
- # "The night I got my first tattoo",
19
- # "The summer after high school (Kaylee)",
20
- # "Deciding to take control of shit",
21
- # "The DJ had us falling in love",
22
- # "Why does drinking feel so good",
23
- # "The camera girl from Royale",
24
- # "St. Patty's with the boys",
25
- # "Losing my VVVVV",
26
- # "In love with the idea of success",
27
- # "Summer nights in Washington Square Park",
28
- # "All I'm asking for is just one night",
29
- # "I don't think imma make it"
30
- # ]
31
-
32
- # with gr.Row(visible=journal_entries_visible):
33
- # preset = gr.Dropdown(
34
- # label="Journal entries",
35
- # choices=story_choices,
36
- # value="",
37
- # interactive=True,
38
- # )
39
- # entry_text = {
40
- # "The night in Spain with the crazy Manchester girl": "data/journals/manchester_girl.txt",
41
- # "Missing Home after a lonely night": "data/journals/missing_home.txt",
42
- # "Heartbroken after the fourth date": "data/journals/heartbroken.txt",
43
- # }
44
-
45
-
46
- # with gr.Column():
47
- # journal_title = gr.Textbox(label="Journal Title")
48
- #
49
- # add_story_button = gr.Button("Add Story")
50
-
51
- # def update_story_textbox(preset):
52
- # return gr.TextArea(label="Full Story", value=open(entry_text[preset]).read(), max_lines=3)
53
-
54
- # def save_journal_entry(journal_title_value, story_textbox_value):
55
- # song_path = f"data/journals/{journal_title.value}.txt"
56
- # with open("data/journals/custom_journal.txt", "w") as f:
57
- # f.write(story_textbox_value)
58
-
59
- # preset.change(update_story_textbox, inputs=[preset], outputs=[story_textbox])
60
-
61
-
62
-
63
- # # Toggle visibility when button is clicked
64
- # def toggle_journal_entries():
65
- # return not journal_entries_visible.value
66
-
67
- # open_step_two.click(toggle_journal_entries, outputs=[journal_entries_visible])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
prompt_artist_convo.txt β†’ prompts/prompt_artist_convo.txt RENAMED
File without changes
prompt_lyric_snippet_extractor.txt β†’ prompts/prompt_lyric_snippet_extractor.txt RENAMED
File without changes
prompt_lyrics_from_convo.txt β†’ prompts/prompt_lyrics_from_convo.txt RENAMED
File without changes
prompt_lyrics_writer.txt β†’ prompts/prompt_lyrics_writer.txt RENAMED
File without changes
prompt_section_writer.txt β†’ prompts/prompt_section_writer.txt RENAMED
File without changes
prompt_snippet_checker.txt β†’ prompts/prompt_snippet_checker.txt RENAMED
File without changes
prompt_song_seed.txt β†’ prompts/prompt_song_seed.txt RENAMED
File without changes
revise_section_ex.txt β†’ prompts/revise_section_ex.txt RENAMED
File without changes
write_full_song_ex.txt β†’ prompts/write_full_song_ex.txt RENAMED
File without changes
write_section_ex.txt β†’ prompts/write_section_ex.txt RENAMED
File without changes
suno.py CHANGED
@@ -1,5 +1,6 @@
1
  import requests
2
  import time
 
3
 
4
  base_url = "http://127.0.0.1:8000"
5
  api_endpoint_submit = f"{base_url}/generate/"
@@ -11,15 +12,33 @@ headers = {"api-key": api_key}
11
  # tags = "lofi, chill, happy"
12
  # prompt = "I'm a fish swimming in the ocean\nI'm a bird flying in the sky\nI'm a flower blooming in the garden\nI'm a tree standing tall and high"
13
 
14
- # Takes about 2.5 minutes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  def generate_song(tags, prompt, save_path, clip_id=None, continue_at=30):
16
  # print("Generating song with tags", tags, "and prompt", prompt)
17
 
18
- # prompt_word_count = len(prompt.split(" "))
19
- # if prompt_word_count > 230:
20
- # print("Prompt too long, truncating to 230 words")
21
- # prompt = " ".join(prompt.split(" ")[:230])
22
-
23
  data = {
24
  "title": "Songchat " + str(int(time.time())),
25
  "tags": tags,
@@ -88,21 +107,6 @@ def generate_song(tags, prompt, save_path, clip_id=None, continue_at=30):
88
 
89
  return url
90
 
91
- # response = requests.get(url) #, stream=True)
92
- # chunk_size = 8192
93
- # print(url)
94
-
95
- # i = 0
96
-
97
- # for chunk in response.iter_content(chunk_size):
98
- # print("got chunk")
99
- # i += 1
100
- # if i % 20 == 0:
101
- # print(chunk)
102
- # yield chunk
103
- # with open(save_path, "wb") as f:
104
- # f.write(response.content)
105
- # print("Saved song to", save_path)
106
 
107
  def concat_snippets(clip_id):
108
  concat_url = f"{api_endpoint_concat}?clip_id={clip_id}"
 
1
  import requests
2
  import time
3
+ import os
4
 
5
  base_url = "http://127.0.0.1:8000"
6
  api_endpoint_submit = f"{base_url}/generate/"
 
12
  # tags = "lofi, chill, happy"
13
  # prompt = "I'm a fish swimming in the ocean\nI'm a bird flying in the sky\nI'm a flower blooming in the garden\nI'm a tree standing tall and high"
14
 
15
def make_song(snippet_lyrics, inst_tags, continue_from_clip=None, continue_at=None):
    """
    Generates a song based on provided lyrics and instrumental tags.

    Args:
        snippet_lyrics (str): The lyrics for the song snippet.
        inst_tags (str): The instrumental tags for the song.
        continue_from_clip (str, optional): The clip ID to continue from, if any. Defaults to None.
        continue_at (int, optional): The position to continue at in the clip. Defaults to None.

    Returns:
        str: The link to the generated song.
    """
    # Make sure the output directory exists before generate_song tries to save into it.
    os.makedirs("audio_clips", exist_ok=True)
    song_name = f"SG_{int(time.time())}"
    suno_song_path = f"./audio_clips/suno_{song_name}.wav"
    print("Passing to generate_song:", inst_tags, snippet_lyrics, suno_song_path)

    # Only forward the continuation arguments when extending an existing clip.
    if continue_from_clip:
        return generate_song(inst_tags, snippet_lyrics, suno_song_path, continue_from_clip, continue_at)
    return generate_song(inst_tags, snippet_lyrics, suno_song_path)
37
+
38
+ # Takes about 30 seconds
39
  def generate_song(tags, prompt, save_path, clip_id=None, continue_at=30):
40
  # print("Generating song with tags", tags, "and prompt", prompt)
41
 
 
 
 
 
 
42
  data = {
43
  "title": "Songchat " + str(int(time.time())),
44
  "tags": tags,
 
107
 
108
  return url
109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
  def concat_snippets(clip_id):
112
  concat_url = f"{api_endpoint_concat}?clip_id={clip_id}"
utils/__init__.py ADDED
File without changes
utils/song_utils.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+ from typing import Optional, Tuple, List, Dict
4
+
5
+ client_key = os.getenv("OPEN_AI_KEY")
6
+ oai_client = OpenAI(
7
+ api_key=client_key,
8
+ )
9
+
10
+ History = List[Tuple[str, str]] # a type: pairs of (query, response), where query is user input and response is system output
11
+ Messages = List[Dict[str, str]] # a type: list of messages with role and content
12
+
13
def generate_song_seed(baseline_seed):
    """
    Generates a song seed based on a baseline seed description.

    Args:
        baseline_seed (str): The baseline seed description to generate the song concept from.

    Yields:
        str: The generated song concept so far (cumulative text, one yield per streamed chunk).
    """
    prompt = (
        "Analyze this description of how someone is feeling and provide a suggestion of an interesting song concept to base a song off of. "
        "Here are three examples, now provide a song concept for this fourth:\n\n"
    )

    # Few-shot examples live in the prompts/ directory.
    with open('prompts/prompt_song_seed.txt', 'r', encoding='utf-8') as prompt_file:
        few_shot_examples = prompt_file.read()

    prompt += f"\n\n{few_shot_examples}{baseline_seed}\nSuggested Song Concept: "

    stream = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True
    )

    accumulated = ""
    for chunk in stream:
        piece = chunk.choices[0].delta.content
        # Skip empty keep-alive deltas; yield only when new text arrived.
        if piece:
            accumulated += piece
            yield accumulated
46
+
47
def get_sections(overall_meaning: str, section_list: str) -> str:
    """
    Generates section meanings based on the overall meaning and section list.

    Args:
        overall_meaning (str): The overall meaning of the song.
        section_list (str): A newline-separated string of section names.

    Returns:
        str: The generated section meanings.
    """
    # Section names arrive newline-separated; split them for comma-joining below.
    sections = section_list.split("\n")

    with open('prompts/prompt_section_writer.txt', 'r', encoding='utf-8') as prompt_file:
        prompt_content = prompt_file.read()

    request_messages = [{
        "role": "user",
        "content": f"{prompt_content}\n\nOverall meaning: {overall_meaning}\nSection list: {', '.join(sections)}\nSection meanings:"
    }]

    completion = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=request_messages,
    )

    return completion.choices[0].message.content
75
+
76
def messages_to_history(messages: List[Dict[str, str]]) -> List[Tuple[str, str]]:
    """
    Converts a list of messages into a history of user-assistant interactions.

    Args:
        messages: A list of message dictionaries, where each dictionary contains
            'role' (str) and 'content' (str) keys. The first message must be the
            system prompt and the second a user message.

    Returns:
        List[Tuple[str, str]]: One (user_content, assistant_content) pair per
        user/assistant exchange. Tool results and messages carrying 'tool_calls'
        are dropped, and the internal " Use write_section" suffix is stripped
        from user turns. (The previous annotation/docstring claimed a
        (str, History) tuple was returned; the function returns only the history.)

    Raises:
        AssertionError: If the first two messages are not system then user.
    """
    assert messages[0]['role'] == 'system' and messages[1]['role'] == 'user'

    # Filter out 'tool' messages and those containing 'tool_calls'
    messages_for_parsing = [msg for msg in messages if msg['role'] != 'tool' and 'tool_calls' not in msg]

    # Remove " Use write_section" from user messages
    messages_for_parsing = [
        {'role': msg['role'], 'content': msg['content'].split(" Use write_section")[0]} if msg['role'] == 'user' else msg
        for msg in messages_for_parsing
    ]

    # Create history from user-assistant message pairs: after the system prompt,
    # odd indices are user turns and even indices are assistant replies.
    history = [
        (q['content'], r['content'])
        for q, r in zip(messages_for_parsing[1::2], messages_for_parsing[2::2])
    ]

    return history
106
+
107
def get_starting_messages(song_lengths: str, song_title: str, song_blurb: str, song_genre: str, init_sections: str) -> Tuple[List[Dict[str, str]], History]:
    """
    Generates the initial messages for starting a songwriting session.

    Args:
        song_lengths (str): The lengths of the song sections.
        song_title (str): The title of the song.
        song_blurb (str): A brief description of the song.
        song_genre (str): The genre of the song.
        init_sections (str): The initial structure of the song sections.

    Returns:
        Tuple[List[Dict[str, str]], History]: The starting messages (system +
        user + first assistant reply) and the corresponding chat history.
    """
    system_prompt = (
        "You are an expert at writing songs. You are with an everyday person, and you will write the lyrics of the song "
        "based on this person's life by asking questions about a story of theirs. Design your questions on your own, without "
        "using your tools, to help you understand the user's story, so you can write a song about the user's experience that "
        "resonates with them. We have equipped you with a set of tools to help you write this story; please use them. You are "
        "very good at making the user feel comfortable, understood, and ready to share their feelings and story. Occasionally "
        "(every 2 messages or so) you will suggest some lyrics, one section at a time, and see what the user thinks of them. "
        "Do not suggest or ask for thoughts on more than one section at a time. Be concise and youthful."
    )

    user_prompt = (
        f"I have a story that could make this concept work well. The title is {song_title}, it's about {song_blurb} with a genre "
        f"{song_genre} and I think this should be the structure: {init_sections}\n{song_lengths}"
    )

    opening_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"The user has stated the following:\n {user_prompt}\n Introduce yourself and kick-off the songwriting process with a question."},
    ]

    # Ask the model for its opening question before the user sees anything.
    completion = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=opening_messages,
    )
    assistant_intro = completion.choices[0].message.content

    starting_messages = opening_messages + [{'role': 'assistant', 'content': assistant_intro}]
    return starting_messages, messages_to_history(starting_messages)
149
+
150
def update_song_details(instrumental_output: str) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """
    Analyzes the given instrumental output to extract the genre, title, and blurb of a song.

    Args:
        instrumental_output (str): The assessment and suggestion of a song concept.

    Returns:
        Tuple[Optional[str], Optional[str], Optional[str]]: A tuple containing the
        genre, title, and blurb of the song; any field missing from the model's
        reply comes back as None.
    """
    song_details_prompt = (
        "Analyze this assessment and suggestion of a song concept to extract the genre, one sentence blurb of what the song is about. "
        "Based on this, also suggest a song title. Output exactly three lines, in the format of 'genre: [genre]', 'title: [title]', 'blurb: [blurb]'.\n\n"
        f"{instrumental_output}"
    )

    response = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": song_details_prompt}]
    )

    response_lines = response.choices[0].message.content.split('\n')
    # maxsplit=1 so values that themselves contain ": " (e.g. a title like
    # "Home: The Return") are kept whole instead of being truncated at the
    # second colon, which the previous unbounded split did.
    genre = next((line.split(": ", 1)[1] for line in response_lines if "genre: " in line.lower()), None)
    title = next((line.split(": ", 1)[1] for line in response_lines if "title: " in line.lower()), None)
    blurb = next((line.split(": ", 1)[1] for line in response_lines if "blurb: " in line.lower()), None)

    return genre, title, blurb