ajayarora1235 committed on
Commit
97045b3
1 Parent(s): 146352e

cleaning things up, fixing direct editing and bug catching

Browse files
Files changed (4) hide show
  1. ai_tools.json +1 -1
  2. app.py +24 -18
  3. chat.py +19 -10
  4. gpt_calls.py +2 -2
ai_tools.json CHANGED
@@ -66,7 +66,7 @@
66
  "type": "function",
67
  "function": {
68
  "name": "edit_directly",
69
- "description": "A function to be called when the user asks to directly edit either the lyrics or genre via the instrumental tags. Returns the current section or instrumental tags for the user to edit directly.",
70
  "parameters": {
71
  "type": "object",
72
  "properties": {
 
66
  "type": "function",
67
  "function": {
68
  "name": "edit_directly",
69
+ "description": "A function to be called when the user asks to directly edit either the lyrics or genre instead of revising them together via back-and-forth chat. Returns the current section or instrumental tags for the user to edit directly. Do not call if the user asks to revise together; instead call ask_question.",
70
  "parameters": {
71
  "type": "object",
72
  "properties": {
app.py CHANGED
@@ -31,7 +31,7 @@ with gr.Blocks(css=css) as demo:
31
  gr.Markdown("""<center><font size=4>But first, let's generate a song seed to provide context to the AI Songwriter.</font></center>""")
32
  gr.Markdown("""<center><font size=3>If you're stuck thinking of a song idea, check out <a href="https://onestopforwriters.com/emotions" target="_blank">here</a>.</font></center>""")
33
  with gr.Row():
34
- feeling_input = gr.Textbox(label="What do you want the song to capture? What's an emotion(s) that you've personally felt that should be in the song? More vulnerable, better the song.", placeholder='Enter your story', scale=2)
35
  # audio_input = gr.Audio(sources=["upload"], type="numpy", label="Instrumental",
36
  # interactive=True, elem_id="instrumental-input")
37
 
@@ -69,7 +69,7 @@ with gr.Blocks(css=css) as demo:
69
  feeling_input.submit(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output]).then(clean_song_seed, inputs=[instrumental_output], outputs=[instrumental_output])
70
 
71
  def make_row_visible(x):
72
- return gr.Row(visible=True), gr.Markdown("""<center><font size=4>Here it is! Hit 'Approve' to confirm this concept. Edit the concept directly or hit 'Try Again' to get another suggestion.</font></center>""", visible=True)
73
  def enable_button(x):
74
  return gr.Button("Ready to Create", interactive=True)
75
  generate_seed_button.click(make_row_visible, inputs=[generate_seed_button], outputs=[concept_row, concept_desc])
@@ -101,7 +101,7 @@ with gr.Blocks(css=css) as demo:
101
 
102
  with gr.Row():
103
  with gr.Column(scale=2):
104
- chatbot_history = gr.Chatbot(type="messages", value=starting_history, label='SongChat', placeholder=None, layout='bubble', bubble_full_width=False, height=500, show_copy_button=True)
105
  with gr.Row():
106
  typical_responses = [textbox, submit]
107
 
@@ -110,15 +110,15 @@ with gr.Blocks(css=css) as demo:
110
 
111
  button_options = gr.State([])
112
  button_dict = gr.State({
113
- "revise lyrics": "Can we revise the lyrics?",
114
- "re-revise lyrics": "Can we revise the lyrics?",
115
  "edit lyrics directly": "Can I edit the lyrics directly for the whole section?",
116
  "generate audio snippet": "Can you generate an audio snippet?",
117
  "continue revising" : "Can we continue revising this section?",
118
  "generate audio snippet with new lyrics": "Can you generate an audio snippet with these new lyrics?",
119
  "return to original instrumental": "Can you use the original clip for this section instead?",
120
- "revise genre": "Can we revise the instrumental tags?",
121
- "re-revise genre": "Can we revise the instrumental tags?",
122
  "revise genre directly": "Can I edit the genre directly for the whole song?",
123
  "continue to next section": "Looks good! Let's move on to the next section.",
124
  "merge snippets": "Can you merge this snippet into its full song?"
@@ -171,15 +171,15 @@ with gr.Blocks(css=css) as demo:
171
  continue_btn.click(get_starting_messages, inputs=[instrumental_textbox, title_input, blurb_input, genre_input, section_meanings], outputs=[messages, chatbot_history])
172
 
173
  with Modal(visible=False) as modal_0:
174
- gr.Markdown("Welcome to the AI songwriter! The AI songwriter will help you write a song. You can chat with the AI, generate lyrics, and listen to audio snippets. Let's start by chatting with the AI.")
175
  with Modal(visible=False) as modal:
176
- gr.Markdown("The chatbot is run by an AI songwriter. It can respond to your conversations, generate lyrics and audio, and edit prior generations.\n\nNow, continue and respond to this second question from the AI songwriter.")
177
  with Modal(visible=False) as modal_1:
178
- gr.Markdown("The AI songwriter has now proposed a first verse! You now have the option to hear an audio snippet, revise the lyrics, or continue to the next section. The latter two options continue the conversation, whereas the first starts audio generation models. Select the 'get audio snippet' button to continue to the next step.")
179
  with Modal(visible=False) as modal_2:
180
- gr.Markdown("Awesome! You generated your first audio snippet./n/n As you work thru each section, the generated snippets are populated on the right panel. You'll be able to listen thru snippets as you work thru the song. \n\n "
181
- "The current section is also displayed on the right panel. You'll be able to revise sections via the chat or directly via the right panel. \n\n "
182
- "You're ready to start your official song! Hit the 'Start' button to start.")
183
  start_button = gr.Button("Start")
184
 
185
  continue_btn.click(lambda: Modal(visible=True), None, modal_0)
@@ -260,18 +260,24 @@ with gr.Blocks(css=css) as demo:
260
  reset_button = gr.Button("Reset", scale=2)
261
 
262
  def reset_chat(messages, chatbot_history):
263
- messages = messages[:2]
264
- chatbot_history = messages_to_history(messages[:2])
265
  return messages, chatbot_history, '', '', '', '', gr.HTML('<center>generating...</center>'), [], []
266
 
267
- reset_button.click(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios, button_options])
 
 
268
 
269
 
270
  done.click(set_finish_query, inputs=[textbox], outputs=[textbox]).then(model_chat,
271
  inputs=[genre_input, textbox, chatbot_history, messages, generated_audios],
272
- outputs=[textbox, chatbot_history, messages, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios, button_options])
 
 
273
 
274
- demo.load(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios])
 
 
275
 
276
 
277
  # with gr.Row():
 
31
  gr.Markdown("""<center><font size=4>But first, let's generate a song seed to provide context to the AI Songwriter.</font></center>""")
32
  gr.Markdown("""<center><font size=3>If you're stuck thinking of a song idea, check out <a href="https://onestopforwriters.com/emotions" target="_blank">here</a>.</font></center>""")
33
  with gr.Row():
34
+ feeling_input = gr.Textbox(label="How are you feeling today?", placeholder='Enter your emotions', scale=2)
35
  # audio_input = gr.Audio(sources=["upload"], type="numpy", label="Instrumental",
36
  # interactive=True, elem_id="instrumental-input")
37
 
 
69
  feeling_input.submit(generate_song_seed, inputs=[feeling_input], outputs=[instrumental_output]).then(clean_song_seed, inputs=[instrumental_output], outputs=[instrumental_output])
70
 
71
  def make_row_visible(x):
72
+ return gr.Row(visible=True), gr.Markdown("""<center><font size=4>Here it is! Hit 'Approve' to confirm this concept. Edit the concept directly or hit 'Try Again' to get another suggestion.</font></center>""", visible=False)
73
  def enable_button(x):
74
  return gr.Button("Ready to Create", interactive=True)
75
  generate_seed_button.click(make_row_visible, inputs=[generate_seed_button], outputs=[concept_row, concept_desc])
 
101
 
102
  with gr.Row():
103
  with gr.Column(scale=2):
104
+ chatbot_history = gr.Chatbot(type="messages", value=starting_history, label='SongChat', placeholder=None, layout='bubble', bubble_full_width=False, height=500)
105
  with gr.Row():
106
  typical_responses = [textbox, submit]
107
 
 
110
 
111
  button_options = gr.State([])
112
  button_dict = gr.State({
113
+ "revise lyrics": "Can we revise the lyrics together?",
114
+ "re-revise lyrics": "Can we revise the lyrics together?",
115
  "edit lyrics directly": "Can I edit the lyrics directly for the whole section?",
116
  "generate audio snippet": "Can you generate an audio snippet?",
117
  "continue revising" : "Can we continue revising this section?",
118
  "generate audio snippet with new lyrics": "Can you generate an audio snippet with these new lyrics?",
119
  "return to original instrumental": "Can you use the original clip for this section instead?",
120
+ "revise genre": "Can we revise the instrumental tags together?",
121
+ "re-revise genre": "Can we revise the instrumental tags together?",
122
  "revise genre directly": "Can I edit the genre directly for the whole song?",
123
  "continue to next section": "Looks good! Let's move on to the next section.",
124
  "merge snippets": "Can you merge this snippet into its full song?"
 
171
  continue_btn.click(get_starting_messages, inputs=[instrumental_textbox, title_input, blurb_input, genre_input, section_meanings], outputs=[messages, chatbot_history])
172
 
173
  with Modal(visible=False) as modal_0:
174
+ gr.Markdown("Welcome to the AI songwriter! The AI songwriter is a chatbot that will help you write a song. You can chat with the AI and guide it however you'd like. Let's start by chatting with the AI.")
175
  with Modal(visible=False) as modal:
176
+ gr.Markdown("The AI songwriter can respond to your stories and requests, generate lyrics and audio, and edit prior generations.\n\nNow, continue and respond to this second question from the AI songwriter to get to know you.")
177
  with Modal(visible=False) as modal_1:
178
+ gr.Markdown("The AI songwriter has now proposed a first verse! After each generation from the AI, you'll receive a list of buttons to guide it further. Select the 'get audio snippet' button to continue to the next step.")
179
  with Modal(visible=False) as modal_2:
180
+ gr.Markdown("Awesome! You generated your first audio snippet. The songwriter will continue for each section for the rest of the song, revising and iterating with you. \n"
181
+ "As the song gets generated, feel free to ask the songwriter any questions or guide it in any direction. \n"
182
+ "You're ready to start your study with the AI Songwriter! Hit the 'Start' button to start.")
183
  start_button = gr.Button("Start")
184
 
185
  continue_btn.click(lambda: Modal(visible=True), None, modal_0)
 
260
  reset_button = gr.Button("Reset", scale=2)
261
 
262
  def reset_chat(messages, chatbot_history):
263
+ messages = messages[:3]
264
+ chatbot_history = messages_to_history(messages[:3])
265
  return messages, chatbot_history, '', '', '', '', gr.HTML('<center>generating...</center>'), [], []
266
 
267
+ reset_button.click(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios, button_options]).then(
268
+ update_response_options, [button_options, button_dict], typical_responses
269
+ )
270
 
271
 
272
  done.click(set_finish_query, inputs=[textbox], outputs=[textbox]).then(model_chat,
273
  inputs=[genre_input, textbox, chatbot_history, messages, generated_audios],
274
+ outputs=[textbox, chatbot_history, messages, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios, button_options]).then(
275
+ update_response_options, [button_options, button_dict], typical_responses
276
+ )
277
 
278
+ demo.load(reset_chat, inputs=[messages, chatbot_history], outputs=[messages, chatbot_history, current_section, current_lyrics, curr_tags, clip_to_continue, curr_audio, generated_audios, button_options]).then(
279
+ update_response_options, [button_options, button_dict], typical_responses
280
+ )
281
 
282
 
283
  # with gr.Row():
chat.py CHANGED
@@ -8,6 +8,8 @@ import regex as re
8
  from gradio_modal import Modal
9
  import gradio as gr
10
  import time
 
 
11
 
12
  # Load environment variables from .env file
13
  load_dotenv()
@@ -37,8 +39,14 @@ async def call_with_timeout(coro, timeout):
37
  except asyncio.TimeoutError:
38
  return "Timeout"
39
 
 
 
 
 
 
 
40
 
41
- def model_chat(genre_input, query: Optional[str], history: Optional[History], messages: Optional[Messages], generated_audios: List[Tuple[str, str, str]], auto=False) -> Tuple[str, History, Messages, str, str, str, str, str, List]:
42
  if query is None:
43
  query = ''
44
  with open('ai_tools.json') as f:
@@ -52,7 +60,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
52
  messages = messages + [{'role': 'user', 'content': query}]
53
 
54
 
55
- messages_filtered = messages
56
  response_message = oai_client.chat.completions.create(
57
  model="gpt-4o",
58
  messages=messages_filtered,
@@ -302,7 +310,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
302
  sections_list = re.findall(r'\[.*?\]', current_lyrics)
303
 
304
  #current_lyrics = "\n".join(tool_query_args['sections_written'])
305
- song_link = make_song(current_lyrics, new_instrumental_tags)
306
  ## filter out suno link from tool query arg
307
  while "https://audiopipe.suno.ai/?item_id=" not in song_link:
308
  print("BUGGED OUT, trying again...")
@@ -314,7 +322,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
314
  yield '', new_history, new_messages, '', '', '', None, None, generated_audios, []
315
  return
316
  time.sleep(5)
317
- song_link = make_song(current_lyrics, new_instrumental_tags)
318
 
319
  clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
320
 
@@ -350,7 +358,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
350
 
351
  new_instrumental_tags = songwriterAssistant.revise_instrumental_tags(snippet_instrumental_tags, user_instrumental_feedback)
352
 
353
- song_link = make_song(current_lyrics, new_instrumental_tags)
354
  ## filter out suno link from tool query arg
355
  while "https://audiopipe.suno.ai/?item_id=" not in song_link:
356
  print("BUGGED OUT, trying again...")
@@ -362,7 +370,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
362
  yield '', new_history, new_messages, '', '', '', None, None, generated_audios, []
363
  return
364
  time.sleep(5)
365
- song_link = make_song(current_lyrics, new_instrumental_tags)
366
  clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
367
 
368
  tool_message_instrumental = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'revised lyrics: {revised_lyrics}\nrevised instrumental tags: {new_instrumental_tags}, clip id: {clip_id}'}
@@ -381,7 +389,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
381
  yield '', new_history, new_messages, tool_query_args["section_name"], revised_lyrics, new_instrumental_tags, clips_to_continue, f'<audio controls><source src="{song_link}" type="audio/mp3"></audio>', generated_audios, buttons
382
 
383
  elif tool_function_name == 'merge_all_snippets':
384
- updated_clip_url, updated_lyrics, updated_tags, clips_list = concat_snippets(tool_query_args['last_snippet_id'])
385
 
386
  if updated_clip_url == "Timeout":
387
  # Handle the timeout case
@@ -508,7 +516,7 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
508
  snippet_instrumental_tags = tool_query_args['snippet_instrumental_tags']
509
 
510
  snippet_clip_to_continue_from = tool_query_args.get('snippet_clip_to_continue_from', None)
511
- song_link = make_song(snippet_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from)
512
 
513
  if song_link == "Timeout":
514
  tool_message = {
@@ -625,12 +633,13 @@ def model_chat(genre_input, query: Optional[str], history: Optional[History], me
625
  else:
626
  print(f"Error: function {tool_function_name} does not exist")
627
  except Exception as e:
 
628
  error_message = {
629
  'role': 'assistant',
630
- 'content': f"An error occurred while processing your request: {str(e)}. Please re-phrase your request and try again."
631
  }
632
  messages_filtered.append(error_message)
633
- yield '', messages_to_history(messages), messages_filtered, '', '', '', '', None, generated_audios, []
634
 
635
 
636
  else:
 
8
  from gradio_modal import Modal
9
  import gradio as gr
10
  import time
11
+ from concurrent.futures import ThreadPoolExecutor
12
+
13
 
14
  # Load environment variables from .env file
15
  load_dotenv()
 
39
  except asyncio.TimeoutError:
40
  return "Timeout"
41
 
42
+ executor = ThreadPoolExecutor()
43
+
44
+ async def run_in_executor(func, *args):
45
+ loop = asyncio.get_event_loop()
46
+ return await loop.run_in_executor(executor, func, *args)
47
+
48
 
49
+ async def model_chat(genre_input, query: Optional[str], history: Optional[History], messages: Optional[Messages], generated_audios: List[Tuple[str, str, str]], auto=False) -> Tuple[str, History, Messages, str, str, str, str, str, List]:
50
  if query is None:
51
  query = ''
52
  with open('ai_tools.json') as f:
 
60
  messages = messages + [{'role': 'user', 'content': query}]
61
 
62
 
63
+ messages_filtered = messages.copy()
64
  response_message = oai_client.chat.completions.create(
65
  model="gpt-4o",
66
  messages=messages_filtered,
 
310
  sections_list = re.findall(r'\[.*?\]', current_lyrics)
311
 
312
  #current_lyrics = "\n".join(tool_query_args['sections_written'])
313
+ song_link = await call_with_timeout(run_in_executor(make_song, current_lyrics, new_instrumental_tags))
314
  ## filter out suno link from tool query arg
315
  while "https://audiopipe.suno.ai/?item_id=" not in song_link:
316
  print("BUGGED OUT, trying again...")
 
322
  yield '', new_history, new_messages, '', '', '', None, None, generated_audios, []
323
  return
324
  time.sleep(5)
325
+ song_link = await call_with_timeout(run_in_executor(make_song, current_lyrics, new_instrumental_tags))
326
 
327
  clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
328
 
 
358
 
359
  new_instrumental_tags = songwriterAssistant.revise_instrumental_tags(snippet_instrumental_tags, user_instrumental_feedback)
360
 
361
+ song_link = await call_with_timeout(run_in_executor(make_song, current_lyrics, new_instrumental_tags))
362
  ## filter out suno link from tool query arg
363
  while "https://audiopipe.suno.ai/?item_id=" not in song_link:
364
  print("BUGGED OUT, trying again...")
 
370
  yield '', new_history, new_messages, '', '', '', None, None, generated_audios, []
371
  return
372
  time.sleep(5)
373
+ song_link = await call_with_timeout(run_in_executor(make_song, current_lyrics, new_instrumental_tags))
374
  clip_id = song_link.split("https://audiopipe.suno.ai/?item_id=")[1]
375
 
376
  tool_message_instrumental = {'role': 'tool', 'tool_call_id': tool_call_id, 'name': tool_function_name, 'content': f'revised lyrics: {revised_lyrics}\nrevised instrumental tags: {new_instrumental_tags}, clip id: {clip_id}'}
 
389
  yield '', new_history, new_messages, tool_query_args["section_name"], revised_lyrics, new_instrumental_tags, clips_to_continue, f'<audio controls><source src="{song_link}" type="audio/mp3"></audio>', generated_audios, buttons
390
 
391
  elif tool_function_name == 'merge_all_snippets':
392
+ updated_clip_url, updated_lyrics, updated_tags, clips_list = await call_with_timeout(run_in_executor(concat_snippets, tool_query_args['last_snippet_id']))
393
 
394
  if updated_clip_url == "Timeout":
395
  # Handle the timeout case
 
516
  snippet_instrumental_tags = tool_query_args['snippet_instrumental_tags']
517
 
518
  snippet_clip_to_continue_from = tool_query_args.get('snippet_clip_to_continue_from', None)
519
+ song_link = await call_with_timeout(run_in_executor(make_song, snippet_lyrics, snippet_instrumental_tags, snippet_clip_to_continue_from), 45)
520
 
521
  if song_link == "Timeout":
522
  tool_message = {
 
633
  else:
634
  print(f"Error: function {tool_function_name} does not exist")
635
  except Exception as e:
636
+ print(str(e))
637
  error_message = {
638
  'role': 'assistant',
639
+ 'content': f"An error occurred while processing your request. Please re-phrase your request and try again."
640
  }
641
  messages_filtered.append(error_message)
642
+ yield '', messages_to_history(messages_filtered), messages_filtered, '', '', '', '', None, generated_audios, []
643
 
644
 
645
  else:
gpt_calls.py CHANGED
@@ -64,7 +64,7 @@ class AI_Songwriter:
64
  instruction += "and complements the sections provided."
65
  else:
66
  instruction += "."
67
- instruction += "You are also given a section description, genre, era, and overall description of the song."
68
 
69
  ## read in prompt lyrics from convo .txt and add it to instruction
70
  with open("prompts/write_section_ex.txt", "r") as f:
@@ -130,7 +130,7 @@ class AI_Songwriter:
130
  else:
131
  instruction += "."
132
 
133
- instruction += "You are also given a genre, era, and the rest of the section."
134
 
135
  with open("prompts/revise_section_ex.txt", "r") as f:
136
  convo = f.read()
 
64
  instruction += "and complements the sections provided."
65
  else:
66
  instruction += "."
67
+ instruction += "You are also given a section description, genre, era, and overall description of the song. Please do not literally quote and incorporate the ideas - be poetic and incorporate them artistically to express the emotion. "
68
 
69
  ## read in prompt lyrics from convo .txt and add it to instruction
70
  with open("prompts/write_section_ex.txt", "r") as f:
 
130
  else:
131
  instruction += "."
132
 
133
+ instruction += "You are also given a genre, era, and the rest of the section. Please do not literally quote and incorporate the ideas - be poetic and incorporate them artistically to express the emotion. "
134
 
135
  with open("prompts/revise_section_ex.txt", "r") as f:
136
  convo = f.read()