"""FastAPI demo that generates a research idea for a submitted topic.

One request is served at a time (a global busy flag guards the expensive
generation call), a daily counter caps total usage, and an APScheduler
cron job resets that counter at midnight.
"""

from fastapi import FastAPI, Form
from fastapi.responses import HTMLResponse
from jinja2 import Template
import markdown
import time
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
from agents import DeepResearchAgent, get_llms
import threading
from queue import Queue
import logging

# Guards the busy flag and the daily reply counter, both of which are
# touched from request-handler threads and the scheduler thread.
lock = threading.Lock()

app = FastAPI()

# Maximum number of replies allowed per day.
MAX_REPLIES_PER_DAY = 100

# Number of replies served so far today.
reply_count = 0

# Timestamp of the last counter reset (initialised at startup).
last_reset_time = datetime.now()

# True while a generation request is in flight; only one is allowed at a time.
is_processing = False

# Page template. NOTE(review): the HTML markup appears to have been stripped
# from the original source during extraction; the Jinja placeholders are
# intact and the text is preserved byte-for-byte.
html_template = """ CoI Agent online demo 😊

CoI Agent online demo 😊

Time Taken: {{ time_taken }} seconds
Today's Replies: {{ reply_count }}
Example Input:
Generating content, Usually takes 3-4 minutes, please wait...

Idea

{{ idea | safe }}
{% if error %}

Error

{{ error }}
{% endif %}
"""


def reset_counter():
    """Reset the daily reply counter to zero (runs in the scheduler thread)."""
    global reply_count
    # BUGFIX: take the lock — request threads increment this counter too.
    with lock:
        reply_count = 0


# Cron job: reset the counter every day at 00:00.
scheduler = BackgroundScheduler()
scheduler.add_job(reset_counter, 'cron', hour=0, minute=0)
scheduler.start()

# NOTE(review): declared but never consumed anywhere in this file.
request_queue = Queue()


@app.get("/", response_class=HTMLResponse)
def form_get():
    """Serve the landing page with a placeholder idea."""
    return Template(html_template).render(
        idea="This is an example of the idea generation",
        error=None,
        reply_count=reply_count,
        # BUGFIX: the template references time_taken; it was undefined here.
        time_taken=0,
    )


@app.post("/", response_class=HTMLResponse)
def form_post(topic: str = Form(...)):
    """Generate an idea for *topic* and render it into the page.

    Rejects the request when another one is already being processed or the
    daily cap has been reached; otherwise runs the (minutes-long) agent
    pipeline and returns the rendered result or the error message.
    """
    global reply_count
    global is_processing

    start_time = time.time()

    # BUGFIX: atomic check-and-set of the busy flag. Previously the flag was
    # read without the lock and set under it, so two requests could both pass
    # the check; the daily-cap check also ran after the flag was set.
    with lock:
        if is_processing:
            error = "The server is processing another request. Please try again later."
            return Template(html_template).render(
                idea="", error=error, reply_count=reply_count, time_taken=0)
        if reply_count >= MAX_REPLIES_PER_DAY:
            error_message = "Today's maximum number of replies has been reached. Please try again tomorrow."
            logging.info(error_message)
            return Template(html_template).render(
                idea="", error=error_message, reply_count=reply_count, time_taken=0)
        is_processing = True

    logging.info("Processing request for topic: %s", topic)

    try:
        main_llm, cheap_llm = get_llms()
        deep_research_agent = DeepResearchAgent(
            llm=main_llm, cheap_llm=cheap_llm, improve_cnt=1,
            max_chain_length=5, min_chain_length=3, max_chain_numbers=1)
        logging.info("begin to generate idea of topic %s", topic)
        (idea, related_experiments, entities, idea_chain, ideas,
         trend, future, human, year) = deep_research_agent.generate_idea_with_chain(topic)
        # NOTE(review): this replace is a no-op in the visible source (newline
        # to newline) — the replacement string was presumably markup (e.g. a
        # line-break tag) stripped during extraction. Preserved as-is; confirm
        # against the original file.
        idea = idea.replace("\n", "\n")
        idea_md = markdown.markdown(idea)
        with lock:
            reply_count += 1
        time_taken = round(time.time() - start_time, 2)
        logging.info("Successfully generated idea for topic: %s", topic)
        return Template(html_template).render(
            idea=idea_md, error=None,
            reply_count=reply_count, time_taken=time_taken)
    except Exception as e:
        time_taken = round(time.time() - start_time, 2)
        logging.error("Failed to generate idea for topic: %s, Error: %s", topic, str(e))
        return Template(html_template).render(
            idea="", error=str(e),
            reply_count=reply_count, time_taken=time_taken)
    finally:
        # BUGFIX: guarantee the busy flag is cleared on every exit path
        # (previously it was cleared separately, without the lock, on each
        # return path).
        with lock:
            is_processing = False