Commit 7ce3696
JJteam committed
Parent(s): 8ab76f2

upgrading to GPT4 MM-ReAct

Files changed:
- Dockerfile +1 -1
- MM-REACT/app.py +5 -7
- langchain-0.0.94-py3-none-any.whl +0 -0
- requirements.txt +3 -2
Dockerfile
CHANGED
@@ -15,4 +15,4 @@ RUN pip install --no-cache-dir --upgrade -r /src/requirements.txt
 WORKDIR /src/MM-REACT


-CMD ["python", "app.py", "--port", "7860", "--openAIModel", "
+CMD ["python", "app.py", "--port", "7860", "--openAIModel", "azureGPT4", "--noIntermediateConv"]
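
For reference, a minimal sketch of how the flags in the new CMD are consumed by app.py's argument parser (argument names, defaults, and help text are taken from the app.py diff below; the explicit parse_args list here only mirrors the CMD for illustration):

import argparse

# Mirrors the parser set up in app.py's __main__ block after this commit.
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, required=False, default=7860)
parser.add_argument('--openAIModel', type=str, required=False, default='azureGPT4')
parser.add_argument('--noIntermediateConv', default=True, action='store_true',
                    help='if this flag is turned on no intermediate conversation should be shown')

# Same argument vector as the Dockerfile CMD above.
args = parser.parse_args(['--port', '7860', '--openAIModel', 'azureGPT4', '--noIntermediateConv'])
print(args.port, args.openAIModel, args.noIntermediateConv)  # 7860 azureGPT4 True
# Note: with default=True and action='store_true', noIntermediateConv is True
# whether or not the flag is passed.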
MM-REACT/app.py
CHANGED
@@ -33,11 +33,6 @@ REFRESH_MSG = "Please refresh and hit 'Click to wake up MM-REACT'"
 MAX_TOKENS = 512


-############## ARGS #################
-AGRS = None
-#####################################
-
-
 def get_logger():
     global logger
     if logger is None:
@@ -62,6 +57,9 @@ def load_chain(history, log_state):
     elif ARGS.openAIModel == 'azureTextDavinci003':
         # for Azure OpenAI text davinci
         llm = AzureOpenAI(deployment_name="text-davinci-003", model_name="text-davinci-003", temperature=0, max_tokens=MAX_TOKENS)
+    elif ARGS.openAIModel == 'azureGPT4':
+        # for Azure GPT4 private preview
+        llm = AzureOpenAI(deployment_name="gpt-4-32k-0314", temperature=0, chat_completion=True, max_tokens=MAX_TOKENS, openai_api_version="2023-03-15-preview")

     memory = ConversationBufferMemory(memory_key="chat_history")

@@ -418,7 +416,7 @@ def init_and_kick_off():
     chat = ChatWrapper()

     exampleTitle = """<h3>Examples to start conversation..</h3>"""
-    comingSoon = """<center><b><p style="color:Red;">MM-REACT:
+    comingSoon = """<center><b><p style="color:Red;">MM-REACT: April 20th version with GPT4 and image understanding capabilities</p></b></center>"""
     detailLinks = """
     <center>
     <a href="https://multimodal-react.github.io/"> MM-ReAct Website</a>
@@ -521,7 +519,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser()

     parser.add_argument('--port', type=int, required=False, default=7860)
-    parser.add_argument('--openAIModel', type=str, required=False, default='
+    parser.add_argument('--openAIModel', type=str, required=False, default='azureGPT4')
     parser.add_argument('--noIntermediateConv', default=True, action='store_true', help='if this flag is turned on no intermediate conversation should be shown')

     global ARGS
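
The new 'azureGPT4' branch depends on the patched langchain wheel bundled with this Space (stock langchain 0.0.94's AzureOpenAI does not accept a chat_completion keyword). As a minimal sketch, the Azure-side configuration such a call typically assumes looks like the following; the endpoint and key are placeholders, and the environment variable names follow the standard openai 0.27 Azure setup rather than anything specific to this repo:

import os
from langchain.llms import AzureOpenAI

# Placeholders: point these at your own Azure OpenAI resource.
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-resource>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "<your-azure-openai-key>"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"

MAX_TOKENS = 512  # same constant app.py uses

# Mirrors the llm constructed in load_chain() for --openAIModel azureGPT4;
# chat_completion=True comes from the patched wheel, not stock langchain.
llm = AzureOpenAI(
    deployment_name="gpt-4-32k-0314",
    temperature=0,
    chat_completion=True,
    max_tokens=MAX_TOKENS,
    openai_api_version="2023-03-15-preview",
)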
langchain-0.0.94-py3-none-any.whl
CHANGED
Binary files a/langchain-0.0.94-py3-none-any.whl and b/langchain-0.0.94-py3-none-any.whl differ
requirements.txt
CHANGED
@@ -3,6 +3,7 @@ opencensus-context==0.1.3
 opencensus-ext-azure==1.1.6
 opencensus-ext-logging==0.1.1
 imagesize==1.4.1
-gradio==3.
-
+gradio==3.25.0
+gradio_client==0.1.0
+openai==0.27.4
 requests==2.28.2
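
To sanity-check that the versions pinned in this commit are what actually ends up in the built image, a small sketch (distribution names taken verbatim from requirements.txt; importlib.metadata normalizes the '-'/'_' difference on recent Python versions):

from importlib.metadata import version

pins = {"gradio": "3.25.0", "gradio_client": "0.1.0", "openai": "0.27.4"}
for name, pinned in pins.items():
    installed = version(name)
    print(f"{name}: pinned {pinned}, installed {installed}")
    assert installed == pinned, f"{name} version mismatch"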