import streamlit as st
import load_model
import utils as ut
import os

persist_directory = load_model.persist_directory
st.title('myGPT')
st.header('A GPT example brought to you by Heiko Wagner')

st.markdown('*\"Parametrised models are simply functions that depend on inputs and trainable parameters. There is no fundamental difference between the two, except that trainable parameters are shared across training samples whereas the input varies from sample to sample.\"* [(Yann LeCun, Deep learning course)](https://atcold.github.io/pytorch-Deep-Learning/en/week02/02-1/#Parametrised-models)')

st.latex(r'''h(\boldsymbol x, \boldsymbol w)= \sum_{k=1}^{K}\boldsymbol w_{k} \phi_{k}(\boldsymbol x)''')
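
# A minimal sketch of the parametrised model shown above, h(x, w) = sum_k w_k * phi_k(x).
# Illustrative only: this helper is an assumption for demonstration and is not
# called anywhere in the app, e.g.
# parametrised_model(2.0, [1.0, 0.5], [lambda x: x, lambda x: x**2]) == 4.0
def parametrised_model(x, w, basis):
    """Evaluate h(x, w) = sum_k w[k] * basis[k](x) for a list of basis functions."""
    return sum(w_k * phi_k(x) for w_k, phi_k in zip(w, basis))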

agree = st.checkbox('Load new Documents')
if agree:
    ut.load_files()
else:
    # torch is imported lazily since it is only needed on this branch; clearing
    # the CUDA cache frees GPU memory left over from a previously loaded model.
    import torch
    torch.cuda.empty_cache()

    model_type = st.selectbox(
        'Select the model to use to answer your question',
        ('OpenAI', 'Load local model'))

    if model_type == 'OpenAI':
        if 'openai_key' not in st.session_state:
            openai_key = st.text_area('OpenAI Key:', '')
            if len(openai_key) > 10:
                st.session_state['openai_key'] = openai_key
                os.environ["OPENAI_API_KEY"] = openai_key
        else:
            os.environ["OPENAI_API_KEY"] = st.session_state.openai_key
        llm = load_model.load_openai_model()
    else:
        # Add more models here
        llm = load_model.load_gpu_model("decapoda-research/llama-7b-hf")
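        # For example (hypothetical checkpoint choice, assuming load_model.load_gpu_model
        # accepts any Hugging Face causal-LM repo id):
        # llm = load_model.load_gpu_model("openlm-research/open_llama_7b")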


    collections = ut.retrieve_collections()
    option = st.selectbox(
        'Select the Documents to be used to answer your question',
        collections)

    st.write('You selected:', option['name'])

    # Build a retrieval chain over the selected collection and run the query.
    chain = load_model.create_chain(llm, collection=option['name'], model_name=option['model_name'], metadata=option['metadata'])
    query = st.text_area('Ask a question:', 'Hello, how are you today?')
    result = chain({"query": query + " Add a score of the probability that your answer is correct to your answer."})
    ut.format_result_set(result)

# Possible extension (not wired up yet): give the chain conversation memory.
# from langchain.chains import ConversationChain
# from langchain.memory import ConversationBufferMemory
#
# conversation = ConversationChain(
#     llm=llm,
#     memory=ConversationBufferMemory()
# )