import streamlit as st
import load_model
import utils as ut
import elements as el
import os
import torch

# Persistence directory for the document/vector store, as configured in load_model
persist_directory = load_model.persist_directory
st.title('myRetrievalGPT')
st.header('A GPT retrieval example brought to you by Heiko Wagner')

st.markdown(r'*Let $\phi$ be a word embedding mapping $W \to \mathbb{R}^n$, where $W$ is the word space and $\mathbb{R}^n$ is an $n$-dimensional vector space; then $\phi(king)-\phi(man)+\phi(woman)\approx\phi(queen)$.*')
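
# Illustrative sketch of the analogy above (hypothetical, not used by the app):
# with a pretrained gensim embedding the king/man/woman/queen relation can be
# checked directly. Assumes the gensim package and its downloader are available.
#import gensim.downloader
#vectors = gensim.downloader.load('glove-wiki-gigaword-50')
#print(vectors.most_similar(positive=['king', 'woman'], negative=['man'], topn=1))
# -> [('queen', ...)], up to approximation; scores vary by embedding model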

agree = st.checkbox('Load new Documents')
if agree:
    el.load_files()
else:

    # Release any cached GPU memory before loading a model (torch is imported above)
    torch.cuda.empty_cache()

    model_type = st.selectbox(
        'Select the model to be used to answer your question',
        ('OpenAI', 'decapoda-research/llama-7b-hf (gpu+cpu)', 'llama-7b 4bit (cpu only)'))

    if model_type == 'OpenAI':
        if 'openai_key' not in st.session_state:
            openai_key = st.text_area('OpenAI Key:', '')
            if len(openai_key) > 0:
                st.session_state['openai_key'] = openai_key
                os.environ["OPENAI_API_KEY"] = openai_key
        else:
            os.environ["OPENAI_API_KEY"] = st.session_state.openai_key
        llm = load_model.load_openai_model()
    elif model_type == 'decapoda-research/llama-7b-hf (gpu+cpu)':
        # Add more models here
        llm = load_model.load_gpu_model("decapoda-research/llama-7b-hf")
    else:
        llm = load_model.load_cpu_model()


    collections = ut.retrieve_collections()
    option = st.selectbox(
        'Select the documents to be used to answer your question',
        collections)

    st.write('You selected:', option['name'])

    # Build the retrieval chain over the selected collection and answer the query
    chain = load_model.create_chain(llm, collection=option['name'], model_name=option['model_name'], metadata=option['metadata'])
    query = st.text_area('Ask a question:', 'Hello, how are you today?')
    result = chain({"query": query + " Add a score for the probability that your answer is correct to your answer."})
    ut.format_result_set(result)

# Possible extension: add conversational memory around the same LLM
#from langchain.chains import ConversationChain
#from langchain.memory import ConversationBufferMemory

#conversation = ConversationChain(
#    llm=llm,
#    memory=ConversationBufferMemory()
#)
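
# Hedged usage sketch for the commented conversation above (assumes the classic
# LangChain ConversationChain API): once created, it could be driven from the
# same text area, e.g.
#response = conversation.predict(input=query)
#st.write(response)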