import os

import streamlit as st
import torch
import langchain
import chromadb
from chromadb.config import Settings

import load_model
import utils as ut

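# Path of the persisted Chroma vector store (defined once in load_model)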
persist_directory = load_model.persist_directory

st.title('myGPT')
st.header('A GPT example brought to you by Heiko Wagner')

st.markdown('*\"Parametrised models are simply functions that depend on inputs and trainable parameters. There is no fundamental difference between the two, except that trainable parameters are shared across training samples whereas the input varies from sample to sample.\"* [(Yann LeCun, Deep learning course)](https://atcold.github.io/pytorch-Deep-Learning/en/week02/02-1/#Parametrised-models)')

st.latex(r'''h(\boldsymbol x, \boldsymbol w)= \sum_{k=1}^{K}\boldsymbol w_{k} \phi_{k}(\boldsymbol x)''')
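
# Illustration only, not used by the app: the formula above in code, with a
# hypothetical polynomial feature map (1, x, x**2, ...) standing in for phi_k.
def parametrised_model(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    # phi has shape (K, n): each basis function phi_k evaluated on every sample
    phi = torch.stack([x ** k for k in range(len(w))])
    # Weighted sum over the K basis functions: h(x, w) = sum_k w_k * phi_k(x)
    return (w[:, None] * phi).sum(dim=0)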

# Free any GPU memory cached by a previous run
torch.cuda.empty_cache()

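# Let the user choose between the hosted OpenAI model and a local model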
model_type = st.selectbox(
    'Select the model used to answer your question',
    ('OpenAI', 'local_model'))

if model_type == 'OpenAI':
    # Mask the key in the UI; it reaches the OpenAI client via the environment
    openai_key = st.text_input('OpenAI Key:', '', type='password')
    os.environ["OPENAI_API_KEY"] = openai_key
    llm = load_model.load_openai_model()
else:
    llm = load_model.load_gpu_model("decapoda-research/llama-7b-hf")


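# Connect to the persisted Chroma instance (legacy duckdb+parquet backend)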
client = chromadb.Client(Settings(chroma_db_impl="duckdb+parquet",
                                  persist_directory=persist_directory))

collections = tuple(collection.name for collection in client.list_collections())
print(collections)
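# Let the user pick which document collection backs the retrieval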
option = st.selectbox(
    'Select the documents to be used to answer your question',
    collections)

st.write('You selected:', option)

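# Build a question-answering chain over the selected collection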
chain = load_model.create_chain(llm, collection=option)
try:
    query = st.text_area('Ask a question:', 'Hello, how are you today?')
    result = chain({"query": query})
    ut.format_result_set(result)
finally:
    # Release the chain and free GPU memory even if the query fails
    del chain
    torch.cuda.empty_cache()