"""Streamlit front end for myGPT: answers questions about document
collections using either an OpenAI model or a local LLaMA model."""

import os

import streamlit as st
import torch

import load_model
import utils as ut

# Re-export the persistence directory configured in load_model.
persist_directory = load_model.persist_directory

st.title('myGPT')
st.header('A GPT example brought to you by Heiko Wagner')

st.markdown('*"Parametrised models are simply functions that depend on inputs '
            'and trainable parameters. There is no fundamental difference '
            'between the two, except that trainable parameters are shared '
            'across training samples whereas the input varies from sample to '
            'sample."* [(Yann LeCun, Deep learning course)]'
            '(https://atcold.github.io/pytorch-Deep-Learning/en/week02/02-1/#Parametrised-models)')

st.latex(r'''h(\boldsymbol x, \boldsymbol w) = \sum_{k=1}^{K} \boldsymbol w_{k} \phi_{k}(\boldsymbol x)''')

agree = st.checkbox('Load new Documents')

if agree:
    # Ingest new documents into the vector store.
    ut.load_files()
else:
    # Free any GPU memory left over from a previous run before loading a model.
    torch.cuda.empty_cache()

    model_type = st.selectbox(
        'Select the model used to answer your question',
        ('OpenAI', 'local_model')
    )

    if model_type == 'OpenAI':
        # Ask for the API key once and cache it in the session state.
        if 'openai_key' not in st.session_state:
            openai_key = st.text_area('OpenAI Key:', '')
            if len(openai_key) > 10:
                st.session_state['openai_key'] = openai_key
                os.environ["OPENAI_API_KEY"] = openai_key
        else:
            os.environ["OPENAI_API_KEY"] = st.session_state.openai_key
        llm = load_model.load_openai_model()
    else:
        llm = load_model.load_gpu_model("decapoda-research/llama-7b-hf")

    # Let the user pick which document collection the chain should query.
    collections = ut.retrieve_collections()
    option = st.selectbox(
        'Select the Documents to be used to answer your question',
        collections
    )
    st.write('You selected:', option)

    chain = load_model.create_chain(llm, collection=option)
    try:
        query = st.text_area('Ask a question:', 'Hello, how are you today?')
        result = chain({"query": query})
        ut.format_result_set(result)
    finally:
        # Release the chain and its GPU memory even if the query fails.
        del chain
        torch.cuda.empty_cache()
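
# Usage: launch the app with the Streamlit CLI (the filename myGPT.py is an
# assumption; the source does not name this file):
#
#     streamlit run myGPT.py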