# NOTE(review): lines above the imports were web-scrape residue (file size,
# commit hash, and a copied line-number gutter) — removed so the file is valid Python.
from transformers import AutoModelForCausalLM,AutoTokenizer
import streamlit as st

@st.cache_resource(show_spinner='Loading the Gemma model. Be patient🙏')
def LOAD_GEMMA():
  """Download (once) and cache the Gemma-2B NL-to-SQL checkpoint.

  Returns:
    A ``(tokenizer, model)`` pair; the model is moved to CPU.
    ``st.cache_resource`` ensures the download/load happens only on the
    first call per Streamlit server process.
  """
  repo = "aryachakraborty/GEMMA-2B-NL-SQL"
  tok = AutoTokenizer.from_pretrained(repo)
  # low_cpu_mem_usage=True streams weights instead of materialising a full
  # in-memory copy during load.
  mdl = AutoModelForCausalLM.from_pretrained(repo, low_cpu_mem_usage=True).cpu()
  return tok, mdl


@st.cache_resource(show_spinner='Loading the Mistral model. Be patient🙏')
def LOAD_MISTRAL():
  """Download (once) and cache a Mistral checkpoint on CPU.

  Mirrors LOAD_GEMMA: cached via ``st.cache_resource`` so the weights are
  loaded only once per Streamlit server process.

  Returns:
    A ``(tokenizer, model)`` pair; the model is moved to CPU.
  """
  # TODO: fill in the Hugging Face repo id for the Mistral checkpoint —
  # from_pretrained('') will fail at runtime until this is set.
  model_id = ''
  tokenizer = AutoTokenizer.from_pretrained(model_id)
  model = AutoModelForCausalLM.from_pretrained(
      model_id,
      # Fixed: the original passed 'low_cpu_usage', which is not a valid
      # from_pretrained kwarg; the correct name (used by LOAD_GEMMA too)
      # is 'low_cpu_mem_usage'.
      low_cpu_mem_usage=True,
  ).cpu()
  # Fixed: the original had no return statement, so callers received None.
  return tokenizer, model