import streamlit as st
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

st.set_page_config(page_title="Falcon 11B")

st.title("Falcon 11B Showcase")

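# st.cache_resource keeps a single copy of the model and tokenizer in memory
# across reruns and sessions, so the Falcon-11B weights are loaded only once.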
@st.cache_resource
def load_chat_model():
    model_name = "tiiuae/falcon-11B"
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Build a text-generation pipeline on top of the loaded model.
    text_pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    return text_pipeline, tokenizer

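# The text-generation pipeline returns a list of result dicts; with
# num_return_sequences=1 as below, it looks like (illustrative example):
#   [{"generated_text": "What is up? Not much, ..."}]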
def get_text_output(user_input, text_pipeline, tokenizer):
    sequences = text_pipeline(
        user_input,
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Extract the generated string from the first (and only) result,
    # so it can be stored and rendered as chat content.
    return sequences[0]["generated_text"]

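# st.session_state persists across script reruns, so the chat history and
# the cached model handle survive each user interaction.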
# if "Falcon_messages" not in st.session_state:
# st.session_state.Falcon_messages = []
#
# if "Falcon_model" not in st.session_state:
# st.session_state.Falcon_model,st.session_state.tokeniser = Chat_model()
#
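# Streamlit reruns the whole script on every interaction; replay the stored
# history so earlier messages stay visible.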
for message in st.session_state.Falcon_messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.Falcon_messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        response = get_text_output(prompt, st.session_state.Falcon_model, st.session_state.tokenizer)
        # Display the assistant's reply in the chat window.
        st.markdown(response)
    st.session_state.Falcon_messages.append({"role": "assistant", "content": response})