# -*- coding: utf-8 -*-
"""InfogenQA_langchain.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ubmRCRQhU3K16iDYgBcJ4XMPRffvctaa
"""
# Installing all required libraries
# Langchain - for building retrieval chains
# faiss-gpu - for performing similarity search on GPUs
# sentence_transformers - pre-trained sentence embeddings for understanding semantics
# Install required libraries
# !pip install -qU transformers accelerate einops langchain xformers bitsandbytes faiss-gpu sentence_transformers
# !pip install gradio
import os
# For handling UTF-8 locale error
import locale
def getpreferredencoding(do_setlocale=True):
    return "UTF-8"
locale.getpreferredencoding = getpreferredencoding
from torch import cuda, bfloat16
import transformers
from accelerate import disk_offload
# Model used
model_id = 'meta-llama/Llama-2-7b-chat-hf'
# Detects available device (GPU or CPU)
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
# set quantization configuration to load large model with less GPU memory
# this requires the `bitsandbytes` library
bnb_config = transformers.BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=bfloat16
)
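# With load_in_4bit the 7B checkpoint is stored as 4-bit NF4 weights (a fraction
# of its fp16 footprint), double quantization also compresses the quantization
# constants, and bfloat16 is used for the actual compute.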
# Hugging Face Access Token
hf_auth = os.environ.get("hf_auth")
# Downloading and parsing model's configuration from HF
model_config = transformers.AutoConfig.from_pretrained(
model_id,
token=hf_auth
)
# Downloading and Initializing the model
model = transformers.AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
config=model_config,
quantization_config=bnb_config,
device_map='auto',
token=hf_auth
)
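# device_map='auto' lets accelerate decide where each layer lives, placing them
# on the available GPU(s) and offloading to CPU only if GPU memory runs out.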
# enable evaluation mode to allow model inference
model.eval()
print(f"Model loaded on {device}")
# Initialize tokenization process for Llama-2
# used to process text into LLM compatible format
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_id,
    token=hf_auth
)
# Defining strings to be treated as 'stop tokens' during text generation
stop_list = ['\nHuman:', '\n```\n']
# Converting stop tokens to their corresponding numerical token IDs
# (add_special_tokens=False keeps the BOS token out of the ids, so the
# comparison against freshly generated tokens can actually match)
stop_token_ids = [tokenizer(x, add_special_tokens=False)['input_ids'] for x in stop_list]
import torch
# Converting stop_token_ids into 64-bit long tensors and moving them to the selected device
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
from transformers import StoppingCriteria, StoppingCriteriaList
# define custom stopping criteria object
# Allows us to check whether the generated text contains stop_token_ids
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False
# Defining a list of stopping criteria
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
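# The generation loop evaluates every criterion in this list after each new token;
# as soon as one returns True (the tail of the output matches a stop sequence),
# generation halts.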
# Function to generate text using Llama
generate_text = transformers.pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,  # langchain expects the full text
    task='text-generation',
    # we pass model parameters here too
    stopping_criteria=stopping_criteria,  # without this the model rambles during chat
    temperature=0.1,  # 'randomness' of outputs; lower values give more deterministic answers
    max_new_tokens=512,  # max number of tokens to generate in the output
    repetition_penalty=1.1  # without this the output begins repeating
)
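# Optional sanity check of the raw transformers pipeline before it is wrapped for
# LangChain below (illustrative only, left commented out so it does not run at startup):
# out = generate_text("What services does Infogen Labs offer?")
# print(out[0]['generated_text'])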
# Quick check that the model can generate text through the LangChain wrapper
from langchain.llms import HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=generate_text)
llm(prompt="Who is the CEO of Infogen Labs?")
# Importing WebBaseLoader class - used to load documents from web links
from langchain.document_loaders import WebBaseLoader
# A list containing web links from Infogen-Labs website
web_links = ["https://corp.infogen-labs.com/index.html",
"https://corp.infogen-labs.com/technology.html",
"https://corp.infogen-labs.com/EdTech.html",
"https://corp.infogen-labs.com/FinTech.html",
"https://corp.infogen-labs.com/retail.html",
"https://corp.infogen-labs.com/telecom.html",
"https://corp.infogen-labs.com/stud10.html",
"https://corp.infogen-labs.com/construction.html",
"https://corp.infogen-labs.com/RandD.html",
"https://corp.infogen-labs.com/microsoft.html",
"https://corp.infogen-labs.com/edge-technology.html",
"https://corp.infogen-labs.com/cloud-computing.html",
"https://corp.infogen-labs.com/uiux-studio.html",
"https://corp.infogen-labs.com/mobile-studio.html",
"https://corp.infogen-labs.com/qaqc-studio.html",
"https://corp.infogen-labs.com/platforms.html",
"https://corp.infogen-labs.com/about-us.html",
"https://corp.infogen-labs.com/career.html",
"https://corp.infogen-labs.com/contact-us.html"
]
# Fetch the content from web links and store the extracted text
loader = WebBaseLoader(web_links)
documents = loader.load()
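# WebBaseLoader fetches each URL and returns one Document per page, with the page
# text in `page_content` and the source URL in `metadata`.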
# Splitting large text documents into smaller chunks for easier processing
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Specifying chunk size
# chunk_overlap allows some overlap between cuts to maintain context
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
# A list of chunks split from all the documents
all_splits = text_splitter.split_documents(documents)
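# Optional: inspect how many chunks were produced and preview one (illustrative, commented out):
# print(len(all_splits))
# print(all_splits[0].page_content[:200])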
from langchain.embeddings import HuggingFaceEmbeddings # For numerical representation of the text
from langchain.vectorstores import FAISS # Similarity search in high-dimensional vector space
model_name = "sentence-transformers/all-mpnet-base-v2" # Embedding model
model_kwargs = {"device": device}  # reuse the device detected earlier (falls back to CPU if no GPU)
# used to generate embeddings from text
embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
# storing embeddings in the vector store
vectorstore = FAISS.from_documents(all_splits, embeddings)
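# Optional: query the index directly to see which chunks would be retrieved
# (illustrative sketch, commented out; the query string is just an example):
# docs = vectorstore.similarity_search("Who is the CEO of Infogen Labs?", k=4)
# for doc in docs:
#     print(doc.metadata.get("source"), doc.page_content[:100])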
# Creating conversational agents that combine retrieval and generation capabilities
from langchain.chains import ConversationalRetrievalChain
# Creating a conversational retrieval chain from three arguments:
# LLM - for text generation
# the FAISS vector store converted into a retriever object
# return_source_documents=True - also returns the source documents to provide more context
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)
# For demo purpose
# Storing chat history for asking follow up questions
# chat_history = []
# # Asking query
# query = "Who is the CEO of Infogen Labs?"
# result = chain({"question": query, "chat_history": chat_history})
# # Printing the result
# print(result['answer'])
# # Adding current question and generated answer
# chat_history.append((query, result["answer"]))
# # Printing source document from where the results were derived
# print(result['source_documents'])
import gradio as gr
def process_answer(answer):
    # Strip canned phrases from the model's answer before showing it to the user
    answer = answer.replace('If you don\'t know the answer to this question, please say so.', '')
    answer = answer.replace('Based on the information provided in the passage', 'Based on my current knowledge')
    return answer
def generate_response(message, history):
    # Gradio passes history as a list of [user, assistant] pairs;
    # ConversationalRetrievalChain expects a list of (question, answer) tuples
    chat_history = []
    for val in history:
        chat_history.append(tuple(val))
    result = chain({"question": message, "chat_history": chat_history})
    response = process_answer(result['answer'])
    return response
gr.ChatInterface(generate_response).launch()