{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RAG pipeline over merged Excel/Word FAISS indexes\n",
    "\n",
    "Hybrid retrieval (keyword + MMR semantic search) over two locally built FAISS\n",
    "indexes, filtered by embedding similarity and answered with a Groq-hosted\n",
    "Llama 3.1 model. Run top-to-bottom with **Restart Kernel → Run All**.\n",
    "\n",
    "Prerequisites: `./faiss_excel_doc_index/`, `./faiss_word_doc_index/` and\n",
    "`combined_keyword_retriever.pkl` produced by the indexing notebook, and a\n",
    "`GROQ_API_KEY` in the environment (you will be prompted if it is missing)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pickle\n",
    "from getpass import getpass\n",
    "\n",
    "import nltk\n",
    "from langchain import hub\n",
    "from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever\n",
    "from langchain.retrievers.document_compressors import EmbeddingsFilter\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.runnables import RunnablePassthrough\n",
    "from langchain_groq import ChatGroq\n",
    "from langchain_huggingface.embeddings import HuggingFaceEmbeddings\n",
    "\n",
    "# POS-tagger data required by the loaders used to build the indexes.\n",
    "nltk.download('averaged_perceptron_tagger_eng')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SECURITY: never hardcode API keys in notebook source — the key previously\n",
    "# committed here must be revoked. Read it from the environment instead,\n",
    "# prompting interactively as a fallback.\n",
    "if \"GROQ_API_KEY\" not in os.environ:\n",
    "    os.environ[\"GROQ_API_KEY\"] = getpass(\"Enter GROQ API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load embeddings and vector stores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Multilingual embedding model (the corpus mixes Arabic and English text).\n",
    "embed_model = HuggingFaceEmbeddings(\n",
    "    model_name=\"Alibaba-NLP/gte-multilingual-base\",\n",
    "    model_kwargs={\"trust_remote_code\": True},\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# allow_dangerous_deserialization is acceptable here ONLY because both\n",
    "# indexes were produced locally by our own trusted indexing run —\n",
    "# never load FAISS pickles from an untrusted source.\n",
    "excel_vectorstore = FAISS.load_local(\n",
    "    folder_path=\"./faiss_excel_doc_index\",\n",
    "    embeddings=embed_model,\n",
    "    allow_dangerous_deserialization=True,\n",
    ")\n",
    "word_vectorstore = FAISS.load_local(\n",
    "    folder_path=\"./faiss_word_doc_index\",\n",
    "    embeddings=embed_model,\n",
    "    allow_dangerous_deserialization=True,\n",
    ")\n",
    "\n",
    "# Merge both document sets into a single searchable store.\n",
    "excel_vectorstore.merge_from(word_vectorstore)\n",
    "combined_vectorstore = excel_vectorstore"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Build the hybrid retriever"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# WARNING: pickle.load can execute arbitrary code — only load artifacts\n",
    "# created by our own trusted indexing run.\n",
    "with open('combined_keyword_retriever.pkl', 'rb') as f:\n",
    "    combined_keyword_retriever = pickle.load(f)\n",
    "combined_keyword_retriever.k = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# MMR favours diversity among the top-k hits (lower lambda_mult = more diverse).\n",
    "semantic_retriever = combined_vectorstore.as_retriever(\n",
    "    search_type=\"mmr\", search_kwargs={'k': 10, 'lambda_mult': 0.25}\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hybrid retrieval: keyword and semantic results weighted equally.\n",
    "ensemble_retriever = EnsembleRetriever(\n",
    "    retrievers=[combined_keyword_retriever, semantic_retriever], weights=[0.5, 0.5]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop retrieved chunks whose embedding similarity to the query is below 0.6.\n",
    "embeddings_filter = EmbeddingsFilter(embeddings=embed_model, similarity_threshold=0.6)\n",
    "compression_retriever = ContextualCompressionRetriever(\n",
    "    base_compressor=embeddings_filter, base_retriever=ensemble_retriever\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Assemble the RAG chain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = ChatGroq(\n",
    "    model=\"llama-3.1-8b-instant\",\n",
    "    temperature=0.0,  # deterministic answers for factual Q&A\n",
    "    max_tokens=1024,\n",
    "    max_retries=2,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard RAG prompt template from the LangChain hub.\n",
    "prompt = hub.pull(\"rlm/rag-prompt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_docs(docs):\n",
    "    \"\"\"Join retrieved documents into a single context string for the prompt.\"\"\"\n",
    "    return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
    "\n",
    "\n",
    "rag_chain = (\n",
    "    {\"context\": compression_retriever | format_docs, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
    "    | llm\n",
    "    | StrOutputParser()\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example query\n",
    "\n",
    "Arabic question: *criteria for choosing the location of a honey-bee apiary*.\n",
    "Use `rag_chain.stream(...)` instead of `invoke` for token-by-token output."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "rag_chain.invoke(\"شروط اختيار مكان منحل العسل؟\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rag",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}