asoria (HF staff) committed
Commit 117da13 • 1 Parent(s): 7f6f34c

Minor details for RAG

Files changed (2):
  1. app.py +2 -2
  2. utils/notebook_utils.py +39 -16
app.py CHANGED
@@ -185,13 +185,13 @@ def generate_cells(dataset_id, cells, notebook_type="eda"):
         cells, wildcards, replacements, has_numeric_columns, has_categoric_columns
     )
     generated_text = ""
-    # Show only the first 40 lines, would like to have a scroll in gr.Code https://github.com/gradio-app/gradio/issues/9192
+    # Show only the first 30 lines, would like to have a scroll in gr.Code https://github.com/gradio-app/gradio/issues/9192
     for cell in cells:
         if cell["cell_type"] == "markdown":
             continue
         generated_text += cell["source"] + "\n\n"
         yield generated_text, ""
-        if generated_text.count("\n") > 38:
+        if generated_text.count("\n") > 30:
             generated_text += (
                 f"## See more lines available in the generated notebook 🤗 ......"
             )
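Note: the hunk above tunes the cap on a streaming code preview inside a Gradio generator function. In case the intent is unclear from the two changed lines, here is a minimal standalone sketch of the pattern; preview() is a hypothetical reduction of generate_cells, not the Space's real function, and only the 30-line cap and cell handling mirror the diff:

# Sketch only: yields a growing text preview of the notebook's code cells
# and stops once roughly max_lines lines have been emitted.
def preview(cells, max_lines=30):
    text = ""
    for cell in cells:
        if cell["cell_type"] == "markdown":
            continue  # only code cells appear in the preview
        text += cell["source"] + "\n\n"
        if text.count("\n") > max_lines:
            # Cap reached: append a pointer to the full notebook and stop.
            yield text + "## See more lines available in the generated notebook ......"
            return
        yield text

cells = [{"cell_type": "code", "source": "print('hello')"}] * 40
for snapshot in preview(cells):
    pass  # a UI component such as gr.Code would re-render each snapshot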
utils/notebook_utils.py CHANGED
@@ -291,15 +291,16 @@ rag_cells = [
         "cell_type": "code",
         "source": """
 # Install and import necessary libraries.
-!pip install pandas sentence-transformers faiss-cpu transformers torch
+!pip install pandas sentence-transformers faiss-cpu transformers torch huggingface_hub
 """,
     },
     {
         "cell_type": "code",
         "source": """
-import pandas as pd
 from sentence_transformers import SentenceTransformer
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from huggingface_hub import InferenceClient
+import pandas as pd
 import faiss
 import torch
 """,
@@ -364,15 +365,10 @@ index.add(vectors)
         "cell_type": "code",
         "source": """
 # Specify the text you want to search for in the list
-text_to_search = text_list[0]
-print(f"Text to search: {text_to_search}")
-""",
-    },
-    {
-        "cell_type": "code",
-        "source": """
+query = "How to prepare a cake?"
+
 # Generate the embedding for the search query
-query_embedding = model.encode([text_to_search])
+query_embedding = model.encode([query])
 """,
     },
     {
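Note: this hunk merges the query definition and its embedding into one cell. For context, a self-contained sketch of the retrieval step those lines feed; the MiniLM checkpoint and the three-line corpus are illustrative assumptions, and only index.add(vectors) and index.search are confirmed by the surrounding hunk headers:

from sentence_transformers import SentenceTransformer
import faiss

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model choice

# Tiny stand-in corpus; the notebook builds text_list from the dataset.
text_list = ["Beat eggs and sugar.", "Fold in flour.", "Bake at 180 C."]
vectors = model.encode(text_list)

index = faiss.IndexFlatL2(vectors.shape[1])  # flat L2 index over embeddings
index.add(vectors)

query = "How to prepare a cake?"
query_embedding = model.encode([query])
D, I = index.search(query_embedding, k=2)  # distances and neighbor indices
print(f"Similar documents: {[text_list[i] for i in I[0]]}")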
@@ -385,7 +381,10 @@ D, I = index.search(query_embedding, k=10)
 print(f"Similar documents: {[text_list[i] for i in I[0]]}")
 """,
     },
-    {"cell_type": "markdown", "source": "## 4. Load pipeline and perform inference"},
+    {
+        "cell_type": "markdown",
+        "source": "## 4. Load pipeline and perform inference locally",
+    },
     {
         "cell_type": "code",
         "source": """
@@ -404,19 +403,43 @@ generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device
         "cell_type": "code",
         "source": """
 # Create a prompt with two parts: 'system' for instructions based on a 'context' from the retrieved documents, and 'user' for the query
-query = "How to prepare a cake?"
 selected_elements = [text_list[i] for i in I[0].tolist()]
 context = ','.join(selected_elements)
-prompt = f"system: Answer user's question based on '{context}'. user: {query}"
+messages = [
+    {
+        "role": "system",
+        "content": f"You are an intelligent assistant tasked with providing accurate and concise answers based on the following context. Use the information retrieved to construct your response. Context: {context}",
+    },
+    {"role": "user", "content": query},
+]
 """,
     },
     {
         "cell_type": "code",
         "source": """
 # Send the prompt to the pipeline and show the answer
-output = generator(prompt)
-print("Generated Summary:")
-print(output[0]['generated_text'])
+output = generator(messages)
+print("Generated result:")
+print(output[0]['generated_text'][-1]['content']) # Print the assistant's response content
+""",
+    },
+    {
+        "cell_type": "markdown",
+        "source": "## 5. Alternatively call the inference client",
+    },
+    {
+        "cell_type": "code",
+        "source": """
+# Adjust model name as needed
+checkpoint = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+# Change here your Hugging Face API token
+token = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+inference_client = InferenceClient(checkpoint, token=token)
+output = inference_client.chat_completion(messages=messages, stream=False)
+print("Generated result:")
+print(output.choices[0].message.content)
 """,
     },
 ]
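Note: the rewrite above depends on transformers text-generation pipelines accepting chat-style message lists; the pipeline then returns the whole conversation, which is why the new cell prints output[0]['generated_text'][-1]['content']. A compact sketch of that return shape, using a small instruct model purely for illustration:

from transformers import pipeline

# Any chat-tuned checkpoint works; this small one keeps the sketch cheap.
generator = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-135M-Instruct")

messages = [
    {"role": "system", "content": "Answer briefly."},
    {"role": "user", "content": "How to prepare a cake?"},
]
output = generator(messages, max_new_tokens=64)
# generated_text is the full message list, ending with the assistant turn.
print(output[0]["generated_text"][-1]["content"])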
 
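Note: the new section 5 calls chat_completion with stream=False, returning one complete response object. huggingface_hub's InferenceClient also supports stream=True for incremental output, which could pair well with the app's streaming preview; a hedged sketch under the same assumed checkpoint and placeholder token as the diff:

from huggingface_hub import InferenceClient

checkpoint = "meta-llama/Meta-Llama-3-8B-Instruct"
token = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # placeholder, as in the diff

client = InferenceClient(checkpoint, token=token)
messages = [{"role": "user", "content": "How to prepare a cake?"}]

# stream=True yields chunks with incremental deltas instead of one object.
for chunk in client.chat_completion(messages=messages, stream=True, max_tokens=128):
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")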