Tom Aarsen committed
Commit 6236065
1 Parent(s): 92d5d6d

Add Markdown to the demo

Files changed (1)
app.py +33 -7
app.py CHANGED
@@ -25,7 +25,7 @@ model = SentenceTransformer(
 )
 
 
-def search(query, top_k: int = 10, rerank_multiplier: int = 4):
+def search(query, top_k: int = 10, rescore_multiplier: int = 4):
     # 1. Embed the query as float32
     start_time = time.time()
     query_embedding = model.encode(query)
@@ -38,7 +38,7 @@ def search(query, top_k: int = 10, rerank_multiplier: int = 4):
 
     # 3. Search the binary index
     start_time = time.time()
-    _scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rerank_multiplier)
+    _scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
     binary_ids = binary_ids[0]
     search_time = time.time() - start_time
 
@@ -47,10 +47,10 @@ def search(query, top_k: int = 10, rerank_multiplier: int = 4):
     int8_embeddings = int8_view[binary_ids].astype(int)
     load_time = time.time() - start_time
 
-    # 5. Rerank the top_k * rerank_multiplier using the float32 query embedding and the int8 document embeddings
+    # 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
     start_time = time.time()
     scores = query_embedding @ int8_embeddings.T
-    rerank_time = time.time() - start_time
+    rescore_time = time.time() - start_time
 
     # 6. Sort the scores and return the top_k
     start_time = time.time()
@@ -65,13 +65,39 @@ def search(query, top_k: int = 10, rerank_multiplier: int = 4):
         "Quantize Time": f"{quantize_time:.4f} s",
         "Search Time": f"{search_time:.4f} s",
         "Load Time": f"{load_time:.4f} s",
-        "Rerank Time": f"{rerank_time:.4f} s",
+        "Rescore Time": f"{rescore_time:.4f} s",
         "Sort Time": f"{sort_time:.4f} s",
-        "Total Retrieval Time": f"{quantize_time + search_time + load_time + rerank_time + sort_time:.4f} s"
+        "Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s"
     }
 
 with gr.Blocks(title="Quantized Retrieval") as demo:
-    query = gr.Textbox(label="Query")
+    gr.Markdown(
+        """
+## Quantized Retrieval - Binary Search with Scalar (int8) Rescoring
+This demo showcases retrieval using [quantized embeddings](https://huggingface.co/blog/embedding-quantization). The corpus consists of 1 million texts from Wikipedia articles.
+
+<details><summary>Click to learn about the retrieval process</summary>
+
+Details:
+1. The query is embedded using the [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) SentenceTransformer model.
+2. The query is quantized to binary using the `quantize_embeddings` function from the SentenceTransformers library.
+3. A binary index (1M binary embeddings; 130MB of memory/disk space) is searched using the quantized query for the top 40 documents.
+4. The top 40 documents are loaded on the fly from an int8 index on disk (1M int8 embeddings; 0 bytes of memory, 1.19GB of disk space).
+5. The top 40 documents are rescored using the float32 query and the int8 embeddings to get the top 10 documents.
+6. The top 10 documents are sorted by score and displayed.
+
+This process is designed to be memory efficient and fast, with the binary index being small enough to fit in memory and the int8 index being loaded as a view to save memory.
+In total, this process requires keeping 1) the model in memory, 2) the binary index in memory, and 3) the int8 index on disk. With a dimensionality of 1024,
+we need `1024 / 8 * num_docs` bytes for the binary index and `1024 * num_docs` bytes for the int8 index.
+
+This is notably cheaper than doing the same process with float32 embeddings, which would require `4 * 1024 * num_docs` bytes of memory/disk space for the float32 index, i.e. 32x as much memory and 4x as much disk space.
+Additionally, the binary index is much faster (up to 32x) to search than the float32 index, while the rescoring is also extremely efficient. In conclusion, this process allows for fast, scalable, cheap, and memory-efficient retrieval.
+
+Feel free to check out the [code for this demo](https://huggingface.co/spaces/tomaarsen/quantized_retrieval/blob/main/app.py) to learn more about how to apply this in practice.
+
+</details>
+""")
+    query = gr.Textbox(label="Query for Wikipedia articles", placeholder="Enter a query to search for relevant texts from Wikipedia.")
     search_button = gr.Button(value="Search")
 
     with gr.Row():
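
The six-step flow described in the added Markdown can be reproduced outside the Space. Below is a minimal sketch, assuming `faiss`, `numpy`, and `sentence-transformers` are installed; the toy `corpus`, the in-memory `int8_embeddings` matrix, and this standalone `search` helper are illustrative stand-ins for the demo's prebuilt 1M-document indices, not the demo's actual code.

```python
# Minimal sketch of binary search + int8 rescoring (steps 1-6 above).
# Assumptions: a toy 3-document corpus; int8 embeddings kept in memory here,
# whereas the demo loads them as a view from an on-disk index.
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings

model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# Offline: embed the corpus, build a binary index and an int8 matrix
corpus = [
    "Paris is the capital of France.",
    "The Nile is a river in Africa.",
    "Python is a programming language.",
]
corpus_embeddings = model.encode(corpus, normalize_embeddings=True)
dim = corpus_embeddings.shape[1]  # 1024 for mxbai-embed-large-v1

binary_index = faiss.IndexBinaryFlat(dim)  # stores dim / 8 bytes per document
binary_index.add(quantize_embeddings(corpus_embeddings, precision="ubinary"))
int8_embeddings = quantize_embeddings(corpus_embeddings, precision="int8")

def search(query: str, top_k: int = 2, rescore_multiplier: int = 4):
    # 1. Embed the query as float32
    query_embedding = model.encode(query)
    # 2. Quantize the query to ubinary (packed bits)
    query_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), precision="ubinary")
    # 3. Search the binary index for top_k * rescore_multiplier candidates
    k = min(top_k * rescore_multiplier, len(corpus))
    _scores, binary_ids = binary_index.search(query_ubinary, k)
    binary_ids = binary_ids[0]
    # 4. Gather the candidates' int8 embeddings (read from disk in the demo)
    candidates = int8_embeddings[binary_ids].astype(int)
    # 5. Rescore: float32 query against int8 documents
    scores = query_embedding @ candidates.T
    # 6. Sort the scores and return the top_k
    order = np.argsort(scores)[::-1][:top_k]
    return [(corpus[binary_ids[i]], float(scores[i])) for i in order]

print(search("capital of France"))
```

The rescore step is a single dot product over at most `top_k * rescore_multiplier` rows, which is why it barely registers in the timing breakdown the demo reports.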
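
The storage arithmetic quoted in the Markdown is also easy to sanity-check. For 1M documents at dimensionality 1024, the raw figures come out to roughly 128MB (binary) and 1.02GB (int8); the demo's 1.19GB on-disk figure is somewhat above the raw size, presumably due to index overhead (an assumption, not stated in the commit):

```python
# Back-of-the-envelope check of the memory/disk figures above.
num_docs, dim = 1_000_000, 1024

binary_bytes = dim / 8 * num_docs    # 1 bit per dimension
int8_bytes = dim * num_docs          # 1 byte per dimension
float32_bytes = 4 * dim * num_docs   # 4 bytes per dimension

print(f"binary:  {binary_bytes / 1e6:.0f} MB")    # 128 MB
print(f"int8:    {int8_bytes / 1e9:.2f} GB")      # 1.02 GB
print(f"float32: {float32_bytes / 1e9:.2f} GB")   # 4.10 GB, 32x the binary index
```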