import time
import gradio as gr
from datasets import load_dataset
import pandas as pd
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
import faiss
from usearch.index import Index

# Load titles and texts
title_text_dataset = load_dataset("mixedbread-ai/wikipedia-data-en-2023-11", split="train", num_proc=4).select_columns(["title", "text"])

# Load the int8 and binary indices. Int8 is loaded as a view to save memory, as we never actually perform search with it.
int8_view = Index.restore("wikipedia_int8_usearch_50m.index", view=True)
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("wikipedia_ubinary_faiss_50m.index")
binary_ivf: faiss.IndexBinaryIVF = faiss.read_index_binary("wikipedia_ubinary_ivf_faiss_50m.index")
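# The flat index performs exact Hamming-distance search; the IVF index is approximate but faster.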

# Load the SentenceTransformer model for embedding the queries
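# Setting default_prompt_name="retrieval" makes model.encode prepend the retrieval prompt to every query.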
model = SentenceTransformer(
    "mixedbread-ai/mxbai-embed-large-v1",
    prompts={
        "retrieval": "Represent this sentence for searching relevant passages: ",
    },
    default_prompt_name="retrieval",
)


def search(query: str, top_k: int = 100, rescore_multiplier: int = 1, use_approx: bool = False):
    # 1. Embed the query as float32
    start_time = time.time()
    query_embedding = model.encode(query)
    embed_time = time.time() - start_time

    # 2. Quantize the query to ubinary
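    # ("ubinary" thresholds each dimension at zero and packs the resulting bits into uint8 values)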
    start_time = time.time()
    query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
    quantize_time = time.time() - start_time

    # 3. Search the binary index (either exact or approximate)
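    # faiss binary indexes rank by Hamming distance; we only keep the document ids and rescore below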
    index = binary_ivf if use_approx else binary_index
    start_time = time.time()
    _scores, binary_ids = index.search(query_embedding_ubinary, top_k * rescore_multiplier)
    binary_ids = binary_ids[0]
    search_time = time.time() - start_time

    # 4. Load the corresponding int8 embeddings
    start_time = time.time()
    int8_embeddings = int8_view[binary_ids].astype(int)
    load_time = time.time() - start_time

    # 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
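    # The float32-by-int8 dot product closely approximates the full float32 similarity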
    start_time = time.time()
    scores = query_embedding @ int8_embeddings.T
    rescore_time = time.time() - start_time

    # 6. Sort the scores and return the top_k
    start_time = time.time()
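    # argsort sorts ascending, so reverse to put the highest-scoring documents first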
    indices = scores.argsort()[::-1][:top_k]
    top_k_indices = binary_ids[indices]
    top_k_scores = scores[indices]
    top_k_titles, top_k_texts = zip(
        *[(title_text_dataset[idx]["title"], title_text_dataset[idx]["text"]) for idx in top_k_indices.tolist()]
    )
    df = pd.DataFrame(
        {"Score": [round(value, 2) for value in top_k_scores], "Title": top_k_titles, "Text": top_k_texts}
    )
    sort_time = time.time() - start_time

    return df, {
        "Embed Time": f"{embed_time:.4f} s",
        "Quantize Time": f"{quantize_time:.4f} s",
        "Search Time": f"{search_time:.4f} s",
        "Load Time": f"{load_time:.4f} s",
        "Rescore Time": f"{rescore_time:.4f} s",
        "Sort Time": f"{sort_time:.4f} s",
        "Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
    }


with gr.Blocks(title="Quantized Retrieval") as demo:
    gr.Markdown(
        """
## Quantized Retrieval - Binary Search with Scalar (int8) Rescoring
This demo showcases retrieval using [quantized embeddings](https://huggingface.co/blog/embedding-quantization) on a CPU. The corpus consists of 41 million texts from Wikipedia articles.

<details><summary>Click to learn about the retrieval process</summary>

Details:
1. The query is embedded using the [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) SentenceTransformer model.
2. The query is quantized to binary using the `quantize_embeddings` function from the SentenceTransformers library.
3. A binary index (41M binary embeddings; 5.2GB of memory/disk space) is searched with the quantized query for the top `top_k * rescore_multiplier` documents.
4. These documents are loaded on the fly from an int8 index on disk (41M int8 embeddings; 0 bytes of memory, 47.5GB of disk space).
5. They are rescored using the float32 query embedding and the int8 document embeddings to obtain the top `top_k` documents.
6. The top `top_k` documents are sorted by score and displayed.

This process is designed to be memory-efficient and fast, with the binary index being small enough to fit in memory and the int8 index being loaded as a view to save memory.
In total, this process requires keeping 1) the model in memory, 2) the binary index in memory, and 3) the int8 index on disk. With a dimensionality of 1024,
we need `1024 / 8 * num_docs` bytes for the binary index and `1024 * num_docs` bytes for the int8 index.
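For the 41M documents here, that is roughly `1024 / 8 * 41,000,000 ≈ 5.2GB` for the binary index and `1024 * 41,000,000 ≈ 42GB` for the raw int8 embeddings (the 47.5GB on disk noted above presumably includes additional index overhead).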

This is notably cheaper than the same process with float32 embeddings, which would require `4 * 1024 * num_docs` bytes of memory/disk space for the float32 index, i.e. 32x as much memory as the binary index and 4x as much disk space as the int8 index.
Additionally, the binary index is much faster (up to 32x) to search than the float32 index, since binary similarity reduces to Hamming distance, which CPUs compute with cheap XOR and popcount instructions, and the rescoring step is a single small matrix multiplication. In short, this process allows for fast, scalable, cheap, and memory-efficient retrieval.

Feel free to check out the [code for this demo](https://huggingface.co/spaces/sentence-transformers/quantized-retrieval/blob/main/app.py) to learn more about how to apply this in practice.
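
As a minimal sketch of steps 1 and 2 above (using the same model and `quantize_embeddings` function as this demo; the query is illustrative):

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings

model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# The retrieval prompt is prepended to the query, as this demo does via `default_prompt_name`
query_embedding = model.encode("Who founded Wikipedia?", prompt="Represent this sentence for searching relevant passages: ")
# 1024 float32 dimensions are packed into 128 uint8 values
query_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), precision="ubinary")
```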

Notes:
- The approximate search index (a binary Inverted File Index (IVF)) is in beta and has not yet been trained on much data. A better IVF index will be released soon.

</details>
"""
    )
    with gr.Row():
        with gr.Column(scale=75):
            query = gr.Textbox(
                label="Query for Wikipedia articles",
                placeholder="Enter a query to search for relevant texts from Wikipedia.",
            )
        with gr.Column(scale=25):
            use_approx = gr.Radio(
                choices=[("Exact Search", False), ("Approximate Search", True)],
                value=True,
                label="Search Index",
            )

    with gr.Row():
        with gr.Column(scale=2):
            top_k = gr.Slider(
                minimum=10,
                maximum=1000,
                step=5,
                value=100,
                label="Number of documents to retrieve",
                info="Number of documents to retrieve from the binary search",
            )
        with gr.Column(scale=2):
            rescore_multiplier = gr.Slider(
                minimum=1,
                maximum=10,
                step=1,
                value=1,
                label="Rescore multiplier",
                info="Search for `rescore_multiplier` as many documents to rescore",
            )

    search_button = gr.Button(value="Search")

    with gr.Row():
        with gr.Column(scale=4):
            output = gr.Dataframe(headers=["Score", "Title", "Text"])
        with gr.Column(scale=1):
            json = gr.JSON()

    query.submit(search, inputs=[query, top_k, rescore_multiplier, use_approx], outputs=[output, json])
    search_button.click(search, inputs=[query, top_k, rescore_multiplier, use_approx], outputs=[output, json])

demo.queue()
demo.launch()