phamson02 committed · Commit c1d85a2 · Parent(s): 45f8ce2

add word segmentation before tokenization

Files changed:
- README.md +26 -25
- custom_tokenizer.py +11 -0
- pipeline.py +76 -0
- requirements.txt +1 -0
README.md
CHANGED
@@ -1,21 +1,22 @@
 ---
 pipeline_tag: sentence-similarity
 tags:
 - sentence-transformers
 - feature-extraction
 - sentence-similarity
 - transformers
+library_name: generic
 language:
 - vi
 - en
 widget:
-- source_sentence:
-
-
-
-
-
-
+- source_sentence: 'Anh ấy đang là sinh viên năm cuối'
+  sentences:
+  - 'Anh ấy học tại Đại học Bách khoa Hà Nội, chuyên ngành Khoa học máy tính'
+  - 'Anh ấy đang làm việc tại nhà máy sản xuất linh kiện điện tử'
+  - 'Anh ấy chuẩn bị đi du học nước ngoài'
+  - 'Anh ấy sắp mở cửa hàng bán mỹ phẩm'
+  - 'Nhà anh ấy có rất nhiều cây cảnh'
 ---
 
 # bkai-foundation-models/vietnamese-bi-encoder
@@ -43,9 +44,8 @@ embeddings = model.encode(sentences)
 print(embeddings)
 ```
 
-
-
 ## Usage (HuggingFace Transformers)
+
 Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
 
 ```python
@@ -81,21 +81,20 @@ print("Sentence embeddings:")
 print(sentence_embeddings)
 ```
 
-
-
 ## Evaluation Results
 
 <!--- Describe how your model was evaluated -->
 
-For an automated evaluation of this model, see the
-
+For an automated evaluation of this model, see the _Sentence Embeddings Benchmark_: [https://seb.sbert.net](https://seb.sbert.net?model_name=bkai-foundation-models/vietnamese-bi-encoder)
 
 ## Training
+
 The model was trained with the parameters:
 
 **DataLoader**:
 
 `torch.utils.data.dataloader.DataLoader` of length 17584 with parameters:
+
 ```
 {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
 ```
@@ -103,11 +102,13 @@ The model was trained with the parameters:
 **Loss**:
 
 `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
-
-
-
+
+```
+{'scale': 20.0, 'similarity_fct': 'cos_sim'}
+```
 
 Parameters of the fit()-Method:
+
 ```
 {
     "epochs": 15,
@@ -125,15 +126,15 @@ Parameters of the fit()-Method:
 }
 ```
 
-
 ## Full Model Architecture
+
 ```
 SentenceTransformer(
   (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel
   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
 )
 ```
 
 ## Citing & Authors
 
 <!--- Describe where people can find more information -->
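Taken together, the new front-matter widget and the README's usage section describe a query-vs-candidates setup. A minimal sketch of that flow through the sentence-transformers path, with pyvi segmentation applied explicitly up front (the explicit pre-segmentation step is an assumption here; the custom tokenizer added below performs the same step internally for the hosted widget):

```python
from pyvi import ViTokenizer
from sentence_transformers import SentenceTransformer

query = "Anh ấy đang là sinh viên năm cuối"
candidates = [
    "Anh ấy học tại Đại học Bách khoa Hà Nội, chuyên ngành Khoa học máy tính",
    "Anh ấy chuẩn bị đi du học nước ngoài",
]

# Word-segment before encoding, mirroring what CustomPhobertTokenizer does internally.
segmented = [ViTokenizer.tokenize(s) for s in [query] + candidates]

model = SentenceTransformer("bkai-foundation-models/vietnamese-bi-encoder")
embeddings = model.encode(segmented)
print(embeddings.shape)  # (3, 768), per the Pooling config in the architecture dump
```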
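On the training details now spelled out in the diff: `MultipleNegativesRankingLoss` with `{'scale': 20.0, 'similarity_fct': 'cos_sim'}` is the standard in-batch negatives objective. As a sketch, for a batch of $N$ (query, positive) pairs $(q_i, p_i)$, every other positive in the batch serves as a negative for $q_i$:

$$
\mathcal{L}_i = -\log \frac{\exp\big(20 \cdot \cos(q_i, p_i)\big)}{\sum_{j=1}^{N} \exp\big(20 \cdot \cos(q_i, p_j)\big)}
$$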
custom_tokenizer.py
ADDED
@@ -0,0 +1,11 @@
+from transformers import PhobertTokenizer
+from pyvi import ViTokenizer
+
+
+class CustomPhobertTokenizer(PhobertTokenizer):
+    def rdr_segment(self, text):
+        return ViTokenizer.tokenize(text)
+
+    def _tokenize(self, text):
+        segmented_text = self.rdr_segment(text)
+        return super()._tokenize(segmented_text)
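The subclass splices pyvi's word segmenter in front of PhoBERT's BPE: `_tokenize` first joins the syllables of multi-syllable Vietnamese words with underscores (the form PhoBERT's pretraining corpus uses), then defers to the stock `PhobertTokenizer`. A rough illustration of that intermediate step (the segmentation shown is typical pyvi output and may vary by version):

```python
from pyvi import ViTokenizer

print(ViTokenizer.tokenize("Anh ấy học tại Đại học Bách khoa Hà Nội"))
# Typically: "Anh ấy học tại Đại_học Bách_khoa Hà_Nội"
# "Đại học" thus reaches PhoBERT's BPE as the single word "Đại_học".
```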
pipeline.py
ADDED
@@ -0,0 +1,76 @@
+from typing import Dict, List, Union
+import torch
+from transformers import AutoModel
+from custom_tokenizer import CustomPhobertTokenizer
+
+
+def mean_pooling(model_output, attention_mask):
+    token_embeddings = model_output[
+        0
+    ]  # First element of model_output contains all token embeddings
+    input_mask_expanded = (
+        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+    )
+    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+        input_mask_expanded.sum(1), min=1e-9
+    )
+
+
+class PreTrainedPipeline:
+    def __init__(self, path="."):
+        self.model = AutoModel.from_pretrained(path)
+        self.tokenizer = CustomPhobertTokenizer.from_pretrained(path)
+
+    def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:
+        """
+        Args:
+            inputs (Dict[str, Union[str, List[str]]]):
+                a dictionary containing a query sentence and a list of key sentences
+        """
+
+        # Combine the query sentence and key sentences into one list
+        sentences = [inputs["source_sentence"]] + inputs["sentences"]
+
+        # Tokenize sentences
+        encoded_input = self.tokenizer(
+            sentences, padding=True, truncation=True, return_tensors="pt"
+        )
+
+        # Compute token embeddings
+        with torch.no_grad():
+            model_output = self.model(**encoded_input)
+
+        # Perform pooling to get sentence embeddings
+        sentence_embeddings = mean_pooling(
+            model_output, encoded_input["attention_mask"]
+        )
+
+        # Separate the query embedding from the key embeddings
+        query_embedding = sentence_embeddings[0]
+        key_embeddings = sentence_embeddings[1:]
+
+        # Compute cosine similarities (or any other comparison method you prefer)
+        cosine_similarities = torch.nn.functional.cosine_similarity(
+            query_embedding.unsqueeze(0), key_embeddings
+        )
+
+        # Convert the tensor of cosine similarities to a list of floats
+        scores = cosine_similarities.tolist()
+
+        return scores
+
+
+if __name__ == "__main__":
+    inputs = {
+        "source_sentence": "Anh ấy đang là sinh viên năm cuối",
+        "sentences": [
+            "Anh ấy học tại Đại học Bách khoa Hà Nội, chuyên ngành Khoa học máy tính",
+            "Anh ấy đang làm việc tại nhà máy sản xuất linh kiện điện tử",
+            "Anh ấy chuẩn bị đi du học nước ngoài",
+            "Anh ấy sắp mở cửa hàng bán mỹ phẩm",
+            "Nhà anh ấy có rất nhiều cây cảnh",
+        ],
+    }
+
+    pipeline = PreTrainedPipeline()
+    res = pipeline(inputs)
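The class implements the generic-library widget contract: a dict with `source_sentence` and `sentences` in, a list of floats out, one cosine score per candidate. A minimal standalone invocation sketch (assuming the repo files and model weights sit in the working directory, as the `path="."` default expects):

```python
from pipeline import PreTrainedPipeline  # the module added in this commit

pipe = PreTrainedPipeline(path=".")
scores = pipe({
    "source_sentence": "Anh ấy đang là sinh viên năm cuối",
    "sentences": ["Anh ấy chuẩn bị đi du học nước ngoài"],
})
print(scores)  # a plain list of floats in [-1, 1], aligned with "sentences"
```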
requirements.txt
ADDED
@@ -0,0 +1 @@
+pyvi>=0.1.1