wxy01giser committed
Commit 018b7b3 · verified · 1 parent: a215fac

Upload 22 files

Files changed (23)
  1. .gitattributes +1 -0
  2. sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/added_tokens.json +0 -0
  3. sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/config_sentence_transformers.json +0 -0
  4. sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/tokenizer.json +0 -0
  5. sbert/models--shibing624--text2vec-base-chinese/blobs/0c855515479137398ce4ea985628548d4e8ed8c5764656dac966d6a24f39e721 +3 -0
  6. sbert/models--shibing624--text2vec-base-chinese/blobs/2c787222b8b9fc64a5a5c6bcf7506e2c4906bec5 +14 -0
  7. sbert/models--shibing624--text2vec-base-chinese/blobs/5bdf2723485c9b6e797c26b1c59edf10a638051d +1 -0
  8. sbert/models--shibing624--text2vec-base-chinese/blobs/6cf65ba8116d461a2baa0039d80bbc4b3eb7600b +4 -0
  9. sbert/models--shibing624--text2vec-base-chinese/blobs/7fac338dfa876bf83d235a13e72b06b21ea4f552 +238 -0
  10. sbert/models--shibing624--text2vec-base-chinese/blobs/90e03d46bdb660cb7a95fb0200a35e456457f78c +32 -0
  11. sbert/models--shibing624--text2vec-base-chinese/blobs/ca4f9781030019ab9b253c6dcb8c7878b6dc87a5 +0 -0
  12. sbert/models--shibing624--text2vec-base-chinese/blobs/e0021d480d68dfdf363d3639ee7f3c00f63239f7 +4 -0
  13. sbert/models--shibing624--text2vec-base-chinese/blobs/e7b0375001f109a6b8873d756ad4f7bbb15fbaa5 +1 -0
  14. sbert/models--shibing624--text2vec-base-chinese/refs/main +1 -0
  15. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/1_Pooling/config.json +4 -0
  16. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/README.md +238 -0
  17. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/config.json +32 -0
  18. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/model.safetensors +3 -0
  19. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/modules.json +14 -0
  20. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/sentence_bert_config.json +4 -0
  21. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/special_tokens_map.json +1 -0
  22. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/tokenizer_config.json +1 -0
  23. sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/vocab.txt +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sbert/models--shibing624--text2vec-base-chinese/blobs/0c855515479137398ce4ea985628548d4e8ed8c5764656dac966d6a24f39e721 filter=lfs diff=lfs merge=lfs -text
sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/added_tokens.json ADDED
File without changes
sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/config_sentence_transformers.json ADDED
File without changes
sbert/models--shibing624--text2vec-base-chinese/.no_exist/183bb99aa7af74355fb58d16edf8c13ae7c5433e/tokenizer.json ADDED
File without changes
sbert/models--shibing624--text2vec-base-chinese/blobs/0c855515479137398ce4ea985628548d4e8ed8c5764656dac966d6a24f39e721 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c855515479137398ce4ea985628548d4e8ed8c5764656dac966d6a24f39e721
+ size 409098104
sbert/models--shibing624--text2vec-base-chinese/blobs/2c787222b8b9fc64a5a5c6bcf7506e2c4906bec5 ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
sbert/models--shibing624--text2vec-base-chinese/blobs/5bdf2723485c9b6e797c26b1c59edf10a638051d ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "name_or_path": "hfl/chinese-macbert-base", "tokenizer_class": "BertTokenizer"}
sbert/models--shibing624--text2vec-base-chinese/blobs/6cf65ba8116d461a2baa0039d80bbc4b3eb7600b ADDED
@@ -0,0 +1,4 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_mean_tokens": true
+ }
sbert/models--shibing624--text2vec-base-chinese/blobs/7fac338dfa876bf83d235a13e72b06b21ea4f552 ADDED
@@ -0,0 +1,238 @@
+ ---
+ license: apache-2.0
+ pipeline_tag: sentence-similarity
+ tags:
+ - Sentence Transformers
+ - sentence-similarity
+ - sentence-transformers
+ datasets:
+ - shibing624/nli_zh
+ language:
+ - zh
+ library_name: sentence-transformers
+ ---
+
+ # shibing624/text2vec-base-chinese
+ This is a CoSENT (Cosine Sentence) model: shibing624/text2vec-base-chinese.
+
+ It maps sentences to a 768-dimensional dense vector space and can be used for tasks
+ like sentence embedding, text matching, or semantic search.
+
+ ## Evaluation
+ For an automated evaluation of this model, see the *Evaluation Benchmark*: [text2vec](https://github.com/shibing624/text2vec)
+
+ - Chinese text matching task:
+
+ | Arch | BaseModel | Model | ATEC | BQ | LCQMC | PAWSX | STS-B | SOHU-dd | SOHU-dc | Avg | QPS |
+ |:-----|:----------|:------|:----:|:--:|:-----:|:-----:|:-----:|:-------:|:-------:|:---:|:---:|
+ | Word2Vec | word2vec | [w2v-light-tencent-chinese](https://ai.tencent.com/ailab/nlp/en/download.html) | 20.00 | 31.49 | 59.46 | 2.57 | 55.78 | 55.04 | 20.70 | 35.03 | 23769 |
+ | SBERT | xlm-roberta-base | [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) | 18.42 | 38.52 | 63.96 | 10.14 | 78.90 | 63.01 | 52.28 | 46.46 | 3138 |
+ | Instructor | hfl/chinese-roberta-wwm-ext | [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base) | 41.27 | 63.81 | 74.87 | 12.20 | 76.96 | 75.83 | 60.55 | 57.93 | 2980 |
+ | CoSENT | hfl/chinese-macbert-base | [shibing624/text2vec-base-chinese](https://huggingface.co/shibing624/text2vec-base-chinese) | 31.93 | 42.67 | 70.16 | 17.21 | 79.30 | 70.27 | 50.42 | 51.61 | 3008 |
+ | CoSENT | hfl/chinese-lert-large | [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 32.61 | 44.59 | 69.30 | 14.51 | 79.44 | 73.01 | 59.04 | 53.12 | 2092 |
+ | CoSENT | nghuyong/ernie-3.0-base-zh | [shibing624/text2vec-base-chinese-sentence](https://huggingface.co/shibing624/text2vec-base-chinese-sentence) | 43.37 | 61.43 | 73.48 | 38.90 | 78.25 | 70.60 | 53.08 | 59.87 | 3089 |
+ | CoSENT | nghuyong/ernie-3.0-base-zh | [shibing624/text2vec-base-chinese-paraphrase](https://huggingface.co/shibing624/text2vec-base-chinese-paraphrase) | 44.89 | 63.58 | 74.24 | 40.90 | 78.93 | 76.70 | 63.30 | 63.08 | 3066 |
+ | CoSENT | sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2 | [shibing624/text2vec-base-multilingual](https://huggingface.co/shibing624/text2vec-base-multilingual) | 32.39 | 50.33 | 65.64 | 32.56 | 74.45 | 68.88 | 51.17 | 53.67 | 4004 |
+
+ Notes:
+ - Evaluation metric: Spearman coefficient
+ - The `shibing624/text2vec-base-chinese` model was trained with the CoSENT method on the Chinese STS-B data, starting from `hfl/chinese-macbert-base`, and reaches good results on the Chinese STS-B test set. It can be reproduced by running [examples/training_sup_text_matching_model.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model.py); the model files have been uploaded to the HF model hub. Recommended for general-purpose Chinese semantic matching tasks.
+ - The `shibing624/text2vec-base-chinese-sentence` model was trained with the CoSENT method on the hand-curated Chinese STS dataset [shibing624/nli-zh-all/text2vec-base-chinese-sentence-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-sentence-dataset), starting from `nghuyong/ernie-3.0-base-zh`, and reaches good results on various Chinese NLI test sets. It can be reproduced by running [examples/training_sup_text_matching_model_jsonl_data.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model_jsonl_data.py); the model files have been uploaded to the HF model hub. Recommended for Chinese s2s (sentence-vs-sentence) semantic matching tasks.
+ - The `shibing624/text2vec-base-chinese-paraphrase` model was trained with the CoSENT method on the hand-curated Chinese STS dataset [shibing624/nli-zh-all/text2vec-base-chinese-paraphrase-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-paraphrase-dataset), starting from `nghuyong/ernie-3.0-base-zh`. Relative to [shibing624/nli-zh-all/text2vec-base-chinese-sentence-dataset](https://huggingface.co/datasets/shibing624/nli-zh-all/tree/main/text2vec-base-chinese-sentence-dataset), this dataset adds s2p (sentence-to-paraphrase) data, strengthening long-text representation. The model reaches SOTA on various Chinese NLI test sets and can be reproduced by running [examples/training_sup_text_matching_model_jsonl_data.py](https://github.com/shibing624/text2vec/blob/master/examples/training_sup_text_matching_model_jsonl_data.py); the model files have been uploaded to the HF model hub. Recommended for Chinese s2p (sentence-vs-paragraph) semantic matching tasks.
+ - The `sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2` model was trained with SBERT; it is the multilingual version of `paraphrase-MiniLM-L12-v2` and supports Chinese, English, and other languages.
+ - `w2v-light-tencent-chinese` is a Word2Vec model built from Tencent word embeddings; it loads on CPU and suits literal Chinese text matching and cold-start settings with little data.
+
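+ For a rough illustration of how the Spearman metric above is computed (a sketch, not the official benchmark code; the evaluation pairs and gold scores here are hypothetical placeholders):
+
+ ```python
+ import numpy as np
+ from scipy.stats import spearmanr
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer("shibing624/text2vec-base-chinese")
+
+ # Hypothetical STS-style pairs with gold similarity labels.
+ sents_a = ["如何更换花呗绑定银行卡", "我在北京工作"]
+ sents_b = ["花呗更改绑定银行卡", "今天天气不错"]
+ gold = [5.0, 0.0]
+
+ # L2-normalized embeddings make the pairwise dot product a cosine similarity.
+ emb_a = model.encode(sents_a, normalize_embeddings=True)
+ emb_b = model.encode(sents_b, normalize_embeddings=True)
+ cos = np.sum(emb_a * emb_b, axis=1)
+
+ # Spearman rank correlation between predicted and gold similarities.
+ print(spearmanr(cos, gold).correlation)
+ ```
+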
+ ## Usage (text2vec)
+ Using this model becomes easy when you have [text2vec](https://github.com/shibing624/text2vec) installed:
+
+ ```
+ pip install -U text2vec
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from text2vec import SentenceModel
+ sentences = ['如何更换花呗绑定银行卡', '花呗更改绑定银行卡']
+
+ model = SentenceModel('shibing624/text2vec-base-chinese')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+ ## Usage (HuggingFace Transformers)
+ Without [text2vec](https://github.com/shibing624/text2vec), you can use the model like this:
+
+ First, pass your input through the transformer model; then apply the right pooling operation on top of the contextualized word embeddings.
+
+ Install transformers:
+ ```
+ pip install transformers
+ ```
+
+ Then load the model and predict:
+ ```python
+ from transformers import BertTokenizer, BertModel
+ import torch
+
+ # Mean pooling - take the attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+ # Load model from HuggingFace Hub
+ tokenizer = BertTokenizer.from_pretrained('shibing624/text2vec-base-chinese')
+ model = BertModel.from_pretrained('shibing624/text2vec-base-chinese')
+ sentences = ['如何更换花呗绑定银行卡', '花呗更改绑定银行卡']
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling. In this case, mean pooling.
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
+
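+ If you intend to compare these embeddings with cosine similarity, it can help to L2-normalize them first (a small addition to the snippet above, not part of the original card):
+
+ ```python
+ import torch.nn.functional as F
+
+ # After normalization, a plain dot product equals cosine similarity.
+ sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
+ cosine_scores = sentence_embeddings @ sentence_embeddings.T
+ print(cosine_scores)
+ ```
+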
+ ## Usage (sentence-transformers)
+ [sentence-transformers](https://github.com/UKPLab/sentence-transformers) is a popular library to compute dense vector representations for sentences.
+
+ Install sentence-transformers:
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then load the model and predict:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ m = SentenceTransformer("shibing624/text2vec-base-chinese")
+ sentences = ['如何更换花呗绑定银行卡', '花呗更改绑定银行卡']
+
+ sentence_embeddings = m.encode(sentences)
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
+
+ ## Model speed up
+
+ | Model | ATEC | BQ | LCQMC | PAWSX | STSB |
+ |:------|:-----|:---|:------|:------|:-----|
+ | shibing624/text2vec-base-chinese (fp32, baseline) | 0.31928 | 0.42672 | 0.70157 | 0.17214 | 0.79296 |
+ | shibing624/text2vec-base-chinese (onnx-O4, [#29](https://huggingface.co/shibing624/text2vec-base-chinese/discussions/29)) | 0.31928 | 0.42672 | 0.70157 | 0.17214 | 0.79296 |
+ | shibing624/text2vec-base-chinese (ov, [#27](https://huggingface.co/shibing624/text2vec-base-chinese/discussions/27)) | 0.31928 | 0.42672 | 0.70157 | 0.17214 | 0.79296 |
+ | shibing624/text2vec-base-chinese (ov-qint8, [#30](https://huggingface.co/shibing624/text2vec-base-chinese/discussions/30)) | 0.30778 (-3.60%) | 0.43474 (+1.88%) | 0.69620 (-0.77%) | 0.16662 (-3.20%) | 0.79396 (+0.13%) |
+
+ In short:
+ 1. ✅ shibing624/text2vec-base-chinese (onnx-O4): ONNX optimized to [O4](https://huggingface.co/docs/optimum/en/onnxruntime/usage_guides/optimization) does not reduce performance, and gives a [~2x speedup](https://sbert.net/docs/sentence_transformer/usage/efficiency.html#benchmarks) on GPU.
+ 2. ✅ shibing624/text2vec-base-chinese (ov): OpenVINO does not reduce performance, and gives a 1.12x speedup on CPU.
+ 3. 🟡 shibing624/text2vec-base-chinese (ov-qint8): int8 quantization, calibrated on [Chinese STSB](https://huggingface.co/datasets/PhilipMay/stsb_multi_mt), incurs a small performance hit on some tasks and a tiny gain on others, while delivering a [4.78x speedup](https://sbert.net/docs/sentence_transformer/usage/efficiency.html#benchmarks) on CPU.
+
+ - Usage: shibing624/text2vec-base-chinese (onnx-O4), for GPU
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer(
+     "shibing624/text2vec-base-chinese",
+     backend="onnx",
+     model_kwargs={"file_name": "model_O4.onnx"},
+ )
+ embeddings = model.encode(["如何更换花呗绑定银行卡", "花呗更改绑定银行卡", "你是谁"])
+ print(embeddings.shape)
+ similarities = model.similarity(embeddings, embeddings)
+ print(similarities)
+ ```
+
+ - Usage: shibing624/text2vec-base-chinese (ov), for CPU
+ ```python
+ # pip install 'optimum[openvino]'
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer(
+     "shibing624/text2vec-base-chinese",
+     backend="openvino",
+ )
+ embeddings = model.encode(["如何更换花呗绑定银行卡", "花呗更改绑定银行卡", "你是谁"])
+ print(embeddings.shape)
+ similarities = model.similarity(embeddings, embeddings)
+ print(similarities)
+ ```
+
+ - Usage: shibing624/text2vec-base-chinese (ov-qint8), for CPU
+ ```python
+ # pip install optimum
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer(
+     "shibing624/text2vec-base-chinese",
+     backend="onnx",
+     model_kwargs={"file_name": "model_qint8_avx512_vnni.onnx"},
+ )
+ embeddings = model.encode(["如何更换花呗绑定银行卡", "花呗更改绑定银行卡", "你是谁"])
+ print(embeddings.shape)
+ similarities = model.similarity(embeddings, embeddings)
+ print(similarities)
+ ```
+
+ ## Full Model Architecture
+ ```
+ CoSENT(
+   (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_mean_tokens': True})
+ )
+ ```
+
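+ The same two-module stack can also be assembled by hand with sentence-transformers (an illustrative sketch; loading the model id directly, as in the usage sections above, is the normal route):
+
+ ```python
+ from sentence_transformers import SentenceTransformer, models
+
+ # Transformer module with the max_seq_length from sentence_bert_config.json.
+ word_embedding = models.Transformer("shibing624/text2vec-base-chinese", max_seq_length=128)
+ # Mean pooling over token embeddings, matching 1_Pooling/config.json.
+ pooling = models.Pooling(word_embedding.get_word_embedding_dimension(),
+                          pooling_mode_mean_tokens=True)
+ model = SentenceTransformer(modules=[word_embedding, pooling])
+ print(model)
+ ```
+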
+ ## Intended uses
+
+ Our model is intended to be used as a sentence and short paragraph encoder. Given an input text, it outputs a vector which captures
+ the semantic information. The sentence vector may be used for information retrieval, clustering, or sentence similarity tasks.
+
+ By default, input text longer than 128 word pieces (the model's `max_seq_length`) is truncated.
+
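+ The truncation length can be inspected or lowered through the sentence-transformers API (a small illustrative addition):
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer("shibing624/text2vec-base-chinese")
+ print(model.max_seq_length)  # 128, from sentence_bert_config.json
+ model.max_seq_length = 64    # truncate earlier if your inputs are short
+ ```
+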
+ ## Training procedure
+
+ ### Pre-training
+
+ We use the pretrained [`hfl/chinese-macbert-base`](https://huggingface.co/hfl/chinese-macbert-base) model.
+ Please refer to that model card for more detailed information about the pre-training procedure.
+
+ ### Fine-tuning
+
+ We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity for each
+ possible sentence pair in the batch.
+ We then apply a rank loss that compares the similarities of true pairs against those of false pairs.
+
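+ For intuition, a minimal sketch of a CoSENT-style rank loss follows (an illustrative reconstruction, not the exact training code; see the text2vec repository for the real implementation):
+
+ ```python
+ import torch
+
+ def cosent_loss(cos_sim: torch.Tensor, labels: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
+     """Every pair with a higher gold label should get a higher cosine
+     similarity than every pair with a lower gold label."""
+     sim = cos_sim * scale
+     diff = sim[:, None] - sim[None, :]          # diff[i, j] = sim[i] - sim[j]
+     mask = labels[:, None] < labels[None, :]    # penalize when labels[i] < labels[j]
+     zero = torch.zeros(1, device=sim.device)    # the "+1" term inside log(1 + sum(exp(...)))
+     return torch.logsumexp(torch.cat([zero, diff[mask]]), dim=0)
+
+ # Toy usage: three sentence pairs with gold labels 1 (similar) / 0 (dissimilar).
+ cos = torch.tensor([0.9, 0.3, 0.7])
+ y = torch.tensor([1.0, 0.0, 1.0])
+ print(cosent_loss(cos, y))
+ ```
+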
+ #### Hyperparameters
+
+ - training dataset: https://huggingface.co/datasets/shibing624/nli_zh
+ - max_seq_length: 128
+ - best epoch: 5
+ - sentence embedding dim: 768
+
+ ## Citing & Authors
+ This model was trained by [text2vec](https://github.com/shibing624/text2vec).
+
+ If you find this model helpful, feel free to cite:
+ ```bibtex
+ @software{text2vec,
+   author = {Xu Ming},
+   title = {text2vec: A Tool for Text to Vector},
+   year = {2022},
+   url = {https://github.com/shibing624/text2vec},
+ }
+ ```
sbert/models--shibing624--text2vec-base-chinese/blobs/90e03d46bdb660cb7a95fb0200a35e456457f78c ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "hfl/chinese-macbert-base",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.12.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
sbert/models--shibing624--text2vec-base-chinese/blobs/ca4f9781030019ab9b253c6dcb8c7878b6dc87a5 ADDED
The diff for this file is too large to render. See raw diff
 
sbert/models--shibing624--text2vec-base-chinese/blobs/e0021d480d68dfdf363d3639ee7f3c00f63239f7 ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 128,
+   "do_lower_case": false
+ }
sbert/models--shibing624--text2vec-base-chinese/blobs/e7b0375001f109a6b8873d756ad4f7bbb15fbaa5 ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
sbert/models--shibing624--text2vec-base-chinese/refs/main ADDED
@@ -0,0 +1 @@
+ 183bb99aa7af74355fb58d16edf8c13ae7c5433e
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/1_Pooling/config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_mean_tokens": true
+ }
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/README.md ADDED
@@ -0,0 +1,238 @@
Content identical to sbert/models--shibing624--text2vec-base-chinese/blobs/7fac338dfa876bf83d235a13e72b06b21ea4f552, rendered above.
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "hfl/chinese-macbert-base",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.12.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c855515479137398ce4ea985628548d4e8ed8c5764656dac966d6a24f39e721
+ size 409098104
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 128,
+   "do_lower_case": false
+ }
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "name_or_path": "hfl/chinese-macbert-base", "tokenizer_class": "BertTokenizer"}
sbert/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e/vocab.txt ADDED
The diff for this file is too large to render. See raw diff