boudinfl committed
Commit 25f154a
1 Parent(s): dd8b6f5

Adding stats

Files changed (2)
  1. prmu.py +99 -0
  2. stats.ipynb +312 -0
prmu.py ADDED
@@ -0,0 +1,99 @@
+ # -*- coding: utf-8 -*-
+
+ import sys
+ import json
+ import spacy
+
+ from nltk.stem.snowball import SnowballStemmer as Stemmer
+
+ nlp = spacy.load("en_core_web_sm")
+
+ # https://spacy.io/usage/linguistic-features#native-tokenizer-additions
+
+ from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+ from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+ from spacy.util import compile_infix_regex
+
+ # Modify tokenizer infix patterns
+ infixes = (
+     LIST_ELLIPSES
+     + LIST_ICONS
+     + [
+         r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+         r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+             al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+         ),
+         r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+         # ✅ Commented out regex that splits on hyphens between letters:
+         # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+         r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+     ]
+ )
+
+ infix_re = compile_infix_regex(infixes)
+ nlp.tokenizer.infix_finditer = infix_re.finditer
+
+
+ def contains(subseq, inseq):
+     return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
+
+
+ def find_prmu(tok_title, tok_text, tok_kp):
+     """Find the PRMU category of a given keyphrase."""
+
+     # if kp is present
+     if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
+         return "P"
+
+     # if kp is considered as absent
+     else:
+
+         # find present and absent words
+         present_words = [w for w in tok_kp if w in tok_title or w in tok_text]
+
+         # if "all" words are present
+         if len(present_words) == len(tok_kp):
+             return "R"
+         # if "some" words are present
+         elif len(present_words) > 0:
+             return "M"
+         # if "no" words are present
+         else:
+             return "U"
+
+
+ if __name__ == '__main__':
+
+     data = []
+
+     # read the dataset
+     with open(sys.argv[1], 'r') as f:
+         # loop through the documents
+         for line in f:
+             doc = json.loads(line.strip())
+
+             title_spacy = nlp(doc['title'])
+             abstract_spacy = nlp(doc['abstract'])
+
+             title_tokens = [token.text for token in title_spacy]
+             abstract_tokens = [token.text for token in abstract_spacy]
+
+             title_stems = [Stemmer('porter').stem(w.lower()) for w in title_tokens]
+             abstract_stems = [Stemmer('porter').stem(w.lower()) for w in abstract_tokens]
+
+             keyphrases_stems = []
+             for keyphrase in doc['keyphrases']:
+                 keyphrase_spacy = nlp(keyphrase)
+                 keyphrase_tokens = [token.text for token in keyphrase_spacy]
+                 keyphrase_stems = [Stemmer('porter').stem(w.lower()) for w in keyphrase_tokens]
+                 keyphrases_stems.append(keyphrase_stems)
+
+             prmu = [find_prmu(title_stems, abstract_stems, kp) for kp in keyphrases_stems]
+             doc['prmu'] = prmu
+
+             data.append(json.dumps(doc))
+             print(doc['id'])
+
+     # write the json
+     with open(sys.argv[2], 'w') as o:
+         o.write("\n".join(data))
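For context, prmu.py assigns each gold keyphrase one of four categories after stemming: Present (P, the stemmed keyphrase occurs contiguously in the title or abstract), Reordered (R, all of its words occur but not contiguously), Mixed (M, only some words occur) and Unseen (U, none occur). It is run as `python prmu.py input.jsonl output.jsonl`, where the input holds one JSON document per line with id, title, abstract and keyphrases fields (file names here are illustrative). A minimal sketch of the expected behaviour of find_prmu on toy, already-stemmed tokens (the values are made up for illustration, not taken from the dataset):

    # Toy check of the PRMU categories; tokens are pre-stemmed/lowercased
    # for brevity, whereas prmu.py stems with SnowballStemmer('porter').
    title = ["neural", "keyphras", "gener"]
    text = ["we", "studi", "keyphras", "gener", "with", "neural", "model"]

    assert find_prmu(title, text, ["keyphras", "gener"]) == "P"  # contiguous match
    assert find_prmu(title, text, ["gener", "keyphras"]) == "R"  # all words present, reordered
    assert find_prmu(title, text, ["neural", "network"]) == "M"  # some words present
    assert find_prmu(title, text, ["transform"]) == "U"          # no word present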
stats.ipynb ADDED
@@ -0,0 +1,312 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "id": "eba2ee81",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "09e8150022c94f569f19b76663ffb89f",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Downloading builder script: 0%| | 0.00/7.79k [00:00<?, ?B/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "No config specified, defaulting to: kp_times/raw\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Downloading and preparing dataset kp_times/raw to /Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___kp_times/raw/1.1.0/81f75cd972e595c55ef8cc865e898b0bc01ce7d220287a246b566b7417f07274...\n"
+      ]
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "5f40668afdd0428eb9bec18770b4bf3e",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Downloading data files: 0%| | 0/3 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "2f17b141c71d4f03ac58df7b4d1133cd",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Extracting data files: 0%| | 0/3 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Generating train split: 0 examples [00:00, ? examples/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Generating test split: 0 examples [00:00, ? examples/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Generating validation split: 0 examples [00:00, ? examples/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Dataset kp_times downloaded and prepared to /Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___kp_times/raw/1.1.0/81f75cd972e595c55ef8cc865e898b0bc01ce7d220287a246b566b7417f07274. Subsequent calls will reuse this data.\n"
+      ]
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "716568658d6749da8a0926dcb1fb384e",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/3 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "from datasets import load_dataset\n",
+     "\n",
+     "dataset = load_dataset('taln-ls2n/kptimes')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "id": "4ba72244",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "cb61d182cdc14ea7868eb258413ca117",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/259923 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "abf721e69b3f46079166a1986514b5bf",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/10000 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "827aab4cd5414084adf841e52d4899c4",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/20000 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "# keyphrases: 5.03\n",
+       "% P: 46.64\n",
+       "% R: 15.11\n",
+       "% M: 28.89\n",
+       "% U: 9.36\n"
+      ]
+     }
+    ],
+    "source": [
+     "from tqdm.notebook import tqdm\n",
+     "\n",
+     "P, R, M, U, nb_kps = [], [], [], [], []\n",
+     "\n",
+     "for split in ['train', 'validation', 'test']:\n",
+     "    \n",
+     "    for sample in tqdm(dataset[split]):\n",
+     "        nb_kps.append(len(sample[\"keyphrases\"]))\n",
+     "        P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+     "        R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+     "        M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+     "        U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+     "        \n",
+     "print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+     "print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+     "print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+     "print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+     "print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "id": "52dda817",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import spacy\n",
+     "\n",
+     "nlp = spacy.load(\"en_core_web_sm\")\n",
+     "\n",
+     "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+     "\n",
+     "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+     "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+     "from spacy.util import compile_infix_regex\n",
+     "\n",
+     "# Modify tokenizer infix patterns\n",
+     "infixes = (\n",
+     "    LIST_ELLIPSES\n",
+     "    + LIST_ICONS\n",
+     "    + [\n",
+     "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+     "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+     "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+     "        ),\n",
+     "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+     "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+     "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+     "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+     "    ]\n",
+     ")\n",
+     "\n",
+     "infix_re = compile_infix_regex(infixes)\n",
+     "nlp.tokenizer.infix_finditer = infix_re.finditer"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "047ab1cc",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "45f4357088854088870320517821adc4",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/259923 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "doc_len = []\n",
+     "for split in ['train', 'validation', 'test']:\n",
+     "    for sample in tqdm(dataset[split]):\n",
+     "        doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+     "print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len)))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "0d55f0f0",
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.12"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
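Note that the notebook reports macro-averaged statistics: each document contributes the fraction of its keyphrases falling in each PRMU category, and those fractions are averaged over the 289,923 documents of the three splits (259,923 train / 10,000 validation / 20,000 test). A minimal sketch of the same computation for a single category (macro_pct is a hypothetical helper, not part of the notebook; like the notebook, it assumes every sample has at least one keyphrase and carries the keyphrases and prmu fields produced by prmu.py):

    def macro_pct(samples, cat):
        # per-document fraction of keyphrases labelled `cat`, averaged over documents
        props = [s["prmu"].count(cat) / len(s["keyphrases"]) for s in samples]
        return 100 * sum(props) / len(props)

    # e.g. macro_pct(dataset["train"], "P") should roughly match the "% P" figure above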