Languages: French
Size: n<1K
Commit f4b1a87 (1 parent: f277749), committed by boudinfl

updating stats and prmu categories

Files changed (3):
  1. README.md +3 -3
  2. prmu.py +103 -0
  3. stats.ipynb +184 -0
README.md CHANGED
@@ -38,9 +38,9 @@ Details about the process can be found in `prmu.py`.
 
 The dataset contains the following test split:
 
- | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
- | :--------- |------------:|-------:|-------------:|----------:|------------:|--------:|---------:|
- | Test | 400 | - | - | - | - | - | - |
+ | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
+ | :--------- |------------:|-----------:|-------------:|----------:|------------:|--------:|---------:|
+ | Test | 400 | 156.9 | 11.81 | 40.60 | 7.32 | 19.28 | 32.80 |
 
 The following data fields are available :
 
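For reference, the new numbers in the table come from `stats.ipynb` (added below): `#words` is the average title-plus-abstract length in spaCy tokens, `# keyphrases` is the average number of gold keyphrases per document, and the four percentage columns are per-document shares of PRMU categories macro-averaged over the test documents. A minimal worked example of that averaging, using made-up labels for a single document:

```python
# Hypothetical document annotated with 4 gold keyphrases:
prmu = ["P", "P", "R", "U"]

share_present = prmu.count("P") / len(prmu)    # 0.50 -> this document contributes 50% to "% Present"
share_reordered = prmu.count("R") / len(prmu)  # 0.25
share_unseen = prmu.count("U") / len(prmu)     # 0.25

# "% Present" etc. in the table are these per-document shares averaged over
# all test documents and multiplied by 100 (see the second cell of stats.ipynb).
```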
prmu.py ADDED
@@ -0,0 +1,103 @@
+ # -*- coding: utf-8 -*-
+
+ import sys
+ import json
+ import spacy
+
+ from nltk.stem.snowball import SnowballStemmer as Stemmer
+
+ nlp = spacy.load("fr_core_news_sm")
+
+ # https://spacy.io/usage/linguistic-features#native-tokenizer-additions
+
+ from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+ from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+ from spacy.util import compile_infix_regex
+
+ # Modify tokenizer infix patterns
+ infixes = (
+     LIST_ELLIPSES
+     + LIST_ICONS
+     + [
+         r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+         r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+             al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+         ),
+         r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+         # ✅ Commented out regex that splits on hyphens between letters:
+         # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+         r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+     ]
+ )
+
+ infix_re = compile_infix_regex(infixes)
+ nlp.tokenizer.infix_finditer = infix_re.finditer
+
+
+ def contains(subseq, inseq):
+     return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
+
+
+ def find_pmru(tok_title, tok_text, tok_kp):
+     """Find PRMU category of a given keyphrase."""
+
+     # if kp is present
+     if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
+         return "P"
+
+     # if kp is considered as absent
+     else:
+
+         # find present and absent words
+         present_words = [w for w in tok_kp if w in tok_title or w in tok_text]
+
+         # if "all" words are present
+         if len(present_words) == len(tok_kp):
+             return "R"
+         # if "some" words are present
+         elif len(present_words) > 0:
+             return "M"
+         # if "no" words are present
+         else:
+             return "U"
+
+
+ if __name__ == '__main__':
+
+     data = []
+
+     # read the dataset
+     with open(sys.argv[1], 'r') as f:
+         # loop through the documents
+         for line in f:
+             doc = json.loads(line.strip())
+
+             print(doc['id'])
+
+             title_spacy = nlp(doc['title'])
+             abstract_spacy = nlp(doc['abstract'])
+
+             title_tokens = [token.text for token in title_spacy]
+             abstract_tokens = [token.text for token in abstract_spacy]
+
+             title_stems = [Stemmer('french').stem(w.lower()) for w in title_tokens]
+             abstract_stems = [Stemmer('french').stem(w.lower()) for w in abstract_tokens]
+
+             keyphrases_stems = []
+             for keyphrase in doc['keyphrases']:
+                 keyphrase_spacy = nlp(keyphrase)
+                 keyphrase_tokens = [token.text for token in keyphrase_spacy]
+                 keyphrase_stems = [Stemmer('french').stem(w.lower()) for w in keyphrase_tokens]
+                 keyphrases_stems.append(keyphrase_stems)
+
+             prmu = [find_pmru(title_stems, abstract_stems, kp) for kp in keyphrases_stems]
+
+             if doc['prmu'] != prmu:
+                 print("PRMU categories are not identical!")
+
+             doc['prmu'] = prmu
+             data.append(json.dumps(doc))
+
+     # write the json
+     with open(sys.argv[2], 'w') as o:
+         o.write("\n".join(data))
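
As an illustration (not part of the commit): the script is run as `python prmu.py <input.jsonl> <output.jsonl>`, where the input holds one JSON document per line with `id`, `title`, `abstract`, `keyphrases` and `prmu` fields. The sketch below shows how the four PRMU categories fall out of `find_pmru`; the stems are invented for the example, and importing `prmu` also loads the spaCy French model, so `fr_core_news_sm` must be installed.

```python
# Hypothetical stems, for illustration only.
from prmu import find_pmru

title = ["analys", "automat", "de", "text"]    # stems of a made-up title
abstract = ["extract", "de", "terme"]          # stems of a made-up abstract

print(find_pmru(title, abstract, ["analys", "automat"]))       # "P": contiguous match in the title
print(find_pmru(title, abstract, ["automat", "analys"]))       # "R": all words present, order differs
print(find_pmru(title, abstract, ["extract", "information"]))  # "M": only some words present
print(find_pmru(title, abstract, ["apprentiss", "profond"]))   # "U": no words present
```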
stats.ipynb ADDED
@@ -0,0 +1,184 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "id": "eba2ee81",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "No config specified, defaulting to: wikinews/raw\n",
+       "Reusing dataset wikinews (/Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___wikinews/raw/1.0.0/aa15bd435a75a532fac6070fe8169812db6efd9d00c6fbac93992165536d8183)\n"
+      ]
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "51588bf1a2714239b22d99eeac8f0db7",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/1 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "from datasets import load_dataset\n",
+     "\n",
+     "dataset = load_dataset('taln-ls2n/termith-eval')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "id": "4ba72244",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "dc2eac8de82a4851901c76d873c7546f",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/399 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "# keyphrases: 11.81\n",
+       "% P: 40.60\n",
+       "% R: 7.32\n",
+       "% M: 19.28\n",
+       "% U: 32.80\n"
+      ]
+     }
+    ],
+    "source": [
+     "from tqdm.notebook import tqdm\n",
+     "\n",
+     "P, R, M, U, nb_kps = [], [], [], [], []\n",
+     " \n",
+     "for sample in tqdm(dataset['test']):\n",
+     "    nb_kps.append(len(sample[\"keyphrases\"]))\n",
+     "    P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+     "    R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+     "    M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+     "    U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+     " \n",
+     "print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+     "print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+     "print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+     "print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+     "print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "id": "52dda817",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import spacy\n",
+     "\n",
+     "nlp = spacy.load(\"fr_core_news_sm\")\n",
+     "\n",
+     "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+     "\n",
+     "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+     "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+     "from spacy.util import compile_infix_regex\n",
+     "\n",
+     "# Modify tokenizer infix patterns\n",
+     "infixes = (\n",
+     "    LIST_ELLIPSES\n",
+     "    + LIST_ICONS\n",
+     "    + [\n",
+     "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+     "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+     "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+     "        ),\n",
+     "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+     "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+     "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+     "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+     "    ]\n",
+     ")\n",
+     "\n",
+     "infix_re = compile_infix_regex(infixes)\n",
+     "nlp.tokenizer.infix_finditer = infix_re.finditer"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "id": "047ab1cc",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "7d2dc99496ef4579b3b027ca651ed359",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        " 0%| | 0/399 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "avg doc len: 156.9\n"
+      ]
+     }
+    ],
+    "source": [
+     "doc_len = []\n",
+     "for sample in tqdm(dataset['test']):\n",
+     "    doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+     " \n",
+     "print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len))) "
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.10"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
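
The per-keyphrase `prmu` labels written by this commit can also be used downstream, for example to restrict gold keyphrases to those actually present in the source text when scoring an extraction model. A minimal sketch, assuming the `taln-ls2n/termith-eval` identifier loaded in the notebook above:

```python
from datasets import load_dataset

# Assumes the same dataset identifier used in stats.ipynb.
dataset = load_dataset("taln-ls2n/termith-eval", split="test")

for sample in dataset:
    # Keep only gold keyphrases labelled "P" (present verbatim, up to stemming and tokenisation).
    present_gold = [kp for kp, cat in zip(sample["keyphrases"], sample["prmu"]) if cat == "P"]
    # ... evaluate predicted keyphrases against present_gold here ...
```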