Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
dipteshkanojia committed on
Commit
1760efc
1 Parent(s): 4246c86

added gitignore

Browse files
Files changed (1) hide show
  1. check.ipynb +0 -192
check.ipynb DELETED
@@ -1,192 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 4,
6
- "metadata": {},
7
- "outputs": [
8
- {
9
- "name": "stdout",
10
- "output_type": "stream",
11
- "text": [
12
- "0\n"
13
- ]
14
- },
15
- {
16
- "ename": "IndexError",
17
- "evalue": "list index out of range",
18
- "output_type": "error",
19
- "traceback": [
20
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
21
- "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
22
- "Input \u001b[0;32mIn [4]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 16\u001b[0m tokens\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 17\u001b[0m pos_tags\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m1\u001b[39m])\n\u001b[0;32m---> 18\u001b[0m ner_tags\u001b[38;5;241m.\u001b[39mappend(\u001b[43msplits\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m3\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241m.\u001b[39mrstrip())\n\u001b[1;32m 20\u001b[0m \u001b[38;5;66;03m# last example\u001b[39;00m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tokens:\n",
23
- "\u001b[0;31mIndexError\u001b[0m: list index out of range"
24
- ]
25
- }
26
- ],
27
- "source": [
28
- "with open(\"data/train.conll\", encoding=\"utf-8\") as f:\n",
29
- " guid = 0\n",
30
- " tokens = []\n",
31
- " pos_tags = []\n",
32
- " ner_tags = []\n",
33
- " for line in f:\n",
34
- " if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n",
35
- " if tokens:\n",
36
- " guid += 1\n",
37
- " tokens = []\n",
38
- " pos_tags = []\n",
39
- " ner_tags = []\n",
40
- " else:\n",
41
- " print(guid)\n",
42
- " splits = line.split(\" \")\n",
43
- " tokens.append(splits[0])\n",
44
- " pos_tags.append(splits[1])\n",
45
- " ner_tags.append(splits[2].rstrip())\n",
46
- "\n",
47
- " # last example\n",
48
- " if tokens:\n",
49
- " print(\"lst\")"
50
- ]
51
- },
52
- {
53
- "cell_type": "code",
54
- "execution_count": 16,
55
- "metadata": {},
56
- "outputs": [
57
- {
58
- "name": "stderr",
59
- "output_type": "stream",
60
- "text": [
61
- "Reusing dataset plod-cw (/home/diptesh/.cache/huggingface/datasets/surrey-nlp___plod-cw/PLOD-CW/0.0.5/ded93459451683583207c3ccb6a22ebeeafd54733e72757b6f73806d9aca6e83)\n"
62
- ]
63
- },
64
- {
65
- "data": {
66
- "application/json": {
67
- "ascii": false,
68
- "bar_format": null,
69
- "colour": null,
70
- "elapsed": 0.010100603103637695,
71
- "initial": 0,
72
- "n": 0,
73
- "ncols": null,
74
- "nrows": null,
75
- "postfix": null,
76
- "prefix": "",
77
- "rate": null,
78
- "total": 3,
79
- "unit": "it",
80
- "unit_divisor": 1000,
81
- "unit_scale": false
82
- },
83
- "application/vnd.jupyter.widget-view+json": {
84
- "model_id": "1f468deeb0f34c0b8fe8bdd94301ba38",
85
- "version_major": 2,
86
- "version_minor": 0
87
- },
88
- "text/plain": [
89
- " 0%| | 0/3 [00:00<?, ?it/s]"
90
- ]
91
- },
92
- "metadata": {},
93
- "output_type": "display_data"
94
- }
95
- ],
96
- "source": [
97
- "from datasets import load_dataset\n",
98
- "dataset = load_dataset(\"surrey-nlp/PLOD-CW\")"
99
- ]
100
- },
101
- {
102
- "cell_type": "code",
103
- "execution_count": 17,
104
- "metadata": {},
105
- "outputs": [
106
- {
107
- "name": "stdout",
108
- "output_type": "stream",
109
- "text": [
110
- "1072\n",
111
- "126\n",
112
- "153\n"
113
- ]
114
- }
115
- ],
116
- "source": [
117
- "print(len(dataset['train']))\n",
118
- "print(len(dataset['validation']))\n",
119
- "print(len(dataset['test']))"
120
- ]
121
- },
122
- {
123
- "cell_type": "code",
124
- "execution_count": 18,
125
- "metadata": {},
126
- "outputs": [
127
- {
128
- "name": "stdout",
129
- "output_type": "stream",
130
- "text": [
131
- "15\n"
132
- ]
133
- }
134
- ],
135
- "source": [
136
- "print(len(dataset['train'][0]['tokens']))"
137
- ]
138
- },
139
- {
140
- "cell_type": "code",
141
- "execution_count": 22,
142
- "metadata": {},
143
- "outputs": [
144
- {
145
- "name": "stdout",
146
- "output_type": "stream",
147
- "text": [
148
- "323\n"
149
- ]
150
- }
151
- ],
152
- "source": [
153
- "split='train'\n",
154
- "maxLen = 0\n",
155
- "for i in range(len(dataset[split])):\n",
156
- " instanceLen = len(dataset['train'][i]['tokens'])\n",
157
- " if instanceLen > maxLen:\n",
158
- " maxLen = instanceLen\n",
159
- "\n",
160
- "print(maxLen)\n"
161
- ]
162
- },
163
- {
164
- "cell_type": "code",
165
- "execution_count": null,
166
- "metadata": {},
167
- "outputs": [],
168
- "source": []
169
- }
170
- ],
171
- "metadata": {
172
- "kernelspec": {
173
- "display_name": "hfdataset",
174
- "language": "python",
175
- "name": "python3"
176
- },
177
- "language_info": {
178
- "codemirror_mode": {
179
- "name": "ipython",
180
- "version": 3
181
- },
182
- "file_extension": ".py",
183
- "mimetype": "text/x-python",
184
- "name": "python",
185
- "nbconvert_exporter": "python",
186
- "pygments_lexer": "ipython3",
187
- "version": "3.9.12"
188
- }
189
- },
190
- "nbformat": 4,
191
- "nbformat_minor": 2
192
- }