Commit: "Upload folder using huggingface_hub"
Files changed:
- medqa_corpus_en.py (+14, -53) — changed
- textbooks_en_jsonl.zip (+3, -0) — added

medqa_corpus_en.py
CHANGED
@@ -40,19 +40,19 @@ Surgery_Schwartz.txt Surgery Clinical medicine
|
|
40 |
|
41 |
SUBJECT_SUBSETS = {
|
42 |
"core_clinical":
|
43 |
-
["Anatomy_Gray
|
44 |
-
"InternalMed_Harrison
|
45 |
-
"
|
46 |
"basic_biology":
|
47 |
-
["Biochemistry_Lippincott
|
48 |
-
"Physiology_Levy.txt"],
|
49 |
"pharmacology":
|
50 |
-
["Pharmacology_Katzung
|
51 |
"psychiatry":
|
52 |
-
["Psichiatry_DSM-5
|
53 |
}
|
54 |
|
55 |
from pathlib import Path
|
|
|
56 |
try:
|
57 |
from ogbujipt.text_helper import text_splitter
|
58 |
except ImportError as e:
|
@@ -97,7 +97,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
97 |
SOFTWARE.
|
98 |
"""
|
99 |
|
100 |
-
_URLS = ["
|
101 |
|
102 |
_CITATION = """\
|
103 |
@article{jin2021disease,
|
@@ -112,53 +112,14 @@ _CITATION = """\
|
|
112 |
}
|
113 |
"""
|
114 |
|
115 |
-
# Character-normalisation table applied to extracted text chunks.  Maps
# typographic Unicode punctuation and layout characters to plain-ASCII
# equivalents; replacements are applied in insertion order.
REPL_MAP = {
    '\n': ' ',
    '\u2019': "'",          # right single quotation mark
    '\u2013': '/',          # en dash (ranges, e.g. "2013/2014")
    '\ufb02': 'fl',         # "fl" ligature
    '\u2014': ' - ',        # em dash
    '\u201c': "'",          # left double quotation mark
    '\u2003': ' ',          # em space
    '\u201d': "'",          # right double quotation mark
    '\u2193': 'decreased',  # downwards arrow (clinical shorthand)
}


def recursive_replace(text, items=None):
    """Apply each (old, new) replacement pair to *text*, in order.

    Args:
        text: String to normalise.
        items: Optional sequence of (old, new) pairs; defaults to the
            entries of REPL_MAP.

    Returns:
        *text* with every pair applied sequentially (later pairs see the
        output of earlier ones, exactly as the original recursive version
        behaved).
    """
    # Name kept for backward compatibility; the implementation is now a
    # plain loop, which avoids building a sliced copy of the pair list on
    # every step and removes any recursion-depth concern.
    pairs = list(REPL_MAP.items()) if items is None else items
    for old, new in pairs:
        text = text.replace(old, new)
    return text
|
135 |
-
|
136 |
-
|
137 |
-
def proportion_of_ascii_characters(string):
    """Return the fraction of characters in *string* that are ASCII.

    Used to filter out chunks dominated by mis-encoded or non-text content.

    Args:
        string: Text to inspect.

    Returns:
        Float in [0.0, 1.0].  An empty string is treated as vacuously
        all-ASCII and returns 1.0 (the original implementation raised
        ZeroDivisionError on "").
    """
    if not string:
        return 1.0
    # ord(c) < 128 is exactly the ASCII range; sum() counts matches at C speed.
    num_ascii_characters = sum(1 for character in string if ord(character) < 128)
    return num_ascii_characters / len(string)
|
148 |
-
|
149 |
|
150 |
def get_med_qa_textbooks(location, subset_name):
|
151 |
-
for textbook_content in location.glob('*.
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
)
|
158 |
-
for chunk in r_splitter.split_text(textbook_content.open(encoding='utf-8').read()):
|
159 |
-
chunk = recursive_replace(chunk)
|
160 |
-
if proportion_of_ascii_characters(chunk) >= .9:
|
161 |
-
yield {"text": chunk, "source": str(textbook_content)}
|
162 |
|
163 |
|
164 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
|
|
40 |
|
41 |
# Maps each dataset configuration name to the textbook basenames (the file
# stems of the per-textbook JSONL files) that belong to it.
# NOTE: "Obstentrics_Williams" and "Psichiatry_DSM-5" mirror the actual file
# names shipped in the archive and must not be "corrected" here.
SUBJECT_SUBSETS = {
    "core_clinical": [
        "Anatomy_Gray",
        "First_Aid_Step1",
        "First_Aid_Step2",
        "Immunology_Janeway",
        "InternalMed_Harrison",
        "Neurology_Adams",
        "Obstentrics_Williams",
        "Pathoma_Husain",
        "Pediatrics_Nelson",
        "Surgery_Schwartz",
    ],
    "basic_biology": [
        "Biochemistry_Lippincott",
        "Cell_Biology_Alberts",
        "Histology_Ross",
        "Pathology_Robbins",
        "Physiology_Levy",
    ],
    "pharmacology": [
        "Pharmacology_Katzung",
    ],
    "psychiatry": [
        "Psichiatry_DSM-5",
    ],
}
|
53 |
|
54 |
from pathlib import Path
|
55 |
+
import json
|
56 |
try:
|
57 |
from ogbujipt.text_helper import text_splitter
|
58 |
except ImportError as e:
|
|
|
97 |
SOFTWARE.
|
98 |
"""
|
99 |
|
100 |
+
# Relative URL: the JSONL archive is uploaded alongside this loader script in
# the same dataset repository (added in the same commit).
_URLS = ["textbooks_en_jsonl.zip"]
|
101 |
|
102 |
_CITATION = """\
|
103 |
@article{jin2021disease,
|
|
|
112 |
}
|
113 |
"""
|
114 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
|
116 |
def get_med_qa_textbooks(location, subset_name):
    """Yield one parsed record per JSONL line for every textbook in a subset.

    Args:
        location: pathlib.Path of the directory holding the per-textbook
            ``*.jsonl`` files (one JSON object per line).
        subset_name: Key into SUBJECT_SUBSETS selecting which textbook
            stems to include.

    Yields:
        dict: the parsed JSON object of each line.  Presumably records of
        the form {"text": ..., "source": ...} produced by the previous
        chunking pipeline — TODO confirm against the archive contents.

    Raises:
        KeyError: If *subset_name* is not a SUBJECT_SUBSETS key.
    """
    wanted = SUBJECT_SUBSETS[subset_name]
    # sorted(): Path.glob order is filesystem-dependent; sorting makes the
    # generated example order reproducible across machines.
    for textbook_file in sorted(location.glob('*.jsonl')):
        # Path.stem (filename minus final suffix) instead of
        # name.split('.')[0], which would truncate stems containing dots.
        if textbook_file.stem not in wanted:
            continue
        # Explicit encoding: open()'s default is platform-dependent and the
        # archive files are UTF-8.
        with textbook_file.open("r", encoding="utf-8") as fid:
            for line in fid:
                # Tolerate blank/trailing lines instead of crashing in
                # json.loads("").
                if line.strip():
                    yield json.loads(line)
|
|
|
|
|
|
|
|
|
|
123 |
|
124 |
|
125 |
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
|
textbooks_en_jsonl.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:88badbb560ee3874b2b750225ba772bebfb091569c4c27ac9e220bd4deb6fedd
|
3 |
+
size 29196996
|