etrop committed on
Commit
631a7ef
1 Parent(s): c03a28b

Create genomics-long-range-benchmark.py

Browse files
Files changed (1) hide show
  1. genomics-long-range-benchmark.py +530 -0
genomics-long-range-benchmark.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gzip
import os
import re
import shutil
import urllib
import urllib.request
from abc import ABC, abstractmethod
from ast import literal_eval
from pathlib import Path
from typing import List

import datasets
import numpy as np
import pandas as pd
import pysam
from Bio import SeqIO
from Bio.Seq import Seq
from datasets import DatasetInfo
from pyfaidx import Fasta
from tqdm import tqdm
20
+
21
+ """
22
+ --------------------------------------------------------------------------------------------
23
+ Reference Genome URLS:
24
+ -------------------------------------------------------------------------------------------
25
+ """
26
+ H38_REFERENCE_GENOME_URL = (
27
+ "https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/" "hg38.fa.gz"
28
+ )
29
+ H19_REFERENCE_GENOME_URL = (
30
+ "https://hgdownload.soe.ucsc.edu/goldenPath/hg19/bigZips/" "hg19.fa.gz"
31
+ )
32
+
33
+ """
34
+ --------------------------------------------------------------------------------------------
35
+ Task Specific Handlers:
36
+ -------------------------------------------------------------------------------------------
37
+ """
38
+
39
class GenomicLRATaskHandler(ABC):
    """
    Abstract base class for the Genomic LRA task handlers.

    Concrete handlers download their task-specific files, describe their
    features via ``get_info`` and yield examples via ``generate_examples``.
    Annotations referencing third-party types are written as forward-reference
    strings so defining this class does not require those imports at
    class-creation time.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def get_info(self, description: str) -> "DatasetInfo":
        """
        Returns the DatasetInfo for the task.
        """
        pass

    def split_generators(
        self, dl_manager, cache_dir_root
    ) -> "List[datasets.SplitGenerator]":
        """
        Downloads required files using dl_manager and separates them by split.

        Default implementation exposes only train and test splits; handlers
        with a validation split override this method.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"handler": self, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"handler": self, "split": "test"}
            ),
        ]

    @abstractmethod
    def generate_examples(self, split):
        """
        A generator that yields examples for the specified split.
        """
        pass

    @staticmethod
    def hook(t):
        """
        Wraps a tqdm-style progress bar ``t`` into a ``reporthook`` callback
        suitable for ``urllib.request.urlretrieve``.
        """
        last_b = [0]  # mutable cell: remembers the previous block count

        def inner(b=1, bsize=1, tsize=None):
            """
            b : int, optional
                Number of blocks just transferred [default: 1].
            bsize : int, optional
                Size of each block (in tqdm units) [default: 1].
            tsize : int, optional
                Total size (in tqdm units). If [default: None] remains unchanged.
            """
            if tsize is not None:
                t.total = tsize
            # urlretrieve reports cumulative block counts; convert to a delta.
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b

        return inner

    def download_and_extract_gz(self, file_url, cache_dir_root):
        """
        Downloads and extracts a gz file into the given cache directory.
        Returns the full file path of the extracted gz file.

        Args:
            file_url: url of the gz file to be downloaded and extracted.
            cache_dir_root: Directory to extract file into.
        """
        file_fname = Path(file_url).stem
        download_dir = os.path.join(cache_dir_root, "downloads")
        file_complete_path = os.path.join(download_dir, file_fname)

        if not os.path.exists(file_complete_path):
            # Bug fix: ensure the downloads directory exists before writing
            # into it; urlretrieve/open fail on a fresh cache dir otherwise.
            os.makedirs(download_dir, exist_ok=True)
            if not os.path.exists(file_complete_path + ".gz"):
                with tqdm(
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    miniters=1,
                    desc=file_url.split("/")[-1],
                ) as t:
                    urllib.request.urlretrieve(
                        file_url, file_complete_path + ".gz", reporthook=self.hook(t)
                    )
            with gzip.open(file_complete_path + ".gz", "rb") as file_in:
                with open(file_complete_path, "wb") as file_out:
                    shutil.copyfileobj(file_in, file_out)
        return file_complete_path
125
+
126
+
127
class CagePredictionHandler(GenomicLRATaskHandler):
    """
    Handler for the CAGE prediction task.

    Each example pairs a reference-genome sequence with a
    (num_bins x 50) matrix of CAGE track targets loaded from npz archives.
    """

    NUM_TRAIN = 33891
    NUM_TEST = 1922
    NUM_VALID = 2195
    DEFAULT_LENGTH = 114688  # 896 x 128bp
    TARGET_SHAPE = (
        896,
        50,
    )  # 50 is a subset of CAGE tracks from the original enformer dataset
    NPZ_SPLIT = 1000  # number of files per npz file.
    NUM_BP_PER_BIN = 128  # number of base pairs per bin in labels

    def __init__(self, sequence_length=DEFAULT_LENGTH, **kwargs):
        """
        Creates a new handler for the CAGE task.

        Args:
            sequence_length: allows for increasing sequence context. Sequence
                length must be an even multiple of 128 to align with the
                binned labels. Note: increasing sequence length may decrease
                the number of usable samples.

        Raises:
            ValueError: if ``sequence_length`` is not an even multiple of 128.
        """
        self.reference_genome = None
        self.coordinate_csv_file = None
        self.target_files_by_split = {}

        # Bug fix: the previous check, ``(sequence_length // 128) % 2 == 0``,
        # accepted lengths such as 260 that are not multiples of 128 at all.
        # "Even multiple of 128" is exactly "multiple of 256". Raise instead
        # of assert so the check survives ``python -O``.
        if sequence_length % (2 * self.NUM_BP_PER_BIN) != 0:
            raise ValueError(
                "Requested sequence length must be an even multiple of 128 "
                "to align with the binned labels."
            )

        self.sequence_length = sequence_length

        if self.sequence_length < self.DEFAULT_LENGTH:
            # Shorter context -> fewer 128bp label bins. The instance
            # attribute shadows the class-level TARGET_SHAPE.
            self.TARGET_SHAPE = (self.sequence_length // 128, 50)

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the CAGE dataset. Each example
        includes a genomic sequence and a 2D array of labels.
        """
        features = datasets.Features(
            {
                # DNA sequence
                "sequence": datasets.Value("string"),
                # array of sequence length x num_labels
                "labels": datasets.Array2D(shape=self.TARGET_SHAPE, dtype="float32"),
                # chromosome number
                "chromosome": datasets.Value(dtype="string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=description,
            # This defines the different columns of the dataset and their types
            features=features,
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        The CAGE dataset requires reference genome, coordinate
        csv file, and npy files to be saved.
        """
        # Manually download the reference genome since there are difficulties
        # when streaming the dataset
        reference_genome_file = self.download_and_extract_gz(
            H38_REFERENCE_GENOME_URL, cache_dir_root
        )
        self.reference_genome = Fasta(reference_genome_file, one_based_attributes=False)

        self.coordinate_csv_file = dl_manager.download_and_extract(
            "cage_prediction/sequences_coordinates.csv"
        )

        # Download the npz target archives for every split and key them by
        # the floored npy index so generate_examples can look them up
        # directly. The npz files on disk are named "train"/"test"/"valid",
        # while the datasets split names are "train"/"test"/"validation".
        self.target_files_by_split = {
            dataset_split: {
                key: dl_manager.download(file)
                for key, file in self.generate_npz_filenames(
                    npz_split, total, folder="cage_prediction/targets_subset"
                )
            }
            for dataset_split, npz_split, total in (
                ("train", "train", self.NUM_TRAIN),
                ("test", "test", self.NUM_TEST),
                ("validation", "valid", self.NUM_VALID),
            )
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"handler": self, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"handler": self, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"handler": self, "split": "test"},
            ),
        ]

    def generate_examples(self, split):
        """
        A generator which produces examples for the given split, each with a
        sequence and the corresponding labels. The sequences are padded to
        the correct sequence length and standardized before returning.
        """
        target_files = self.target_files_by_split[split]
        sequence_length = self.sequence_length

        # npz archives are keyed by ["train", "test", "valid"]; hoisted out
        # of the loop instead of rebinding ``split`` mid-iteration.
        npy_split = "valid" if split == "validation" else split

        key = 0
        coordinates_dataframe = pd.read_csv(self.coordinate_csv_file)
        filtered = coordinates_dataframe[coordinates_dataframe["split"] == split]
        for sequential_idx, row in filtered.iterrows():
            # -1 since vcf coords are 1-based
            start, stop = int(row["start"]) - 1, int(row["stop"]) - 1

            chromosome = row["chrom"]

            padded_sequence = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=start,
                sequence_length=sequence_length,
                end=stop,
            )

            # floor npy_idx to the nearest NPZ_SPLIT to locate its archive
            npz_file = np.load(
                target_files[int((row["npy_idx"] // self.NPZ_SPLIT) * self.NPZ_SPLIT)]
            )
            # select 0 since there is an extra leading dimension
            targets = npz_file[f"target-{npy_split}-{row['npy_idx']}.npy"][0]

            # subset the targets if sequence length is smaller than 114688
            # (DEFAULT_LENGTH)
            if self.sequence_length < self.DEFAULT_LENGTH:
                idx_diff = (self.DEFAULT_LENGTH - self.sequence_length) // 2 // 128
                targets = targets[idx_diff:-idx_diff]

            if padded_sequence:
                yield key, {
                    "labels": targets,
                    "sequence": standardize_sequence(padded_sequence),
                    "chromosome": re.sub("chr", "", chromosome),
                }
                key += 1

    @staticmethod
    def generate_npz_filenames(split, total, folder, npz_size=NPZ_SPLIT):
        """
        Generates the filenames for the npz files stored in the dataset.
        Yields tuples of (floored multiple of ``npz_size``, filename).

        Args:
            split: split to generate filenames for. Must be in
                ['train', 'test', 'valid'] due to the naming of the files.
            total: total number of npy targets for given split
            folder: folder where data is stored.
            npz_size: number of npy files per npz. Defaults to 1000 because
                this is the number currently used in the dataset.
        """
        for i in range(total // npz_size):
            yield (
                i * npz_size,
                f"{folder}/targets-{split}-{i * npz_size}-"
                f"{i * npz_size + (npz_size - 1)}.npz",
            )
        # trailing partial archive when total is not an exact multiple
        if total % npz_size != 0:
            yield (
                npz_size * (total // npz_size),
                f"{folder}/targets-{split}-"
                f"{npz_size * (total // npz_size)}-"
                f"{npz_size * (total // npz_size) + (total % npz_size - 1)}.npz",
            )
321
+
322
+
323
class BulkRnaExpressionHandler(GenomicLRATaskHandler):
    """
    Handler for the Bulk RNA Expression task.
    """

    DEFAULT_LENGTH = 114688

    def __init__(self, sequence_length=DEFAULT_LENGTH, **kwargs):
        """
        Creates a new handler for the Bulk RNA Expression Prediction Task.

        Args:
            sequence_length: Length of the sequence around the TSS_CAGE
                start site.
        """
        self.reference_genome = None
        self.coordinate_csv_file = None
        self.labels_csv_file = None
        self.sequence_length = sequence_length

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the Bulk RNA Expression dataset. Each
        example includes a genomic sequence and a list of label values.
        """
        # One expression value per tissue, plus the source chromosome.
        feature_spec = {
            # DNA sequence
            "sequence": datasets.Value("string"),
            # list of expression values in each tissue
            "labels": datasets.Sequence(datasets.Value("float32")),
            # chromosome number
            "chromosome": datasets.Value(dtype="string"),
        }
        return datasets.DatasetInfo(
            # Description shown on the datasets page.
            description=description,
            # Column layout and types of the dataset.
            features=datasets.Features(feature_spec),
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        The Bulk RNA Expression dataset requires the reference hg19 genome,
        coordinate csv file, and label csv file to be saved.
        """
        # The reference genome is fetched manually because streaming it is
        # problematic.
        genome_path = self.download_and_extract_gz(
            H19_REFERENCE_GENOME_URL, cache_dir_root
        )
        self.reference_genome = Fasta(genome_path, one_based_attributes=False)

        self.coordinate_csv_file = dl_manager.download_and_extract(
            "bulk_rna_expression/gene_coordinates.csv"
        )
        self.labels_csv_file = dl_manager.download_and_extract(
            "bulk_rna_expression/rna_expression_values.csv"
        )

        return super().split_generators(dl_manager, cache_dir_root)

    def generate_examples(self, split):
        """
        A generator which produces examples for the given split, each with a
        sequence and the corresponding labels. The sequences are padded to
        the correct sequence length and standardized before returning.
        """
        coordinates = pd.read_csv(self.coordinate_csv_file)
        labels = pd.read_csv(self.labels_csv_file)

        split_rows = coordinates[coordinates["split"] == split]

        example_key = 0
        for row_idx, coord_row in split_rows.iterrows():
            # -1 since vcf coords are 1-based
            tss_start = coord_row["CAGE_representative_TSS"] - 1

            chromosome = coord_row["chrom"]
            # Label rows share the coordinate csv's index
            # (NOTE(review): presumably the two csvs are row-aligned —
            # confirm against the data files).
            expression_values = labels.loc[row_idx].values

            sequence = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=tss_start,
                sequence_length=self.sequence_length,
                negative_strand=coord_row["strand"] == "-",
            )
            if sequence:
                yield example_key, {
                    "labels": expression_values,
                    "sequence": standardize_sequence(sequence),
                    "chromosome": re.sub("chr", "", chromosome),
                }
                example_key += 1
418
+
419
+
420
class VariantEffectPredictionHandler(GenomicLRATaskHandler):
    """
    Handler for the Variant Effect Prediction task.
    """

    DEFAULT_LENGTH = 114688

    def __init__(self, sequence_length=DEFAULT_LENGTH, **kwargs):
        """
        Creates a new handler for the Variant Effect Prediction Task.

        Args:
            sequence_length: Length of the sequence to pad around the SNP
                position.
        """
        self.reference_genome = None
        # Consistency fix: declare the csv attribute here like the sibling
        # handlers do, instead of creating it first in split_generators.
        self.coordinates_labels_csv_file = None
        self.sequence_length = sequence_length

    def get_info(self, description: str) -> DatasetInfo:
        """
        Returns the DatasetInfo for the Variant Effect Prediction dataset.
        Each example includes a genomic sequence with the reference allele as
        well as the genomic sequence with the alternative allele, and a
        binary label.
        """
        features = datasets.Features(
            {
                # DNA sequence
                "ref_forward_sequence": datasets.Value("string"),
                "alt_forward_sequence": datasets.Value("string"),
                # binary label
                "label": datasets.Value(dtype="int8"),
                # tissue type
                "tissue": datasets.Value(dtype="string"),
                # chromosome number
                "chromosome": datasets.Value(dtype="string"),
                # distance to nearest tss
                "distance_to_nearest_tss": datasets.Value(dtype="int32"),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=description,
            # This defines the different columns of the dataset and their types
            features=features,
        )

    def split_generators(self, dl_manager, cache_dir_root):
        """
        Separates files by split and stores filenames in instance variables.
        The variant effect prediction dataset requires the reference hg38
        genome and coordinates_labels_csv_file to be saved.
        """
        # Manually download the reference genome since there are difficulties
        # when streaming
        reference_genome_file = self.download_and_extract_gz(
            H38_REFERENCE_GENOME_URL, cache_dir_root
        )

        self.reference_genome = Fasta(reference_genome_file, one_based_attributes=False)
        # Fix: dropped a needless f-string prefix (no placeholders in the path).
        self.coordinates_labels_csv_file = dl_manager.download_and_extract(
            "variant_effect_prediction/All_Tissues.csv"
        )

        return super().split_generators(dl_manager, cache_dir_root)

    def generate_examples(self, split):
        """
        A generator which produces examples each with ref/alt allele
        and corresponding binary label. The sequences are extended to
        the desired sequence length and standardized before returning.
        """
        coordinates_df = pd.read_csv(self.coordinates_labels_csv_file)

        coordinates_split_df = coordinates_df[coordinates_df["split"] == split]

        key = 0
        for idx, row in coordinates_split_df.iterrows():
            start = row["POS"] - 1  # sub 1 to create idx since vcf coords are 1-based
            alt_allele = row["ALT"]
            label = row["label"]
            tissue = row["tissue"]
            chromosome = row["CHROM"]
            distance = int(row["distance_to_nearest_TSS"])

            # get reference forward sequence
            ref_forward = pad_sequence(
                chromosome=self.reference_genome[chromosome],
                start=start,
                sequence_length=self.sequence_length,
                negative_strand=False,
            )

            # only if a valid sequence returned
            if ref_forward:
                # Mutate sequence with the alt allele at the SNP position,
                # which is always centered in the string returned from
                # pad_sequence
                alt_forward = list(ref_forward)
                alt_forward[self.sequence_length // 2] = alt_allele
                alt_forward = "".join(alt_forward)

                yield key, {
                    "label": label,
                    "tissue": tissue,
                    "chromosome": re.sub("chr", "", chromosome),
                    "ref_forward_sequence": standardize_sequence(ref_forward),
                    "alt_forward_sequence": standardize_sequence(alt_forward),
                    "distance_to_nearest_tss": distance,
                }
                key += 1