|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
|
|
import datasets |
|
|
|
# Dataset card description shown on the Hugging Face Hub.
# (Typo fix: "around the filing data" -> "around the filing date".)
_DESCRIPTION = """\

The dataset contains the annual report of US public firms filing with the SEC EDGAR system.

Each annual report (10K filing) is broken into 20 sections. Each section is split into individual sentences.

Sentiment labels are provided on a per filing basis from the market reaction around the filing date.

Additional metadata for each filing is included in the dataset.

"""

_LICENSE = "apache-2.0"

# Each (config, split) directory is sharded into this many jsonl files.
_NOS_SHARDS = 10

# Maps "<split>_<config>" (e.g. "train_large") to the list of shard paths
# under data/<config>/<split>/.
_URLS = {
    f"{split}_{size}": [
        f"data/{size}/{split}/shard_{shard}.jsonl"
        for shard in range(_NOS_SHARDS)
    ]
    for split in ("test", "train", "validate")
    for size in ("large", "small")
}

# The 20 sections a 10-K filing is broken into; also the ClassLabel names
# for the "section" feature (order defines the label ids).
_REPORT_KEYS = [
    'section_1', 'section_1A', 'section_1B',
    'section_2', 'section_3', 'section_4',
    'section_5', 'section_6', 'section_7',
    'section_7A', 'section_8', 'section_9',
    'section_9A', 'section_9B', 'section_10',
    'section_11', 'section_12', 'section_13',
    'section_14', 'section_15',
]

# Subset of _ALL_FEATURES exposed by the "*_lite" configurations.
_LITE_FEATURES = ["cik", "sentence", "section", "labels", "filingDate", "docID", "sentenceID", "sentenceCount"]

# Return horizons for which sentiment labels and raw returns are provided.
_RETURN_HORIZONS = ("1d", "5d", "30d")

# Full feature schema for the "*_full" configurations.  The identical
# per-horizon sub-schemas for "labels" and "returns" are built with dict
# comprehensions instead of being spelled out three times each.
_ALL_FEATURES = {
    "cik": datasets.Value("string"),
    "sentence": datasets.Value("string"),
    "section": datasets.ClassLabel(num_classes=20, names=_REPORT_KEYS),
    "labels": {
        horizon: datasets.ClassLabel(num_classes=2, names=["positive", "negative"])
        for horizon in _RETURN_HORIZONS
    },
    "filingDate": datasets.Value("string"),
    "name": datasets.Value("string"),
    "docID": datasets.Value("string"),
    "sentenceID": datasets.Value("string"),
    "sentenceCount": datasets.Value("int64"),
    "tickers": [datasets.Value("string")],
    "exchanges": [datasets.Value("string")],
    "entityType": datasets.Value("string"),
    "sic": datasets.Value("string"),
    "stateOfIncorporation": datasets.Value("string"),
    "tickerCount": datasets.Value("int32"),
    "acceptanceDateTime": datasets.Value("string"),
    "form": datasets.Value("string"),
    "reportDate": datasets.Value("string"),
    "returns": {
        horizon: {
            "closePriceEndDate": datasets.Value("float32"),
            "closePriceStartDate": datasets.Value("float32"),
            "endDate": datasets.Value("string"),
            "startDate": datasets.Value("string"),
            "ret": datasets.Value("float32"),
        }
        for horizon in _RETURN_HORIZONS
    },
}
|
|
|
|
|
class FinancialReportsSec(datasets.GeneratorBasedBuilder):
    """Sentence-level dataset of US public firm annual reports (10-K filings).

    Four configurations combine a dataset size ("large"/"small") with a
    feature set ("lite"/"full"): the lite configs expose only the core
    per-sentence fields, the full configs add firm and filing metadata.
    """

    VERSION = datasets.Version("1.1.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="large_lite", version=VERSION, description="This returns the dataset with only the critical data needed for analysis."),
        datasets.BuilderConfig(name="large_full", version=VERSION, description="This returns the dataset with all metadata included."),
        datasets.BuilderConfig(name="small_lite", version=VERSION, description="This returns a smaller version of the dataset with only the critical data needed for analysis."),
        datasets.BuilderConfig(name="small_full", version=VERSION, description="This returns a smaller version of the dataset with all metadata included."),
    ]

    def _info(self):
        """Return the DatasetInfo holding the feature schema for this config."""
        if self.config.name.endswith('full'):
            features = datasets.Features(_ALL_FEATURES)
        else:
            # "lite": keep only the core per-sentence fields, preserving
            # the ordering of _ALL_FEATURES.
            features = datasets.Features(
                {k: v for k, v in _ALL_FEATURES.items() if k in _LITE_FEATURES}
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the shard files for this config's size and declare splits."""
        # Config names are "<size>_<featureset>", e.g. "large_lite".
        size = self.config.name.split('_')[0]
        urls = {k: v for k, v in _URLS.items() if k.endswith(size)}
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepaths": data_dir[f"{split_key}_{size}"],
                    "split": split_key,
                },
            )
            for split_name, split_key in [
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validate"),
                (datasets.Split.TEST, "test"),
            ]
        ]

    def _generate_examples(self, filepaths, split):
        """Yield (key, example) pairs, one per sentence across all shards.

        Each shard line is one firm record containing a list of filings;
        every filing holds per-section lists of sentences.  Keys have the
        form "<cik>_<form>_<year>_<section>_<idx>".
        """
        is_lite = self.config.name.endswith('lite')
        # Running sentence counter across all shards of this split.
        sentence_count = 0

        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    for filing in data["filings"]:
                        doc_id = (data["cik"] + '_' + filing["form"] + '_'
                                  + filing["reportDate"].split('-')[0])
                        for section_id in _REPORT_KEYS:
                            sentences = filing["report"][section_id]
                            if sentences is None:
                                # BUG FIX: this was `return None`, which ends
                                # the whole generator at the first missing
                                # section, silently dropping every remaining
                                # section, filing, and shard.  Skip only the
                                # missing section instead.
                                continue
                            for idx, sentence in enumerate(sentences):
                                sentence_count += 1
                                key = doc_id + '_' + section_id + '_' + str(idx)
                                # Core fields shared by lite and full configs.
                                example = {
                                    "cik": data["cik"],
                                    "sentence": sentence,
                                    "section": section_id,
                                    "labels": filing["labels"],
                                    "filingDate": filing["filingDate"],
                                    "docID": doc_id,
                                    "sentenceID": key,
                                    "sentenceCount": sentence_count,
                                }
                                if not is_lite:
                                    # Full configs add firm- and filing-level
                                    # metadata on top of the core fields.
                                    example.update({
                                        "name": data["name"],
                                        "tickers": data["tickers"],
                                        "exchanges": data["exchanges"],
                                        "entityType": data["entityType"],
                                        "sic": data["sic"],
                                        "stateOfIncorporation": data["stateOfIncorporation"],
                                        "tickerCount": data["tickerCount"],
                                        "acceptanceDateTime": filing["acceptanceDateTime"],
                                        "form": filing["form"],
                                        "reportDate": filing["reportDate"],
                                        "returns": filing["returns"],
                                    })
                                yield key, example