import json
from typing import List

import codefast as cf
import pandas as pd

from codefast.patterns.pipeline import Pipeline, BeeMaxin


class DataLoader(BeeMaxin):
    """Collect the NER annotation files under the local `jsons/` directory."""

    def __init__(self) -> None:
        super().__init__()

    def process(self) -> List[str]:
        files = []
        for f in cf.io.walk('jsons/'):
            files.append(f)
        return files


class ToCsv(BeeMaxin):
    def to_csv(self, json_file: str) -> pd.DataFrame:
        """Convert one JSON-lines annotation file into a DataFrame with
        `text`, `labels` and `task_name` columns."""
        texts, labels = [], []
        with open(json_file, 'r') as f:
            for line in f:
                record = json.loads(line)
                texts.append(record['text'])
                labels.append(' '.join(record['labels']))
        task_name = cf.io.basename(json_file).replace('.json', '')
        return pd.DataFrame({
            'text': texts,
            'labels': labels,
            'task_name': task_name
        })

    def process(self, files: List[str]) -> None:
        """Merge all NER data into train.csv, and write small random samples
        to dev.csv and test.csv."""
        frames = []
        for f in files:
            cf.info({'message': f'processing {f}'})
            frames.append(self.to_csv(f))
        df = pd.concat(frames, axis=0, ignore_index=True)
        df.to_csv('train.csv', index=False)
        df.sample(10).to_csv('dev.csv', index=False)
        df.sample(10).to_csv('test.csv', index=False)
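

# The JSON-lines format that ToCsv.to_csv expects. The field names ('text',
# 'labels') are taken from the code above; the concrete values are only
# illustrative:
#
#   {"text": "John lives in Paris", "labels": ["B-PER", "O", "O", "B-LOC"]}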


if __name__ == '__main__':
    pl = Pipeline([
        ('dloader', DataLoader()),
        ('csv converter', ToCsv())
    ])
    pl.gather()
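
    # A minimal sketch of the same flow without the Pipeline wrapper, assuming
    # the Pipeline feeds each stage's return value into the next stage's
    # process():
    #
    #   files = DataLoader().process()
    #   ToCsv().process(files)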