Datasets:
import json
import os

import tiktoken

# Tokenizer used for the token count report (cl100k_base is the encoding
# used by OpenAI's GPT-4 and GPT-3.5-turbo models).
encoding = tiktoken.get_encoding("cl100k_base")

# Output paths for the merged plain-text and JSONL datasets.
merged_md_file_path = "hermes-toth.txt"
merged_jsonl_file_path = "hermes-toth.json"

# Files that must never be merged into the dataset.
excluded = (merged_md_file_path, "README.md")
def merge_to_md():
    """
    Merges all .md files in the current directory tree into a single
    plain-text file, separating consecutive files with blank lines.
    """
    with open(merged_md_file_path, "w", encoding="utf-8") as merged_file:
        first_file = True
        for root, _, files in os.walk("."):
            for file in files:
                if file.endswith(".md") and file not in excluded:
                    print(f"Merging file: {file} into {merged_md_file_path}")
                    file_path = os.path.join(root, file)
                    with open(file_path, "r", encoding="utf-8") as f:
                        contents = f.read()
                    # Separate documents with blank lines, but only between files.
                    if not first_file:
                        merged_file.write("\n\n\n")
                    merged_file.write(contents)
                    first_file = False
def merge_to_jsonl():
    """
    Merges all .md files in the current directory tree into a single
    jsonl file, writing each file as one {"text": ...} object per line.
    """
    with open(merged_jsonl_file_path, "w", encoding="utf-8") as merged_file:
        first_file = True
        for root, _, files in os.walk("."):
            for file in files:
                if file.endswith(".md") and file not in excluded:
                    print(f"Merging file: {file} into {merged_jsonl_file_path}")
                    file_path = os.path.join(root, file)
                    with open(file_path, "r", encoding="utf-8") as f:
                        contents = f.read()
                    # One JSON object per line; a newline only between records.
                    if not first_file:
                        merged_file.write("\n")
                    data = {"text": contents}
                    json.dump(data, merged_file, ensure_ascii=False)
                    first_file = False
def token_count():
    """
    Counts the number of tokens in the merged plain-text dataset.
    """
    with open(merged_md_file_path, "r", encoding="utf-8") as merged_file:
        tokenized = encoding.encode(merged_file.read())
    print(f"Merged dataset has: {len(tokenized)} tokens.")
if __name__ == "__main__":
    merge_to_md()
    merge_to_jsonl()
    token_count()