Josephgflowers committed
Commit 872030d
1 Parent(s): 48599cc

Upload 3 files

Files changed (3):
  1. add-column-names.py +34 -0
  2. organize_by_folder.py +39 -0
  3. par-4-forti-four.py +235 -0
add-column-names.py ADDED
@@ -0,0 +1,34 @@
+ import os
+ import pandas as pd
+
+ # Define the directory path containing the CSV files
+ csv_directory = '/media/joe/512-3/csv'
+
+ # Define the column names to be added
+ column_names = ['score', 'text', 'url']
+
+ # Function to add headers to CSV files
+ def add_headers_to_csv(file_path):
+     try:
+         # Load the CSV file without headers
+         df = pd.read_csv(file_path, header=None)
+
+         # Add column names
+         df.columns = column_names
+
+         # Save the updated CSV file with headers
+         df.to_csv(file_path, index=False)
+
+         print(f"Headers added to '{file_path}'")
+
+     except Exception as e:
+         print(f"Error processing '{file_path}': {e}")
+
+ # Loop through all the CSV files in the specified directory
+ for file_name in os.listdir(csv_directory):
+     if file_name.endswith('.csv'):
+         file_path = os.path.join(csv_directory, file_name)
+         add_headers_to_csv(file_path)
+
+ print("All CSV files processed.")
+
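A quick way to spot-check the result is to read one of the rewritten files back and confirm the header row (a minimal sketch; the file name below is a placeholder for any CSV in the directory):

    import pandas as pd

    sample = pd.read_csv('/media/joe/512-3/csv/example.csv', nrows=5)
    print(sample.columns.tolist())  # expected: ['score', 'text', 'url']

Note that the script assumes each CSV has exactly three unnamed columns; a file that already carries a header row would have that row treated as data.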
organize_by_folder.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import shutil
+
+ # Define the years and the base directory
+ years = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021", "2022", "2023", "2024"]
+ base_dir = '/media/joe/512-3/csv'  # Adjust this to your actual base directory
+
+ # Step 1: Create directories for each year
+ for year in years:
+     folder_path = os.path.join(base_dir, f"CC-MAIN-{year}")
+     if not os.path.exists(folder_path):
+         os.makedirs(folder_path)
+         print(f"Created folder: {folder_path}")
+
+ # Step 2: Move files to their respective folders and rename them
+ for year in years:
+     year_folder = os.path.join(base_dir, f"CC-MAIN-{year}")
+     # Find all files that match the pattern for this year
+     year_files = [f for f in os.listdir(base_dir) if f.startswith(f"forti-sampled_dataset_CC-MAIN-{year}")]
+
+     total_files = len(year_files)
+     if total_files == 0:
+         print(f"No files found for the year {year}")
+         continue
+
+     # Sort and rename the files
+     for index, file_name in enumerate(sorted(year_files)):
+         # Use the same zero-padding width on both counters so names sort consistently
+         new_file_name = f"train-{str(index + 1).zfill(5)}-of-{str(total_files).zfill(5)}.csv"
+
+         old_file_path = os.path.join(base_dir, file_name)
+         new_file_path = os.path.join(year_folder, new_file_name)
+
+         # Attempt to move and rename the file
+         try:
+             shutil.move(old_file_path, new_file_path)
+             print(f"Moved and renamed: {old_file_path} to {new_file_path}")
+         except Exception as e:
+             print(f"Error moving file {old_file_path}: {e}")
+
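Because the move is destructive, it can be worth previewing the planned renames first (a minimal dry-run sketch that reuses the matching logic and paths assumed above without moving anything):

    import os

    base_dir = '/media/joe/512-3/csv'
    for year in ["2013", "2024"]:  # any subset of the years list
        files = sorted(f for f in os.listdir(base_dir)
                       if f.startswith(f"forti-sampled_dataset_CC-MAIN-{year}"))
        for index, name in enumerate(files):
            new_name = f"train-{str(index + 1).zfill(5)}-of-{str(len(files)).zfill(5)}.csv"
            print(f"{name} -> CC-MAIN-{year}/{new_name}")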
par-4-forti-four.py ADDED
@@ -0,0 +1,235 @@
+ import csv
+ from datasets import load_dataset
+ import os
+ import time
+ import signal
+ import requests
+ from multiprocessing import Pool, Manager
+ from functools import partial
+
+ # Define parameters
+ score_threshold = 4
+ error_log_file = "error_log.txt"
+
+ # List of dataset folders to process
+ dataset_folders = [
+     "CC-MAIN-2013-20", "CC-MAIN-2013-48", "CC-MAIN-2014-10", "CC-MAIN-2014-15",
+     "CC-MAIN-2014-23", "CC-MAIN-2014-35", "CC-MAIN-2014-41", "CC-MAIN-2014-42",
+     "CC-MAIN-2014-49", "CC-MAIN-2014-52", "CC-MAIN-2015-06", "CC-MAIN-2015-11",
+     "CC-MAIN-2015-14", "CC-MAIN-2015-18", "CC-MAIN-2015-22", "CC-MAIN-2015-27",
+     "CC-MAIN-2015-32", "CC-MAIN-2015-35", "CC-MAIN-2015-40", "CC-MAIN-2015-48",
+     "CC-MAIN-2016-07", "CC-MAIN-2016-18", "CC-MAIN-2016-22", "CC-MAIN-2016-26",
+     "CC-MAIN-2016-30", "CC-MAIN-2016-36", "CC-MAIN-2016-40", "CC-MAIN-2016-44",
+     "CC-MAIN-2016-50", "CC-MAIN-2017-04", "CC-MAIN-2017-09", "CC-MAIN-2017-13",
+     "CC-MAIN-2017-17", "CC-MAIN-2017-22", "CC-MAIN-2017-26", "CC-MAIN-2017-30",
+     "CC-MAIN-2017-34", "CC-MAIN-2017-39", "CC-MAIN-2017-43", "CC-MAIN-2017-47",
+     "CC-MAIN-2017-51", "CC-MAIN-2018-05", "CC-MAIN-2018-09", "CC-MAIN-2018-13",
+     "CC-MAIN-2018-17", "CC-MAIN-2018-22", "CC-MAIN-2018-26", "CC-MAIN-2018-30",
+     "CC-MAIN-2018-34", "CC-MAIN-2018-39", "CC-MAIN-2018-43", "CC-MAIN-2018-47",
+     "CC-MAIN-2018-51", "CC-MAIN-2019-04", "CC-MAIN-2019-09", "CC-MAIN-2019-13",
+     "CC-MAIN-2019-18", "CC-MAIN-2019-22", "CC-MAIN-2019-26", "CC-MAIN-2019-30",
+     "CC-MAIN-2019-35", "CC-MAIN-2019-39", "CC-MAIN-2019-43", "CC-MAIN-2019-47",
+     "CC-MAIN-2019-51", "CC-MAIN-2020-05", "CC-MAIN-2020-10", "CC-MAIN-2020-16",
+     "CC-MAIN-2020-24", "CC-MAIN-2020-29", "CC-MAIN-2020-34", "CC-MAIN-2020-40",
+     "CC-MAIN-2020-45", "CC-MAIN-2020-50", "CC-MAIN-2021-04", "CC-MAIN-2021-10",
+     "CC-MAIN-2021-17", "CC-MAIN-2021-21", "CC-MAIN-2021-25", "CC-MAIN-2021-31",
+     "CC-MAIN-2021-39", "CC-MAIN-2021-43", "CC-MAIN-2021-49", "CC-MAIN-2022-05",
+     "CC-MAIN-2022-21", "CC-MAIN-2022-27", "CC-MAIN-2022-33", "CC-MAIN-2022-40",
+     "CC-MAIN-2022-49", "CC-MAIN-2023-06", "CC-MAIN-2023-14", "CC-MAIN-2023-23",
+     "CC-MAIN-2023-40", "CC-MAIN-2023-50", "CC-MAIN-2024-10"
+ ]
+
+ # Global variable for interruption
+ interrupt_flag = Manager().Value('i', False)
+
+ # Function to log errors
+ def log_error(error_message):
+     with open(error_log_file, "a") as error_log:
+         error_log.write(f"{error_message}\n")
+
+ # Retry mechanism to handle connection errors when loading the dataset
+ def retry_request(load_dataset_function, max_retries=5, wait_time=5):
+     retries = 0
+     while retries < max_retries:
+         try:
+             dataset = load_dataset_function()
+             return dataset
+         except requests.exceptions.ConnectionError as e:
+             log_error(f"Connection error: {e}. Retrying in {wait_time} seconds...")
+             retries += 1
+             time.sleep(wait_time)
+         except Exception as e:
+             log_error(f"Unexpected error: {e}. Retrying in {wait_time} seconds...")
+             retries += 1
+             time.sleep(wait_time)
+     log_error("Max retries exceeded.")
+     return None
+
+ # Function to save the text column to a file with start and stop tokens
+ def save_text_column(entry, output_text_file):
+     try:
+         text = entry["text"]
+         with open(output_text_file, "a", encoding='utf-8') as f:
+             f.write(f"<s>\n{text}</s>\n")
+     except KeyError as e:
+         log_error(f"Missing 'text' field: {e}")
+
+ # Function to save score, text, and URL to a CSV file, with error handling
+ def save_to_csv(entry, output_csv_file, write_header=False):
+     try:
+         with open(output_csv_file, mode='a', newline='', encoding='utf-8') as file:
+             writer = csv.writer(file)
+             if write_header:
+                 writer.writerow(["score", "text", "url"])  # CSV headers
+             score = entry["score"]
+             text = entry["text"]
+             url = entry.get("url", "N/A")  # Ensure 'url' is included
+             writer.writerow([score, text, url])
+     except KeyError as e:
+         log_error(f"Missing field in entry: {e}")
+
+ # Graceful exit handling
+ def signal_handler(sig, frame):
+     global interrupt_flag
+     print("Interrupt received, saving progress and exiting...")
+     interrupt_flag.value = True  # Set the flag to stop processing
+     exit(0)
+
+ signal.signal(signal.SIGINT, signal_handler)
+
+ # Function to process a single folder
+ def process_folder(folder, score_threshold):
+     global interrupt_flag
+
+     # Define per-folder log file
+     log_file = f"processing_log_{folder}.txt"
+
+     # Function to log progress to a file
+     def log_progress(last_id):
+         with open(log_file, "w") as log:
+             log.write(f"{last_id}")
+
+     # Function to resume from a specific point by reading the log file
+     def resume_progress():
+         if os.path.exists(log_file):
+             with open(log_file, "r") as log:
+                 last_id = log.read().strip()
+                 if last_id == 'None' or last_id == '':
+                     last_id = None
+                 return last_id
+         return None
+
+     print(f"Processing dataset folder: {folder}")
+
+     # Define per-folder output files
+     output_text_file = f"forti-sampled_text_dataset_{folder}.txt"
+     output_csv_file = f"forti-sampled_dataset_{folder}.csv"
+
+     # Load dataset with retry mechanism
+     dataset = retry_request(lambda: load_dataset(
+         "airtrain-ai/fineweb-edu-fortified",
+         folder,
+         split="train",
+         streaming=True
+     ))
+     if not dataset:
+         log_error(f"Failed to load dataset {folder}. Skipping.")
+         return
+
+     # Retrieve last processed ID for resuming
+     last_processed_id = resume_progress()
+
+     # Initialize variables
+     found_last_id = last_processed_id is None
+     processed_entries = 0  # Reset processed_entries for the new folder
+
+     # Process entries
+     while True:
+         try:
+             for entry in dataset:
+                 if interrupt_flag.value:
+                     break  # Exit loop if interrupt flag is set
+
+                 # Skip entries until we reach the last processed ID
+                 entry_id = entry.get('id')
+                 if not found_last_id:
+                     if entry_id == last_processed_id:
+                         found_last_id = True
+                     continue
+
+                 # Update last_processed_id
+                 last_processed_id = entry_id
+
+                 # Check if entry meets the score threshold
+                 if entry.get('score', 0) >= score_threshold:
+                     # Save entry to text file and CSV
+                     # Write headers if files are new
+                     write_header = (processed_entries == 0)
+                     save_to_csv(entry, output_csv_file, write_header=write_header)
+                     save_text_column(entry, output_text_file)
+
+                     processed_entries += 1
+
+                     if processed_entries % 100 == 0:
+                         log_progress(last_processed_id)
+                         print(f"Processed {processed_entries} entries from {folder}...")  # Terminal output
+
+             break  # Exit while loop when dataset iteration is complete
+
+         except requests.exceptions.ConnectionError as e:
+             # Handle connection error during iteration
+             log_error(f"Connection error during iteration in {folder}: {e}")
+             print(f"Connection error during iteration in {folder}: {e}. Retrying in 5 seconds...")
+             time.sleep(5)
+             # Re-initialize dataset streaming from the point after last_processed_id
+             dataset = retry_request(lambda: load_dataset(
+                 "airtrain-ai/fineweb-edu-fortified",
+                 folder,
+                 split="train",
+                 streaming=True
+             ))
+             if not dataset:
+                 log_error(f"Failed to reload dataset {folder} after connection error. Skipping.")
+                 break
+             # Reset found_last_id to False to skip entries up to last_processed_id
+             found_last_id = False
+
+         except Exception as e:
+             log_error(f"Error during processing in {folder}: {e}")
+             print(f"Error during processing in {folder}: {e}. Restarting stream and skipping already-processed entries...")
+             # Continuing the while loop re-creates the streaming iterator from the
+             # beginning, so reset the skip flag; otherwise entries processed so far
+             # would be written a second time.
+             found_last_id = last_processed_id is None
+             continue
+
+     # After processing all entries in the folder, log progress
+     log_progress(last_processed_id)
+     print(f"Completed processing folder: {folder}")
+
+     if interrupt_flag.value:
+         print(f"Processing interrupted in folder: {folder}")
+
+ # Main process function to process multiple folders in parallel
+ def process_all_folders_parallel(dataset_folders, score_threshold):
+     global interrupt_flag
+
+     # Use a multiprocessing Pool to process folders in parallel
+     with Pool(processes=os.cpu_count()) as pool:
+         try:
+             # Partial function to fix the score_threshold parameter
+             func = partial(process_folder, score_threshold=score_threshold)
+             pool.map(func, dataset_folders)
+         except KeyboardInterrupt:
+             print("KeyboardInterrupt received, terminating pool...")
+             pool.terminate()
+             pool.join()
+             print("Pool terminated.")
+             interrupt_flag.value = True
+
+     print("Processing complete.")
+
+ # Start processing all folders in parallel.
+ # Guard the entry point so importing the module (or a spawn-based worker
+ # re-importing it) does not re-run the whole pipeline.
+ if __name__ == "__main__":
+     process_all_folders_parallel(dataset_folders, score_threshold)
+
+     print("Filtered datasets saved to individual files per folder.")
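For a quick test before launching the full pool, the entry point can be pointed at a single crawl (a sketch; the folder name is just one entry from dataset_folders, and resume state is kept in its processing_log_<folder>.txt file):

    if __name__ == "__main__":
        # Process one crawl end-to-end with the same threshold
        process_folder("CC-MAIN-2024-10", score_threshold)
        print("Single-folder test complete.")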