#### Firstly, I read specimen data from a CSV file, merge and reformat certain columns, and convert the data into a pandas DataFrame.
#### Then, I filter and process the associated images by resizing them and saving them to a specified output directory.
#### Next, I update the DataFrame with the paths to the processed images and save this enhanced dataset as a new CSV file.
#### Finally, I upload the photos to GitHub and replace the local paths with public URLs.
#### Note: all of these steps were run locally. I then uploaded the processed CSV to GitHub and obtained its URL.
import csv
import os
import cv2
import pandas as pd
# --- Initial Setup ---
initial_csv_file_path = '/Users/leozhangzaolin/Desktop/Graptolite specimens.csv'
image_dir_paths = ['/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 1',
                   '/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 2']
output_image_dir = '/Users/leozhangzaolin/Desktop/project 1/output_images'
target_size = (256, 256)
# Ensure output directory exists
os.makedirs(output_image_dir, exist_ok=True)
# --- Read and Process CSV Data ---
with open(initial_csv_file_path, newline='', encoding='utf-8') as file:
    reader = csv.reader(file)
    data = list(reader)
header = data[0]
# Find indices for columns to merge
family_index = header.index('Family') if 'Family' in header else None
subfamily_index = header.index('Subfamily') if 'Subfamily' in header else None
locality_index = header.index('Locality') if 'Locality' in header else None
longitude_index = header.index('Longitude') if 'Longitude' in header else None
latitude_index = header.index('Latitude') if 'Latitude' in header else None
horizon_index = header.index('Horizon') if 'Horizon' in header else None
# Process rows: merge and delete columns
for row in data[1:]:
    # Merge Family and Subfamily into one column
    if family_index is not None and subfamily_index is not None:
        family = row[family_index]
        subfamily = row[subfamily_index] if row[subfamily_index] else 'no subfamily'
        row[family_index] = f"{family} ({subfamily})"
    # Merge Locality with Longitude, Latitude, and Horizon
    # (check 'is not None' explicitly: a column index of 0 is falsy, so all([...]) would wrongly skip it)
    if locality_index is not None and all(i is not None for i in (longitude_index, latitude_index, horizon_index)):
        locality = row[locality_index]
        longitude = row[longitude_index]
        latitude = row[latitude_index]
        horizon = row[horizon_index]
        row[locality_index] = f"{locality} ({longitude}, {latitude}, {horizon})"
# Update header and remove the columns that were merged away
header[family_index] = 'Family (Subfamily)'
header[locality_index] = 'Locality (Longitude, Latitude, Horizon)'
columns_to_delete = []  # names of any additional columns to drop (empty here)
indices_to_delete = [header.index(column) for column in columns_to_delete if column in header]
merged_indices = [i for i in (subfamily_index, longitude_index, latitude_index, horizon_index) if i is not None]
indices_to_delete.extend(merged_indices)
# Deduplicate and delete from the right so earlier indices stay valid
indices_to_delete = sorted(set(indices_to_delete), reverse=True)
header = [col for i, col in enumerate(header) if i not in indices_to_delete]
for row in data[1:]:
    for index in indices_to_delete:
        del row[index]
# Convert processed data into a DataFrame
df = pd.DataFrame(data[1:], columns=header)
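# At this point each row carries the merged columns in the formats built above:
#   'Family (Subfamily)'                      -> '<Family> (<Subfamily or "no subfamily">)'
#   'Locality (Longitude, Latitude, Horizon)' -> '<Locality> (<Longitude>, <Latitude>, <Horizon>)'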
# Function to process and save the image, then return the file path
def process_and_save_image(image_name, max_size=target_size):
    image_base_name = os.path.splitext(image_name)[0]
    # Try both scale directories and both suffix casings
    image_paths = [os.path.join(dir_path, image_base_name + suffix)
                   for dir_path in image_dir_paths
                   for suffix in ['_S.jpg', '_S.JPG']]
    image_path = next((path for path in image_paths if os.path.exists(path)), None)
    if image_path is None:
        return None
    # Read and resize the image
    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
    if img is None:  # unreadable or corrupt file
        return None
    img = cv2.resize(img, max_size, interpolation=cv2.INTER_AREA)
    # Save the resized image to the output directory
    output_path = os.path.join(output_image_dir, image_base_name + '.jpg')
    cv2.imwrite(output_path, img)
    return output_path
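# A single call would look like this (hypothetical file name for illustration):
#   process_and_save_image('NIGP100001.jpg')
#   -> '/Users/leozhangzaolin/Desktop/project 1/output_images/NIGP100001.jpg' if a match exists, else None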
# Apply the function to process images and update the DataFrame
df['image file name'] = df['image file name'].apply(process_and_save_image)
df = df.dropna(subset=['image file name'])
# Rename the 'image file name' column to 'image'
df.rename(columns={'image file name': 'image'}, inplace=True)
# Save the DataFrame to a CSV file
final_csv_path = '/Users/leozhangzaolin/Desktop/Final_GS_with_Images5.csv'
df.to_csv(final_csv_path, index=False)
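# Quick sanity check (a small addition, not part of the original pipeline):
# report how many specimens survived the image-matching step.
print(f"Saved {len(df)} specimens with images to {final_csv_path}")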
# Replace the local image paths with public GitHub raw URLs
def update_csv_with_github_links(csv_file_path, github_repo_url, branch_name):
    updated_rows = []
    with open(csv_file_path, mode='r') as file:
        reader = csv.DictReader(file)
        for row in reader:
            image_name = row['image'].split('/')[-1]
            row['image'] = f"{github_repo_url}/{branch_name}/{image_name}"
            updated_rows.append(row)
    # Write updated data back to CSV
    with open(csv_file_path, mode='w', newline='') as file:
        writer = csv.DictWriter(file, fieldnames=reader.fieldnames)
        writer.writeheader()
        writer.writerows(updated_rows)
csv_file = '/Users/leozhangzaolin/Desktop/Final_GS_with_Images5.csv'
github_repo_url = 'https://raw.githubusercontent.com/LeoZhangzaolin/photos'
branch_name = 'main'
update_csv_with_github_links(csv_file, github_repo_url, branch_name)
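#### The images themselves were pushed to the GitHub repo by hand. A minimal sketch of the equivalent shell commands, assuming the 'photos' repo is already cloned locally (the clone path is illustrative):
# cd ~/photos
# cp '/Users/leozhangzaolin/Desktop/project 1/output_images/'*.jpg .
# git add . && git commit -m "Add resized graptolite specimen images" && git push origin main
# Optional check: reload the rewritten CSV and confirm the image column now holds raw URLs.
final_df = pd.read_csv(csv_file)
print(final_df['image'].head())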