import os
import random
import shutil

import pandas as pd
import torch
import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import pad
from tqdm.auto import tqdm

# --- Configuration ---
INPUT_DIR = "./GTSRB"
OUTPUT_DIR = "./GTSRB_224_balanced"
IMAGE_SIZE = 224
TARGET_TRAIN_SAMPLES_PER_CLASS = 1000
RANDOM_STATE = 42  # makes the downsampling reproducible

# Seed the RNGs for reproducibility
torch.manual_seed(RANDOM_STATE)
random.seed(RANDOM_STATE)


# --- Custom transform: pad to a square while preserving the aspect ratio ---
class PadToSquare:
    def __init__(self, fill_value=0):
        # fill_value only applies to constant padding; the reflect mode used
        # below ignores it.
        self.fill_value = fill_value

    def __call__(self, img):
        w, h = img.size
        if w == h:
            return img
        max_dim = max(w, h)
        pad_left = (max_dim - w) // 2
        pad_right = max_dim - w - pad_left
        pad_top = (max_dim - h) // 2
        pad_bottom = max_dim - h - pad_top
        padding = (pad_left, pad_top, pad_right, pad_bottom)
        # Fill the padded border by reflecting the image content
        return pad(img, padding, fill=self.fill_value, padding_mode="reflect")


# --- Image transform pipelines ---
base_transform = T.Compose(
    [
        PadToSquare(fill_value=0),
        T.Resize(
            (IMAGE_SIZE, IMAGE_SIZE),
            interpolation=T.InterpolationMode.BICUBIC,
            antialias=True,
        ),
    ]
)

# Key point: run the base transform first so the image is upscaled, then
# augment at the higher resolution; augmenting the tiny originals would
# amplify aliasing artifacts.
augmentation_pipeline = T.Compose(
    [
        base_transform,
        # Borders exposed by the affine transform are filled with neutral gray
        T.RandomAffine(
            degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5, fill=128
        ),
        T.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    ]
)
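# --- Optional: eyeball the transforms on one image (a minimal sketch) ---
# The sample path below is hypothetical; substitute any real GTSRB file.
#
# with Image.open("./GTSRB/Final_Training/Images/00000/00000_00000.ppm") as demo:
#     demo = demo.convert("RGB")
#     print(demo.size)                      # original size, usually non-square
#     print(base_transform(demo).size)      # (224, 224) after pad + resize
#     augmentation_pipeline(demo).show()    # one random augmented variant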
def process_training_data():
    """Process the training set, with downsampling and augmentation logic."""
    print("Processing the training set...")
    train_input_dir = os.path.join(INPUT_DIR, "Final_Training", "Images")
    train_output_dir = os.path.join(OUTPUT_DIR, "train")

    class_dirs = [
        d
        for d in os.listdir(train_input_dir)
        if os.path.isdir(os.path.join(train_input_dir, d))
    ]

    for class_id_str in tqdm(class_dirs, desc="Classes"):
        class_input_dir = os.path.join(train_input_dir, class_id_str)
        class_id_int = int(class_id_str)
        formatted_class_id = f"{class_id_int:02d}"
        class_output_dir = os.path.join(train_output_dir, formatted_class_id)
        os.makedirs(class_output_dir, exist_ok=True)

        annotation_file = os.path.join(class_input_dir, f"GT-{class_id_str}.csv")
        df = pd.read_csv(annotation_file, sep=";")

        # --- Class-balancing logic ---
        num_original = len(df)
        if num_original > TARGET_TRAIN_SAMPLES_PER_CLASS:
            # Downsample: if the class has too many samples, keep a random
            # subset of TARGET_TRAIN_SAMPLES_PER_CLASS images
            df_to_process = df.sample(
                n=TARGET_TRAIN_SAMPLES_PER_CLASS, random_state=RANDOM_STATE
            )
        else:
            # Otherwise keep every original sample
            df_to_process = df

        # 1. Transform and save the original images that were kept
        images_for_augmentation_pool = []
        for _, row in df_to_process.iterrows():
            filename = row["Filename"]
            img_path = os.path.join(class_input_dir, filename)
            try:
                with Image.open(img_path) as img:
                    img = img.convert("RGB")  # JPEG output requires RGB
                    # Keep an in-memory copy for the augmentation pool
                    images_for_augmentation_pool.append(img.copy())
                    # Apply the base transform and save
                    processed_img = base_transform(img)
                    output_filename = os.path.splitext(filename)[0] + ".jpeg"
                    processed_img.save(
                        os.path.join(class_output_dir, output_filename), "JPEG"
                    )
            except FileNotFoundError:
                print(f"Warning: file not found {img_path}")
            except Exception as e:
                print(f"Error while processing {img_path}: {e}")

        # 2. If the class is still short of samples, fill the gap with
        # augmented copies (guard against an empty pool in case every
        # original image failed to load)
        num_processed = len(df_to_process)
        num_to_augment = TARGET_TRAIN_SAMPLES_PER_CLASS - num_processed
        if num_to_augment > 0 and images_for_augmentation_pool:
            for i in range(num_to_augment):
                # Pick a random image from the processed pool and augment it
                img_to_augment = random.choice(images_for_augmentation_pool)
                augmented_img = augmentation_pipeline(img_to_augment)
                output_filename = f"aug_{i:04d}.jpeg"
                augmented_img.save(
                    os.path.join(class_output_dir, output_filename), "JPEG"
                )

    print("Training set done.")


def process_test_data():
    """Process the test set."""
    print("\nProcessing the test set...")
    test_input_dir = os.path.join(INPUT_DIR, "Final_Test", "Images")
    test_output_dir = os.path.join(OUTPUT_DIR, "test")

    annotation_file = os.path.join(INPUT_DIR, "GT-final_test.csv")
    df = pd.read_csv(annotation_file, sep=";")

    for _, row in tqdm(df.iterrows(), total=len(df), desc="Test images"):
        filename = row["Filename"]
        class_id = row["ClassId"]
        img_path = os.path.join(test_input_dir, filename)

        formatted_class_id = f"{class_id:02d}"
        class_output_dir = os.path.join(test_output_dir, formatted_class_id)
        os.makedirs(class_output_dir, exist_ok=True)

        try:
            with Image.open(img_path) as img:
                img = img.convert("RGB")  # JPEG output requires RGB
                processed_img = base_transform(img)
                output_filename = os.path.splitext(filename)[0] + ".jpeg"
                processed_img.save(
                    os.path.join(class_output_dir, output_filename), "JPEG"
                )
        except FileNotFoundError:
            print(f"Warning: file not found {img_path}")
        except Exception as e:
            print(f"Error while processing {img_path}: {e}")

    print("Test set done.")


def main():
    print("GTSRB dataset conversion script")
    print(f"Input directory:  {INPUT_DIR}")
    print(f"Output directory: {OUTPUT_DIR}\n")

    if os.path.exists(OUTPUT_DIR):
        print(f"Existing output directory {OUTPUT_DIR} found, removing it...")
        shutil.rmtree(OUTPUT_DIR)

    print("Creating the output directory structure...")
    os.makedirs(os.path.join(OUTPUT_DIR, "train"), exist_ok=True)
    os.makedirs(os.path.join(OUTPUT_DIR, "test"), exist_ok=True)

    process_training_data()
    process_test_data()

    print(f"\n🎉 All done! The converted data is in: {OUTPUT_DIR}")


if __name__ == "__main__":
    main()
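# --- Optional post-run sanity check (a minimal sketch, not called above) ---
# Counts the images written per class; assuming the layout produced by this
# script, every train class should end up at TARGET_TRAIN_SAMPLES_PER_CLASS.
#
# def count_samples_per_class(split):
#     split_dir = os.path.join(OUTPUT_DIR, split)
#     for class_id in sorted(os.listdir(split_dir)):
#         n = len(os.listdir(os.path.join(split_dir, class_id)))
#         print(f"{split}/{class_id}: {n} images")
#
# count_samples_per_class("train")
# count_samples_per_class("test")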