import gradio as gr
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from transformers import pipeline
import torch
from random import choice
from io import BytesIO
import os
from datetime import datetime

# Initialize the object detector on the GPU if one is available.
# Passing `device` to pipeline() keeps pre/post-processing on the same device
# as the model; moving only detector.model to CUDA afterwards would cause a
# device mismatch between inputs and weights.
device = 0 if torch.cuda.is_available() else -1
detector = pipeline(model="facebook/detr-resnet-101", use_fast=True, device=device)

COLORS = ["#ff7f7f", "#ff7fbf", "#ff7fff", "#bf7fff",
          "#7f7fff", "#7fbfff", "#7fffff", "#7fffbf",
          "#7fff7f", "#bfff7f", "#ffff7f", "#ffbf7f"]

# Font style for the bounding-box labels
fdic = {
    "style": "italic",
    "size": 15,
    "color": "yellow",
    "weight": "bold",
}


def query_data(in_pil_img: Image.Image):
    results = detector(in_pil_img)
    print(f"Detection results: {results}")
    return results


def get_annotated_image(in_pil_img):
    plt.figure(figsize=(16, 10))
    plt.imshow(in_pil_img)
    ax = plt.gca()
    in_results = query_data(in_pil_img)
    for prediction in in_results:
        color = choice(COLORS)
        box = prediction['box']
        label = prediction['label']
        score = round(prediction['score'] * 100, 1)
        ax.add_patch(plt.Rectangle((box['xmin'], box['ymin']),
                                   box['xmax'] - box['xmin'],
                                   box['ymax'] - box['ymin'],
                                   fill=False, color=color, linewidth=3))
        ax.text(box['xmin'], box['ymin'], f"{label}: {score}%", fontdict=fdic)
    plt.axis("off")
    buf = BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    plt.close()  # Close the figure to free memory
    buf.seek(0)
    annotated_image = Image.open(buf).convert('RGB')
    return np.array(annotated_image)


def process_video(input_video_path):
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        raise ValueError("Unable to open the input video file")

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use the 'mp4v' codec

    output_dir = './output_videos'  # Output directory
    os.makedirs(output_dir, exist_ok=True)  # Create it if it does not exist

    # Generate a unique filename from the current timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_video_filename = f"output_{timestamp}.mp4"
    output_video_path = os.path.join(output_dir, output_video_filename)

    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(rgb_frame)
        annotated_frame = get_annotated_image(pil_image)
        bgr_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR)
        # Ensure the frame size matches the output video
        if bgr_frame.shape[:2] != (height, width):
            bgr_frame = cv2.resize(bgr_frame, (width, height))
        print(f"Writing frame of shape {bgr_frame.shape} and type {bgr_frame.dtype}")  # Debug info
        out.write(bgr_frame)

    cap.release()
    out.release()

    # Return the output video path to Gradio
    return output_video_path


with gr.Blocks(css=".gradio-container {background:lightyellow;}",
               title="AI-Based Security Risk Identification and Prevention Application") as demo:
    gr.HTML("