import spaces
from flask import Flask, request, jsonify
import os
from werkzeug.utils import secure_filename
import cv2
import torch
import torch.nn.functional as F
from facenet_pytorch import MTCNN, InceptionResnetV1
import numpy as np
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
import base64

app = Flask(__name__)

# Configuration
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov', 'webm'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # Reject uploads larger than 16 MB

os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Device configuration
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Face detector: select_largest=False returns the most confident face,
# post_process=False keeps raw pixel values in [0, 255].
mtcnn = MTCNN(select_largest=False, post_process=False, device=DEVICE).eval()

# Binary classifier: InceptionResnetV1 backbone (vggface2 weights) with a single
# output logit, restored from a fine-tuned checkpoint.
model = InceptionResnetV1(pretrained="vggface2", classify=True, num_classes=1, device=DEVICE)
checkpoint = torch.load("resnetinceptionv1_epoch_32.pth", map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state_dict'])
model.to(DEVICE)
model.eval()

# GradCAM setup: produce heatmaps from a late convolutional layer, targeting the
# model's single output logit (class index 0).
target_layers = [model.block8.branch1[-1]]
cam = GradCAM(model=model, target_layers=target_layers)
targets = [ClassifierOutputTarget(0)]

def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@spaces.GPU
def process_frame(frame):
    face = mtcnn(frame)
    if face is None:
        return None, None, None

    # Add a batch dimension, resize to 256x256, move to the target device, and
    # scale pixel values from [0, 255] to [0, 1].
    face = face.unsqueeze(0)
    face = F.interpolate(face, size=(256, 256), mode='bilinear', align_corners=False)
    face = face.to(DEVICE, dtype=torch.float32) / 255.0

    # Single sigmoid output in [0, 1]; scores >= 0.5 are labelled "fake".
    with torch.no_grad():
        output = torch.sigmoid(model(face).squeeze(0))
        prediction = "fake" if output.item() >= 0.5 else "real"

    # Generate a GradCAM heatmap for this face and overlay it on the crop (RGB output).
    grayscale_cam = cam(input_tensor=face, targets=targets, eigen_smooth=True)
    grayscale_cam = grayscale_cam[0, :]

    face_image_to_plot = face.squeeze(0).permute(1, 2, 0).cpu().detach().numpy()
    visualization = show_cam_on_image(face_image_to_plot, grayscale_cam, use_rgb=True)

    return prediction, output.item(), visualization
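
# A standalone sanity check of process_frame could look like this sketch
# (illustrative only; "face_test.jpg" is a hypothetical local image, not part
# of this repo):
#
#   img = cv2.cvtColor(cv2.imread("face_test.jpg"), cv2.COLOR_BGR2RGB)
#   label, score, overlay = process_frame(img)
#   print(label, score)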

@spaces.GPU
def analyze_video(video_path, sample_rate=30, top_n=5):
    # Sample one frame every `sample_rate` frames, classify each detected face,
    # and keep the `top_n` sampled frames with the highest fake scores.
    cap = cv2.VideoCapture(video_path)
    frame_count = 0
    fake_count = 0
    total_processed = 0
    frames_info = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        if frame_count % sample_rate == 0:
            # OpenCV reads frames as BGR; the face detector and model expect RGB.
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            prediction, confidence, visualization = process_frame(rgb_frame)

            if prediction is not None:
                total_processed += 1
                if prediction == "fake":
                    fake_count += 1

                frames_info.append({
                    'frame_number': frame_count,
                    'prediction': prediction,
                    'confidence': confidence,
                    'visualization': visualization
                })

        frame_count += 1

    cap.release()

    if total_processed > 0:
        fake_percentage = (fake_count / total_processed) * 100
        # Higher sigmoid scores mean "more fake", so a descending sort surfaces
        # the frames with the strongest fake evidence.
        frames_info.sort(key=lambda x: x['confidence'], reverse=True)
        top_frames = frames_info[:top_n]

        return fake_percentage, top_frames
    else:
        return 0, []
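
# The /analyze endpoint below responds with JSON of roughly this shape (the
# values here are illustrative; each field is computed in the handler):
#
#   {
#     "fake_percentage": 42.86,
#     "is_likely_deepfake": false,
#     "top_frames": [
#       {
#         "frame_number": 30,
#         "prediction": "fake",
#         "confidence": 0.97,
#         "visualization": "<base64-encoded PNG overlay>"
#       }
#     ]
#   }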

@app.route('/analyze', methods=['POST'])
def analyze_video_api():
    if 'video' not in request.files:
        return jsonify({'error': 'No video file provided'}), 400

    file = request.files['video']

    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(filepath)

        try:
            fake_percentage, top_frames = analyze_video(filepath)
            os.remove(filepath)  # Remove the file after analysis

            # Encode each visualization as a base64 PNG. show_cam_on_image returned
            # RGB, while cv2.imencode expects BGR, so convert before encoding.
            for frame in top_frames:
                bgr_visualization = cv2.cvtColor(frame['visualization'], cv2.COLOR_RGB2BGR)
                frame['visualization'] = base64.b64encode(cv2.imencode('.png', bgr_visualization)[1]).decode('utf-8')

            result = {
                'fake_percentage': round(fake_percentage, 2),
                # Heuristic: flag the video when at least 60% of the sampled
                # faces were classified as fake.
                'is_likely_deepfake': fake_percentage >= 60,
                'top_frames': top_frames
            }

            return jsonify(result), 200
        except Exception as e:
            # Clean up the upload if analysis fails, then report the error.
            if os.path.exists(filepath):
                os.remove(filepath)
            return jsonify({'error': str(e)}), 500
    else:
        return jsonify({'error': f'Invalid file type: {file.filename}'}), 400

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
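
# Example client call (a minimal sketch; assumes the server is reachable at
# http://localhost:7860 and that "sample.mp4" is a hypothetical local video):
#
#   import requests
#
#   with open("sample.mp4", "rb") as f:
#       resp = requests.post("http://localhost:7860/analyze", files={"video": f})
#   data = resp.json()
#   print(data["fake_percentage"], data["is_likely_deepfake"])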