Ritikjain44 committed
Commit 72cab9b
Parent(s): e08cad4

Upload 9 files

- .gitattributes +1 -0
- app.py +49 -0
- app.txt +0 -0
- models/EfficentEnt_unfreezing.h5 +3 -0
- models/EfficientNet_Unfreezing.h5 +3 -0
- models/EfficientNet_keras.keras +3 -0
- models/model.pkl +3 -0
- models/model_efficientNet_unfreezing.h5 +3 -0
- templates/index.html +147 -0
- templates/index.txt +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+models/EfficientNet_keras.keras filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,49 @@
+from flask import Flask, request, jsonify, render_template
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from efficientnet.tfkeras import EfficientNetB0
+from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
+from tensorflow.keras.models import Model
+import pickle
+import cv2
+
+app = Flask(__name__)
+
+# Load the saved model
+model_path = "C://Users//ritik//Downloads//emotion detection//models//EfficientNet_Unfreezing.h5"
+model = tf.keras.models.load_model(model_path)
+
+# Preprocess image function
+def preprocess_image(image):
+    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+    if len(gray_image.shape) < 3:
+        return cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB)
+    blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
+    equalized_image = cv2.equalizeHist(blurred_image)
+    _, thresholded_image = cv2.threshold(equalized_image, 127, 255, cv2.THRESH_BINARY)
+    final_image = cv2.cvtColor(thresholded_image, cv2.COLOR_GRAY2RGB)
+    return final_image
+
+# Prediction route
+@app.route('/predict', methods=['POST'])
+def predict():
+    file = request.files['image']
+    img = cv2.imdecode(np.fromstring(file.read(), np.uint8), cv2.IMREAD_COLOR)
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    img = cv2.resize(img, (224, 224))  # Ensure image size matches the input size of the model
+    img = preprocess_image(img)
+    img = img / 255.0
+    img = np.expand_dims(img, axis=0)
+    prediction = model.predict(img)
+    emotions = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
+    result = emotions[np.argmax(prediction)]
+    return jsonify({'emotion': result})
+
+# Home route
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+if __name__ == '__main__':
+    app.run(debug=True)
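Two details in the app.py diff above are worth flagging: cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) always yields a 2-D array, so the len(gray_image.shape) < 3 branch returns early and the blur/equalize/threshold steps never run, and np.fromstring is deprecated in NumPy in favor of np.frombuffer. A minimal sketch of how the preprocessing and decoding path could be written so the full pipeline executes, assuming that was the intent; the helper decode_upload is an illustrative name, not part of the commit:

import cv2
import numpy as np

def preprocess_image(image):
    # Assumes `image` is an RGB array; convert to grayscale first.
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Smooth, equalize, and binarize, then go back to 3 channels
    # so the shape matches what the EfficientNet input expects.
    blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
    equalized_image = cv2.equalizeHist(blurred_image)
    _, thresholded_image = cv2.threshold(equalized_image, 127, 255, cv2.THRESH_BINARY)
    return cv2.cvtColor(thresholded_image, cv2.COLOR_GRAY2RGB)

def decode_upload(file_bytes):
    # np.frombuffer replaces the deprecated np.fromstring call.
    buf = np.frombuffer(file_bytes, np.uint8)
    img = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)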
app.txt
ADDED
File without changes
models/EfficentEnt_unfreezing.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdfd6ee5ab70bb428a00516c51b1429ece1f8e46fe31e419eda15823129f70d5
+size 65198840
models/EfficientNet_Unfreezing.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac16764ccc615089c8a7b204df6fde6573e04ade1080f7d442332ba27a5c4fe
+size 65186536
models/EfficientNet_keras.keras
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:828c5de91dfbd6ee1fb6dbaa052c653ed630ca55570809a0d2830e2bcf4d1224
+size 64960178
models/model.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48834423069e906cc91a6f388e176ced0b3869c1755d17932dec9ed12a29dca5
+size 64960277
models/model_efficientNet_unfreezing.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac16764ccc615089c8a7b204df6fde6573e04ade1080f7d442332ba27a5c4fe
+size 65186536
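The five files under models/ are committed as Git LFS pointers (the version/oid/size stanzas above), not as raw weights, so a clone without LFS content pulls only the three-line pointer text. A minimal sketch, assuming a local checkout, for checking whether a model file has been materialized or is still a pointer; is_lfs_pointer and the example path are illustrative, not part of the repo:

from pathlib import Path

LFS_SPEC = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path):
    # Real model weights here are ~65 MB; an LFS pointer file is a few
    # hundred bytes and starts with the spec header shown in the diff above.
    p = Path(path)
    if p.stat().st_size > 1024:
        return False
    lines = p.read_text(errors="ignore").splitlines()
    return bool(lines) and lines[0].strip() == LFS_SPEC

# Example (hypothetical local path):
# print(is_lfs_pointer("models/EfficientNet_Unfreezing.h5"))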
templates/index.html
ADDED
@@ -0,0 +1,147 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Emotion Detection</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            margin: 0;
+            padding: 0;
+            background-color: #f0f0f0;
+            text-align: center;
+        }
+        h1 {
+            margin-top: 30px;
+            color: #333;
+        }
+        #upload-form {
+            margin-top: 20px;
+        }
+        #result {
+            margin-top: 20px;
+            font-weight: bold;
+        }
+        #result.emotion-happy {
+            color: #00cc00;
+        }
+        #result.emotion-sad {
+            color: #3333ff;
+        }
+        #result.emotion-angry {
+            color: #cc0000;
+        }
+        #result.emotion-neutral {
+            color: #666666;
+        }
+        #result.emotion-surprise {
+            color: #ff9900;
+        }
+        input[type="file"] {
+            display: none;
+        }
+        .custom-file-upload {
+            display: inline-block;
+            padding: 10px 20px;
+            cursor: pointer;
+            background-color: #4CAF50;
+            color: white;
+            border-radius: 5px;
+            transition: background-color 0.3s;
+        }
+        .custom-file-upload:hover {
+            background-color: #45a049;
+        }
+        #uploaded-image {
+            margin-top: 20px;
+            max-width: 400px;
+            border-radius: 5px;
+        }
+        #upload-button {
+            display: none;
+        }
+        #reset-button {
+            margin-top: 20px;
+            padding: 10px 20px;
+            background-color: #f44336;
+            color: white;
+            border: none;
+            border-radius: 5px;
+            cursor: pointer;
+            transition: background-color 0.3s;
+        }
+        #reset-button:hover {
+            background-color: #d32f2f;
+        }
+    </style>
+</head>
+<body>
+    <h1>Emotion Detection</h1>
+    <form id="upload-form" enctype="multipart/form-data">
+        <label for="image" class="custom-file-upload">
+            Choose File
+        </label>
+        <input type="file" id="image" name="image" accept="image/*" required>
+        <button id="upload-button" type="submit">Upload Image</button>
+    </form>
+
+    <div id="result"></div>
+    <img id="uploaded-image" src="#" alt="Uploaded Image">
+    <button id="reset-button">Reset</button>
+
+    <script>
+        document.getElementById('image').addEventListener('change', function(event) {
+            var fileInput = event.target;
+            var file = fileInput.files[0];
+            var uploadedImage = document.getElementById('uploaded-image');
+            var uploadButton = document.getElementById('upload-button');
+
+            if (file) {
+                uploadedImage.src = URL.createObjectURL(file);
+                uploadButton.style.display = 'inline-block';
+            } else {
+                uploadedImage.src = '#';
+                uploadButton.style.display = 'none';
+            }
+        });
+
+        document.getElementById('upload-form').addEventListener('submit', function(event) {
+            event.preventDefault();
+            var formData = new FormData(this);
+            fetch('/predict', {
+                method: 'POST',
+                body: formData
+            })
+            .then(response => {
+                if (!response.ok) {
+                    throw new Error('Network response was not ok');
+                }
+                return response.json();
+            })
+            .then(data => {
+                var resultDiv = document.getElementById('result');
+                resultDiv.innerText = 'Detected Emotion: ' + data.emotion;
+                resultDiv.className = 'emotion-' + data.emotion.toLowerCase();
+            })
+            .catch(error => {
+                var resultDiv = document.getElementById('result');
+                resultDiv.innerText = 'Error: ' + error.message;
+                resultDiv.className = '';
+            });
+        });
+
+        document.getElementById('reset-button').addEventListener('click', function(event) {
+            var form = document.getElementById('upload-form');
+            form.reset();
+            var uploadedImage = document.getElementById('uploaded-image');
+            uploadedImage.src = '#';
+            var resultDiv = document.getElementById('result');
+            resultDiv.innerText = '';
+            resultDiv.className = '';
+            var uploadButton = document.getElementById('upload-button');
+            uploadButton.style.display = 'none';
+        });
+    </script>
+</body>
+</html>
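templates/index.html posts the selected file to /predict from the browser via fetch; the same route can be exercised from the command line. A minimal sketch using the requests library, assuming the Flask dev server is running at its default http://127.0.0.1:5000 and that test.jpg is a local image (both are illustrative, not part of the commit):

import requests

# Post an image to the /predict route exposed by app.py.
# The field name must be "image" to match request.files['image'].
with open("test.jpg", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:5000/predict",
        files={"image": ("test.jpg", f, "image/jpeg")},
    )

resp.raise_for_status()
print(resp.json())  # e.g. {"emotion": "Happy"}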
templates/index.txt
ADDED
File without changes