import streamlit as st
import similarity_check as sc
import cv2
from PIL import Image
import numpy as np
import tempfile
from streamlit_webrtc import VideoTransformerBase, webrtc_streamer
import demo
import time
import requests
import json
import request_json.sbt_request_generator as sbt

data = {}
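
# App flow:
#   I.     Similarity check between an uploaded HKID card and a bank statement.
#   IIa/b. Field extraction from both documents (shown once "Recognise" is pressed).
#   III.   Facial recognition against the webcam feed; "Confirm" then passes the
#          collected fields to request_json.sbt_request_generator.split_data().
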
def main():
    html_temp = """
    <body style="background-color:red;">
    <div style="background-color:teal;padding:10px">
    <h2 style="color:white;text-align:center;">SBT Web Application</h2>
    </div>
    </body>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
st.header("I. Similarity Check") | |
image_file = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg'], accept_multiple_files=True) | |
if len(image_file) == 1: | |
# print(image_file[0].name) | |
image1 = Image.open(image_file[0]) | |
st.text("HKID card") | |
st.image(image1) | |
elif len(image_file) == 2: | |
image1 = Image.open(image_file[0]) | |
st.text("HKID card") | |
st.image(image1) | |
image2 = Image.open(image_file[1]) | |
file_name = image_file[1].name | |
st.text("Bank statement") | |
st.image(image2) | |
# if image_file2 is not None: | |
# image2 = Image.open(image_file) | |
# st.text("Bank statement") | |
# st.image(image2) | |
# path1 = 'IMG_4495.jpg' | |
# path2 = 'hangseng_page-0001.jpg' | |
# image1 = save_image(image1) | |
# image2 = save_image(image2) | |
    data = {}
    if st.button("Recognise"):
        with st.spinner('Wait for it...'):
            # Requires both documents to have been uploaded above.
            data = sc.get_data(image1, image2, file_name)
            # Persist the extracted fields so the facial-recognition loop can update them later.
            with open('data1.txt', 'w') as f:
                f.write(json.dumps(data))
            print(f'data inside {data}')
        st.success('Done!')

        score = data["similarity_score"]
        st.text(f'score: {score}')
        if score > 85:
            st.text('matched')
        else:
            st.text('unmatched')
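        # The keys below mirror the dictionary returned by similarity_check.get_data().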
st.header("IIa. HKID Data Extraction") | |
st.text(f'Name: {data["name_on_id"]}') # name is without space | |
st.text(f'HKID: {data["hkid"]} and validity: {data["validity"]}') | |
st.text(f'Date of issue: {data["issue_date"]}') | |
st.header("IIb. Bank Statement Data Extraction") | |
# st.write('------------From bank statement------------') | |
st.text(f'Name: {data["name_on_bs"]}') | |
st.text(f'Address: {data["address"]}') | |
st.text(f'Bank: {data["bank"]}') | |
st.text(f'Date: {data["date"]}') | |
st.text(f'Asset: {data["asset"]} hkd') | |
st.text(f'Liabilities: {data["liabilities"]} hkd') | |
# result_img= detect_faces(our_image) | |
# st.image(result_img) | |
# print(f'data outside 1 {data}') | |
st.header("II. Facial Recognition") | |
run = st.checkbox('Run') | |
# webrtc_streamer(key="example") | |
# 1. Web Rtc | |
# webrtc_streamer(key="jhv", video_frame_callback=video_frame_callback) | |
# # init the camera | |
face_locations = [] | |
# face_encodings = [] | |
face_names = [] | |
process_this_frame = True | |
score = [] | |
faces = 0 | |
FRAME_WINDOW = st.image([]) | |
camera = cv2.VideoCapture(0) | |
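    # Stream frames from the default webcam while the checkbox stays ticked;
    # demo.process_frame() returns the annotated frame plus the updated tracking state.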
    while run:
        # Grab a single frame of video and run it through the recognition pipeline.
        ret, frame = camera.read()
        result, process_this_frame, face_locations, faces, face_names, score = demo.process_frame(
            frame, process_this_frame, face_locations, faces, face_names, score)
        # Display the resulting image.
        FRAME_WINDOW.image(result)
        print(score)
        if len(score) > 20:
            avg_score = sum(score) / len(score)
            st.write(f'{avg_score}')
            # Reload the saved extraction results, attach the averaged face-match score,
            # and write the file back.
            with open('data1.txt', 'r') as f:
                data = json.loads(f.read())
            data['avg_score'] = str(avg_score)
            with open('data1.txt', 'w') as f:
                f.write(json.dumps(data))
    else:
        st.write('Stopped')
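    # Hand the collected fields to the SBT request generator.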
if st.button("Confirm"): | |
# print(f'data outside 3 {data}') | |
with st.spinner('Sending data...'): | |
sbt.split_data(data) | |
st.success('Done!') | |
if __name__ == '__main__': | |
main() | |
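
# Typical launch command (assuming this file is saved as app.py):
#   streamlit run app.py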
# def save_image(image):
#     try:
#         temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
#         image.save(temp_file.name)
#         return temp_file.name
#     except IOError:
#         print("Unable to save image to temporary file")
#         return None

# Reference mapping of the extracted fields into request_legalDocument.json:
# json_file = 'request json\\request_legalDocument.json'
# file = open(json_file, 'r')
# data = json.load(file)
# file.close()
# # Update data
# data.update(new_data)
# file = open(json_file, 'w')
# for item in data['request']['body']['formdata']:
#     if item["key"] == "requestId":
#         item["value"] = ""
#     elif item["key"] == "userId":
#         item["value"] = generate_token_id(2048)
#     elif item["key"] == "endpoint":
#         item["value"] = ""
#     elif item["key"] == "apiType":
#         item["value"] = ""
#     elif item["key"] == "docType":
#         item["value"] = "HKID"
#     elif item["key"] == "nameDoc":
#         item["value"] = new_data["name_on_id"]
#     elif item["key"] == "docID":
#         item["value"] = new_data["name_on_id"]
#     elif item["key"] == "docValidity":
#         item["value"] = new_data["validity"]
#     elif item["key"] == "dateOfIssue":
#         item["value"] = new_data["date_issue"]
#     elif item["key"] == "matchingScore":
#         item["value"] = new_data["similarity_score"]
# json.dump(data, file)
# file.close()