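"""Streamlit front end for the SBT Web Application.

Covers HKID / bank-statement upload and similarity check, extracted-data display,
a search-engine lookup by user ID, bank-statement fetching, and final submission.
Launch with `streamlit run <this file>`.
"""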
import streamlit as st
from PIL import Image

# cv2, numpy, av, demo and streamlit_webrtc are only used by the
# commented-out webcam / facial-recognition demo further down.
import cv2
import numpy as np
import av
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, WebRtcMode
import demo

# Local helper modules for the SBT workflow.
import similarity_check as sc
import check_hkid_validity as chv
import request_json.sbt_request_generator as sbt
import search_engine as se
import get_bank_statement as bs
# def init():
#     face_locations = []
#     # face_encodings = []
#     face_names = []
#     process_this_frame = True
#     score = []
#     faces = 0


# def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
#     image = frame.to_ndarray(format="bgr24")
def main():
    # st.title("SBT Web Application")
    # today's date = get_today_date
    # global data
    html_temp = """
    <body style="background-color:red;">
    <div style="background-color:teal ;padding:10px">
    <h2 style="color:white;text-align:center;">SBT Web Application</h2>
    </div>
    </body>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
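    # st.session_state persists values across Streamlit reruns, so the HKID
    # validity flag and the extracted data survive later widget interactions.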
    if 'hkid_image_validity' not in st.session_state:
        st.session_state.hkid_image_validity = False
    if 'data' not in st.session_state:
        st.session_state['data'] = {}
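    # Section I: the uploader expects the HKID card image first and, optionally,
    # the bank statement as a second file.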
st.header("I. Similarity Check") | |
image_file = st.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg', 'pdf'], accept_multiple_files=True) | |
if len(image_file) == 1: | |
image1 = Image.open(image_file[0]) | |
st.text("HKID card") | |
st.image(image1) | |
image1.save('image/hkid.jpg', 'JPEG') | |
if chv.check_hkid('image/hkid.jpg'): | |
st.text("Valid HKID card.") | |
st.session_state.hkid_image_validity = True | |
else: | |
st.text("Invalid HKID card. Please upload again!") | |
st.session_state.hkid_image_validity = False | |
    elif len(image_file) == 2:
        image1 = Image.open(image_file[0])
        st.text("HKID card")
        st.image(image1)
        image2 = Image.open(image_file[1])
        # image2 = image_file[1]
        # image2.save('image/hkid.jpg', 'JPEG')
        # file_name = image_file[1].name
        st.text("Bank statement")
        st.image(image2)
        print(f"the id is: {st.session_state.hkid_image_validity}")
        # if image_file2 is not None:
        #     image2 = Image.open(image_file)
        #     st.text("Bank statement")
        #     st.image(image2)
        # path1 = 'IMG_4495.jpg'
        # path2 = 'hangseng_page-0001.jpg'
        # image1 = save_image(image1)
        # image2 = save_image(image2)
        data = {}
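        # sc.get_data is assumed to OCR both images and return a dict of extracted
        # fields (names, HKID number, statement details) plus a 'similarity_score'.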
if st.button("Recognise"): | |
with st.spinner('Wait for it...'): | |
# global data | |
data = sc.get_data(image1, image2) | |
# se.get_data_link(data['chi_name_id'], data["name_on_id"], data["address"]) | |
if 'data' in st.session_state: | |
data["nationality"] = 'N/A' # for hkid | |
st.session_state['data'] = data | |
st.session_state['verified'] = "True" | |
st.success('Done!') | |
score = int(st.session_state['data']['similarity_score']) | |
st.text(f'score: {score}') | |
if (score>85): | |
st.text(f'matched') | |
else: | |
st.text(f'unmatched') | |
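            # Show the fields extracted from the HKID card and the bank statement.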
            data = st.session_state['data']
            st.header("Ia. HKID Data Extraction")
            st.text(f'English Name: {data["name_on_id"]}')   # name is without space
            st.text(f'Chinese Name: {data["chi_name_id"]}')  # name is without space
            st.text(f'HKID: {data["hkid"]} and validity: {data["validity"]}')
            st.text(f'Date of issue: {data["issue_date"]}')
            st.text(f'Date of birth: {data["dateofbirth"]}')
            st.text(f'Nationality: {data["nationality"]}')
            st.header("Ib. Bank Statement Data Extraction")
            st.text(f'Name: {data["nameStatement"]}')
            st.text(f'Address: {data["address"]}')
            st.text(f'Bank: {data["bank"]}')
            st.text(f'Date: {data["statementDate"]}')
            st.text(f'Asset: {data["totalAsset"]} HKD')
            st.text(f'Liabilities: {data["totalLiability"]} HKD')
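    # Debug output: dump the accumulated payload to the server console.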
    if 'data' in st.session_state:
        tempout = st.session_state['data']
        print(f'data: {tempout}')
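    # Section II (disabled): a webcam-based facial recognition demo built on
    # streamlit-webrtc; kept here for reference only.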
    # st.header("II. Facial Recognition")
    # run = st.checkbox('Run')
    # webrtc_streamer(key="example")

    # 1. Web Rtc
    # webrtc_streamer(key="jhv", video_frame_callback=video_frame_callback)

    # # init the camera
    # face_locations = []
    # face_encodings = []
    # face_names = []
    # process_this_frame = True
    # score = []
    # faces = 0
    # FRAME_WINDOW = st.image([])
    # server_ip = "127.0.0.1"
    # server_port = 6666
    # camera = cv2.VideoCapture(0)
    # s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1000000)

    # if "face_rec" not in st.session_state:
    #     st.session_state.face_rec = []

    # while run:
    #     rtc_configuration = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
    #     # Capture frame-by-frame
    #     # Grab a single frame of video
    #     ret, frame = camera.read()
    #     result = frame

    #     # Initialize the WebRTC streaming
    #     webrtc_ctx = webrtc_streamer(
    #         key="face_rec",
    #         mode=WebRtcMode.SENDRECV,
    #         rtc_configuration=rtc_configuration,
    #         # video_transformer_factory=WebcamTransformer,
    #         video_frame_callback=video_frame_callback,
    #         media_stream_constraints={"video": True, "audio": False},
    #         async_processing=True,
    #     )
    #     print(f'xd: look here {type(webrtc_ctx)}')
    #     st.session_state.face_rec = webrtc_ctx

    #     if webrtc_ctx.video_transformer:
    #         st.header("Webcam Preview")
    #         frame = webrtc_ctx.video_transformer.frame
    #         result, process_this_frame, face_locations, faces, face_names, score = demo.process_frame(frame, process_this_frame, face_locations, faces, face_names, score)
    #         st.video(result)

    #     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    #     FRAME_WINDOW.image(result)

    #     if ret is not None:
    #         ret, buffer = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 30])
    #         x_as_bytes = pickle.dumps(buffer)
    #         s.sendto((x_as_bytes), (server_ip, server_port))
    #         camera.release()

    #     if ret:
    #         # ret, buffer = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY)])
    #         # result, process_this_frame, face_locations, faces, face_names, score = demo.process_frame(frame, process_this_frame, face_locations, faces, face_names, score)
    #         # Display the resulting image
    #         FRAME_WINDOW.image(frame)
    #     else:
    #         print("there is no frame detected")
    #         continue

    #     print(score)
    #     if len(score) > 20:
    #         avg_score = sum(score) / len(score)
    #         st.write(avg_score)
    #         # st.write(f'{demo.convert_distance_to_percentage(avg_score, 0.45)}')
    #         # camera.release()
    #         run = not run
    #         st.session_state['data']['avg_score'] = str(avg_score)

    ## unrelated
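    # Section III: look up the applicant by user ID and fetch their bank statements
    # (handled by the search_engine and get_bank_statement helper modules).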
st.header("III. Search Engine and Bank Statement") | |
user_input_id = st.text_input("Enter the user ID here", " ") | |
if st.button("Search data"): | |
with st.spinner('Searching data...'): | |
se.get_data_link(user_input_id) | |
st.success('Done!') | |
if st.button("Fetch bank statement"): | |
with st.spinner('getting statements...'): | |
bs.get_bs(user_input_id) | |
st.success('Done!') | |
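    # Confirm marks the session as verified via query parameters and hands the
    # collected payload to the SBT request generator.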
if st.button("Confirm"): | |
st.experimental_set_query_params( | |
verified=True, | |
) | |
with st.spinner('Sending data...'): | |
print(st.session_state['data']) | |
sbt.split_data(st.session_state['data']) | |
st.success('Done!') | |


if __name__ == '__main__':
    main()