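"""Streamlit demo for automatic landmark detection on Merchant-view knee DICOM images.

Loads a pretrained Keras model from the Hugging Face Hub
(SerdarHelli/Knee-View-Merchant-Landmark-Detection), lets the user upload a
.dcm file or pick a bundled example, crops the knee region with OpenCV
preprocessing, predicts the landmarks, and reports the patellar congruence
and parallel tilt angles together with basic DICOM metadata.
"""
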
import streamlit as st
from PIL import Image
import numpy as np
import cv2
from Utils import *
from huggingface_hub import from_pretrained_keras

# Pretrained landmark-detection model hosted on the Hugging Face Hub.
model = from_pretrained_keras("SerdarHelli/Knee-View-Merchant-Landmark-Detection", use_auth_token=True)

st.subheader("Upload Merchant Knee View")
image_file = st.file_uploader("Upload Images", type=["dcm"])

# Example DICOM files (selected via the buttons below).
examples = ["1.3.46.670589.30.1.6.1.149885691756583.1510655758812.1.dcm",
            "1.2.392.200036.9125.9.0.235868094.418384128.208354950.dcm",
            "1.2.392.200036.9107.500.304.423.20170526.173028.10423.dcm"]

colx1, colx2, colx3 = st.columns(3)

st.text("Merchant Knee View Dicom Examples ")

with colx1:
    st.text("Example -1 ")
    if st.button('Example 1'):
        image_file = examples[0]

with colx2:
    st.text("Example -2 ")
    if st.button('Example 2'):
        image_file = examples[1]

with colx3:
    st.text("Example -3 ")
    if st.button('Example 3'):
        image_file = examples[2]

if image_file is not None:
    st.text("Making A Prediction ....")

    # Read the DICOM pixel data and metadata; retry with the alternate
    # read_dicom flag if the first attempt fails.
    try:
        data, PatientName, PatientID, SOPInstanceUID, StudyDate, InstitutionAddress, PatientAge, PatientSex = read_dicom(image_file, False, True)
    except Exception:
        data, PatientName, PatientID, SOPInstanceUID, StudyDate, InstitutionAddress, PatientAge, PatientSex = read_dicom(image_file, True, True)

    img = np.copy(data)

    # Morphological opening followed by erosion suppresses small bright
    # artifacts before thresholding.
    kernel = np.ones((5, 5), dtype=np.float32)
    img2 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=2)
    img2 = cv2.erode(img2, kernel, iterations=2)
    if len(img2.shape) == 3:
        img2 = img2[:, :, 0]

    # Binarize, rescale to 8 bit, and find contours to localize the knee region.
    ret, thresh = cv2.threshold(img2, 100, 4096, cv2.THRESH_BINARY)
    thresh = ((thresh / np.max(thresh)) * 255).astype('uint8')
    a1, b1 = thresh.shape

    contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) == 0:
        # No contour found: fall back to the thresholded image itself.
        roi = thresh
    else:
        # Keep the largest contour and crop its bounding box from the original data.
        c_area = np.zeros([len(contours)])
        for i in range(len(contours)):
            c_area[i] = cv2.contourArea(contours[i])

        cnts = contours[np.argmax(c_area)]
        x, y, w, h = cv2.boundingRect(cnts)
        roi = croping(data, x, y, w, h)

    # Adjust the crop and resize it to the model's 256x256 input, then
    # predict the landmarks, mark them on the image, and compute the
    # patellar congruence and parallel tilt angles.
    roi = modification_cropping(roi)
    roi = cv2.resize(roi, (256, 256), interpolation=cv2.INTER_NEAREST)

    pre = predict(roi, model)
    heatpoint = points_max_value(pre)
    output = put_text_point(roi, heatpoint)
    output, PatellerCongruenceAngle, ParalelTiltAngle = draw_angle(output, heatpoint)

    data_text = {'PatientID': PatientID,
                 'PatientName': PatientName,
                 'Pateller_Congruence_Angle': PatellerCongruenceAngle,
                 'Paralel_Tilt_Angle': ParalelTiltAngle,
                 'SOP_Instance_UID': SOPInstanceUID,
                 'StudyDate': StudyDate,
                 'InstitutionName': InstitutionAddress,
                 'PatientAge': PatientAge,
                 'PatientSex': PatientSex,
                 }

    # Show the original image, the annotated prediction, and the extracted metadata.
    st.text("Original Dicom Image")
    st.image(np.uint8(data / np.max(data) * 255), width=450)

    st.text("Predicted and Cropped-Resized Image ")
    st.image(np.uint8(output), width=450)

    st.write(data_text)