JoJosmin committed on
Commit
394e02a
1 Parent(s): f56169b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -58,6 +58,14 @@ def get_segmented_embedding(img, final_mask):
58
  image_features /= image_features.norm(dim=-1, keepdim=True)
59
  return image_features.cpu().numpy().flatten()
60
 
 
 
 
 
 
 
 
 
61
  def segment_clothing(img, clothes=["Hat", "Upper-clothes", "Skirt", "Pants", "Dress", "Belt", "Left-shoe", "Right-shoe", "Scarf"]):
62
  segments = segmenter(img)
63
  mask_list = []
@@ -174,8 +182,8 @@ elif st.session_state.step == 'show_results':
174
  st.image(original_image, caption="Original Image", use_column_width=True)
175
 
176
  # 세그먼트된 이미지에서 임베딩 추출
177
- query_embedding = get_segmented_embedding(st.session_state.query_image, st.session_state.segmented_image)
178
-
179
  similar_images = find_similar_images(query_embedding, collection)
180
 
181
  st.subheader("Similar Items:")
 
58
  image_features /= image_features.norm(dim=-1, keepdim=True)
59
  return image_features.cpu().numpy().flatten()
60
 
61
def get_image_embedding(image, final_mask=None):
    """Return a flat, L2-normalized CLIP embedding for a whole image.

    Args:
        image: PIL image to embed (passed through `preprocess_val`).
        final_mask: Optional segmentation mask. Accepted for call-site
            compatibility with `get_segmented_embedding` but NOT applied
            here — the embedding is computed on the full image.
            TODO(review): confirm whether the mask should be applied.

    Returns:
        1-D numpy array: the normalized image feature vector on CPU.
    """
    image_tensor = preprocess_val(image).unsqueeze(0).to(device)
    with torch.no_grad():
        image_features = clip_model.encode_image(image_tensor)
        # Normalize so downstream similarity search uses cosine distance.
        image_features /= image_features.norm(dim=-1, keepdim=True)
    return image_features.cpu().numpy().flatten()
67
+
68
+
69
  def segment_clothing(img, clothes=["Hat", "Upper-clothes", "Skirt", "Pants", "Dress", "Belt", "Left-shoe", "Right-shoe", "Scarf"]):
70
  segments = segmenter(img)
71
  mask_list = []
 
182
  st.image(original_image, caption="Original Image", use_column_width=True)
183
 
184
  # 세그먼트된 이미지에서 임베딩 추출
185
+ #query_embedding = get_segmented_embedding(st.session_state.query_image, st.session_state.segmented_image)
186
+ query_embedding = get_image_embedding(st.session_state.query_image)
187
  similar_images = find_similar_images(query_embedding, collection)
188
 
189
  st.subheader("Similar Items:")