winfred2027 committed on
Commit 9fe654e
1 Parent(s): 55dcb09

Update app.py

Files changed (1)
  1. app.py +71 -259
app.py CHANGED
@@ -1,33 +1,13 @@
 import sys
 import threading
 import streamlit as st
-from huggingface_hub import HfFolder, snapshot_download
-
-
-@st.cache_data
-def load_support():
-    if st.secrets.has_key('etoken'):
-        HfFolder().save_token(st.secrets['etoken'])
-    sys.path.append(snapshot_download("OpenShape/openshape-demo-support"))
-
-
-# st.set_page_config(layout='wide')
-load_support()
-
-
 import numpy
 import torch
 import openshape
 import transformers
 from PIL import Image
-
-@st.cache_resource
-def load_openshape(name, to_cpu=False):
-    pce = openshape.load_pc_encoder(name)
-    if to_cpu:
-        pce = pce.cpu()
-    return pce
-
+from huggingface_hub import HfFolder, snapshot_download
+from demo_support import retrieval
 
 @st.cache_resource
 def load_openclip():
@@ -43,95 +23,48 @@ def load_openclip():
     return clip_model, clip_prep
 
 
-f32 = numpy.float32
-half = torch.float16 if torch.cuda.is_available() else torch.bfloat16
-# clip_model, clip_prep = None, None
-clip_model, clip_prep = load_openclip()
-model_b32 = load_openshape('openshape-pointbert-vitb32-rgb', True)
-model_l14 = load_openshape('openshape-pointbert-vitl14-rgb')
-model_g14 = load_openshape('openshape-pointbert-vitg14-rgb')
-torch.set_grad_enabled(False)
-for kc, vc in st.session_state.get('state_queue', []):
-    st.session_state[kc] = vc
-st.session_state.state_queue = []
-
-
-import samples_index
-from openshape.demo import misc_utils, classification, caption, sd_pc2img, retrieval
-
-
-st.title("OpenShape Demo")
-st.caption("For faster inference without waiting in queue, you may clone the space and run it yourself.")
-prog = st.progress(0.0, "Idle")
-tab_cls, tab_img, tab_text, tab_pc, tab_sd, tab_cap = st.tabs([
-    "Classification",
-    "Retrieval w/ Image",
-    "Retrieval w/ Text",
-    "Retrieval w/ 3D",
-    "Image Generation",
-    "Captioning",
-])
-
-
-def sq(kc, vc):
-    st.session_state.state_queue.append((kc, vc))
-
-
-def reset_3d_shape_input(key):
-    # this is not working due to streamlit problems, don't use it
-    model_key = key + "_model"
-    npy_key = key + "_npy"
-    swap_key = key + "_swap"
-    sq(model_key, None)
-    sq(npy_key, None)
-    sq(swap_key, "Y is up (for most Objaverse shapes)")
-
-
-def auto_submit(key):
-    if st.session_state.get(key):
-        st.session_state[key] = False
-        return True
-    return False
-
-
-def queue_auto_submit(key):
-    st.session_state[key] = True
-    st.experimental_rerun()
-
-
-img_example_counter = 0
+def retrieval_filter_expand(key):
+    with st.expander("Filters"):
+        sim_th = st.slider("Similarity Threshold", 0.05, 0.5, 0.1, key=key + 'rtsimth')
+        tag = st.text_input("Has Tag", "", key=key + 'rthastag')
+        col1, col2 = st.columns(2)
+        face_min = int(col1.text_input("Face Count Min", "0", key=key + 'rtfcmin'))
+        face_max = int(col2.text_input("Face Count Max", "34985808", key=key + 'rtfcmax'))
+        col1, col2 = st.columns(2)
+        anim_min = int(col1.text_input("Animation Count Min", "0", key=key + 'rtacmin'))
+        anim_max = int(col2.text_input("Animation Count Max", "563", key=key + 'rtacmax'))
+        tag_n = not bool(tag.strip())
+        anim_n = not (anim_min > 0 or anim_max < 563)
+        face_n = not (face_min > 0 or face_max < 34985808)
+        filter_fn = lambda x: (
+            (anim_n or anim_min <= x['anims'] <= anim_max)
+            and (face_n or face_min <= x['faces'] <= face_max)
+            and (tag_n or tag in x['tags'])
+        )
+        return sim_th, filter_fn
 
+def retrieval_results(results):
+    st.caption("Click the link to view the 3D shape")
+    for i in range(len(results) // 4):
+        cols = st.columns(4)
+        for j in range(4):
+            idx = i * 4 + j
+            if idx >= len(results):
+                continue
+            entry = results[idx]
+            with cols[j]:
+                ext_link = f"https://objaverse.allenai.org/explore/?query={entry['u']}"
+                st.image(entry['img'])
+                # st.markdown(f"[![thumbnail {entry['desc'].replace('\n', ' ')}]({entry['img']})]({ext_link})")
+                # st.text(entry['name'])
+                quote_name = entry['name'].replace('[', '\\[').replace(']', '\\]').replace('\n', ' ')
+                st.markdown(f"[{quote_name}]({ext_link})")
 
-def image_examples(samples, ncols, return_key=None, example_text="Examples"):
-    global img_example_counter
-    trigger = False
-    with st.expander(example_text, True):
-        for i in range(len(samples) // ncols):
-            cols = st.columns(ncols)
-            for j in range(ncols):
-                idx = i * ncols + j
-                if idx >= len(samples):
-                    continue
-                entry = samples[idx]
-                with cols[j]:
-                    st.image(entry['dispi'])
-                    img_example_counter += 1
-                    with st.columns(5)[2]:
-                        this_trigger = st.button('\+', key='imgexuse%d' % img_example_counter)
-                    trigger = trigger or this_trigger
-                    if this_trigger:
-                        if return_key is None:
-                            for k, v in entry.items():
-                                if not k.startswith('disp'):
-                                    sq(k, v)
-                        else:
-                            trigger = entry[return_key]
-    return trigger
 
 
 def demo_classification():
     with st.form("clsform"):
-        load_data = misc_utils.input_3d_shape('cls')
+        #load_data = misc_utils.input_3d_shape('cls')
         cats = st.text_input("Custom Categories (64 max, separated with comma)")
         cats = [a.strip() for a in cats.split(',')]
         if len(cats) > 64:
@@ -139,185 +72,64 @@ def demo_classification():
             return
         lvis_run = st.form_submit_button("Run Classification on LVIS Categories")
        custom_run = st.form_submit_button("Run Classification on Custom Categories")
-    if lvis_run or auto_submit("clsauto"):
-        pc = load_data(prog)
-        col2 = misc_utils.render_pc(pc)
-        prog.progress(0.5, "Running Classification")
-        pred = classification.pred_lvis_sims(model_g14, pc)
-        with col2:
-            for i, (cat, sim) in zip(range(5), pred.items()):
-                st.text(cat)
-                st.caption("Similarity %.4f" % sim)
-        prog.progress(1.0, "Idle")
-    if custom_run:
-        pc = load_data(prog)
-        col2 = misc_utils.render_pc(pc)
-        prog.progress(0.5, "Computing Category Embeddings")
-        device = clip_model.device
-        tn = clip_prep(text=cats, return_tensors='pt', truncation=True, max_length=76, padding=True).to(device)
-        feats = clip_model.get_text_features(**tn).float().cpu()
-        prog.progress(0.5, "Running Classification")
-        pred = classification.pred_custom_sims(model_g14, pc, cats, feats)
-        with col2:
-            for i, (cat, sim) in zip(range(5), pred.items()):
-                st.text(cat)
-                st.caption("Similarity %.4f" % sim)
-        prog.progress(1.0, "Idle")
-    if image_examples(samples_index.classification, 3, example_text="Examples (Choose one of the following 3D shapes)"):
-        queue_auto_submit("clsauto")
-
 
 def demo_captioning():
     with st.form("capform"):
-        load_data = misc_utils.input_3d_shape('cap')
         cond_scale = st.slider('Conditioning Scale', 0.0, 4.0, 2.0, 0.1, key='capcondscl')
-        if st.form_submit_button("Generate a Caption") or auto_submit("capauto"):
-            pc = load_data(prog)
-            col2 = misc_utils.render_pc(pc)
-            prog.progress(0.5, "Running Generation")
-            cap = caption.pc_caption(model_b32, pc, cond_scale)
-            st.text(cap)
-            prog.progress(1.0, "Idle")
-    if image_examples(samples_index.cap, 3, example_text="Examples (Choose one of the following 3D shapes)"):
-        queue_auto_submit("capauto")
-
 
 def demo_pc2img():
     with st.form("sdform"):
-        load_data = misc_utils.input_3d_shape('sd')
         prompt = st.text_input("Prompt (Optional)", key='sdtprompt')
-        noise_scale = st.slider('Variation Level', 0, 5, 1)
-        cfg_scale = st.slider('Guidance Scale', 0.0, 30.0, 10.0)
-        steps = st.slider('Diffusion Steps', 8, 50, 25)
-        width = 640 # st.slider('Width', 480, 640, step=32)
-        height = 640 # st.slider('Height', 480, 640, step=32)
-        if st.form_submit_button("Generate") or auto_submit("sdauto"):
-            pc = load_data(prog)
-            col2 = misc_utils.render_pc(pc)
-            prog.progress(0.49, "Running Generation")
-            if torch.cuda.is_available():
-                with sys.clip_move_lock:
-                    clip_model.cpu()
-            img = sd_pc2img.pc_to_image(
-                model_l14, pc, prompt, noise_scale, width, height, cfg_scale, steps,
-                lambda i, t, _: prog.progress(0.49 + i / (steps + 1) / 2, "Running Diffusion Step %d" % i)
-            )
-            if torch.cuda.is_available():
-                with sys.clip_move_lock:
-                    clip_model.cuda()
-            with col2:
-                st.image(img)
-            prog.progress(1.0, "Idle")
-    if image_examples(samples_index.sd, 3, example_text="Examples (Choose one of the following 3D shapes)"):
-        queue_auto_submit("sdauto")
-
-
-def retrieval_results(results):
-    st.caption("Click the link to view the 3D shape")
-    for i in range(len(results) // 4):
-        cols = st.columns(4)
-        for j in range(4):
-            idx = i * 4 + j
-            if idx >= len(results):
-                continue
-            entry = results[idx]
-            with cols[j]:
-                ext_link = f"https://objaverse.allenai.org/explore/?query={entry['u']}"
-                st.image(entry['img'])
-                # st.markdown(f"[![thumbnail {entry['desc'].replace('\n', ' ')}]({entry['img']})]({ext_link})")
-                # st.text(entry['name'])
-                quote_name = entry['name'].replace('[', '\\[').replace(']', '\\]').replace('\n', ' ')
-                st.markdown(f"[{quote_name}]({ext_link})")
 
+def demo_retrieval():
+    with tab_pc:
+        with st.form("rpcform"):
+            k = st.slider("Number of items to retrieve", 1, 100, 16, key='rpc')
+            pc = utils.load_3D_shape('rpcinput')
+            if st.form_submit_button("Retrieve with Point Cloud"):
+                prog.progress(0.49, "Computing Embeddings")
 
-def retrieval_filter_expand(key):
-    with st.expander("Filters"):
-        sim_th = st.slider("Similarity Threshold", 0.05, 0.5, 0.1, key=key + 'rtsimth')
-        tag = st.text_input("Has Tag", "", key=key + 'rthastag')
-        col1, col2 = st.columns(2)
-        face_min = int(col1.text_input("Face Count Min", "0", key=key + 'rtfcmin'))
-        face_max = int(col2.text_input("Face Count Max", "34985808", key=key + 'rtfcmax'))
-        col1, col2 = st.columns(2)
-        anim_min = int(col1.text_input("Animation Count Min", "0", key=key + 'rtacmin'))
-        anim_max = int(col2.text_input("Animation Count Max", "563", key=key + 'rtacmax'))
-        tag_n = not bool(tag.strip())
-        anim_n = not (anim_min > 0 or anim_max < 563)
-        face_n = not (face_min > 0 or face_max < 34985808)
-        filter_fn = lambda x: (
-            (anim_n or anim_min <= x['anims'] <= anim_max)
-            and (face_n or face_min <= x['faces'] <= face_max)
-            and (tag_n or tag in x['tags'])
-        )
-        return sim_th, filter_fn
 
+    with tab_img:
+        with st.form("rimgform"):
+            k = st.slider("Number of items to retrieve", 1, 100, 16, key='rimage')
+            img = st.file_uploader("Upload an Image", key='rimageinput')
+            if st.form_submit_button("Retrieve with Image"):
+                prog.progress(0.49, "Computing Embeddings")
 
-def demo_retrieval():
     with tab_text:
         with st.form("rtextform"):
-            k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rtext')
-            text = st.text_input("Input Text", key="inputrtext")
+            k = st.slider("Number of items to retrieve", 1, 100, 16, key='rtext')
+            text = st.text_input("Input Text", key='rtextinput')
             sim_th, filter_fn = retrieval_filter_expand('text')
-            if st.form_submit_button("Run with Text") or auto_submit("rtextauto"):
+            if st.form_submit_button("Retrieve with Text"):
                 prog.progress(0.49, "Computing Embeddings")
                 device = clip_model.device
-                tn = clip_prep(
-                    text=[text], return_tensors='pt', truncation=True, max_length=76
-                ).to(device)
+                tn = clip_prep(text=[text], return_tensors='pt', truncation=True, max_length=76).to(device)
                 enc = clip_model.get_text_features(**tn).float().cpu()
-                prog.progress(0.7, "Running Retrieval")
-                retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn))
-                prog.progress(1.0, "Idle")
-        picked_sample = st.selectbox("Examples", ["Select..."] + samples_index.retrieval_texts)
-        text_last_example = st.session_state.get('text_last_example', None)
-        if text_last_example is None:
-            st.session_state.text_last_example = picked_sample
-        elif text_last_example != picked_sample and picked_sample != "Select...":
-            st.session_state.text_last_example = picked_sample
-            sq("inputrtext", picked_sample)
-            queue_auto_submit("rtextauto")
 
-    with tab_img:
-        submit = False
-        with st.form("rimgform"):
-            k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rimage')
-            pic = st.file_uploader("Upload an Image", key='rimageinput')
-            sim_th, filter_fn = retrieval_filter_expand('image')
-            if st.form_submit_button("Run with Image"):
-                submit = True
-        results_container = st.container()
-        sample_got = image_examples(samples_index.iret, 4, 'rimageinput')
-        if sample_got:
-            pic = sample_got
-        if sample_got or submit:
-            img = Image.open(pic)
-            with results_container:
-                st.image(img)
-            prog.progress(0.49, "Computing Embeddings")
-            device = clip_model.device
-            tn = clip_prep(images=[img], return_tensors="pt").to(device)
-            enc = clip_model.get_image_features(pixel_values=tn['pixel_values'].type(half)).float().cpu()
             prog.progress(0.7, "Running Retrieval")
             retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn))
+
             prog.progress(1.0, "Idle")
 
-    with tab_pc:
-        with st.form("rpcform"):
-            k = st.slider("Shapes to Retrieve", 1, 100, 16, key='rpc')
-            load_data = misc_utils.input_3d_shape('retpc')
-            sim_th, filter_fn = retrieval_filter_expand('pc')
-            if st.form_submit_button("Run with Shape") or auto_submit('rpcauto'):
-                pc = load_data(prog)
-                col2 = misc_utils.render_pc(pc)
-                prog.progress(0.49, "Computing Embeddings")
-                ref_dev = next(model_g14.parameters()).device
-                enc = model_g14(torch.tensor(pc[:, [0, 2, 1, 3, 4, 5]].T[None], device=ref_dev)).cpu()
-                prog.progress(0.7, "Running Retrieval")
-                retrieval_results(retrieval.retrieve(enc, k, sim_th, filter_fn))
-                prog.progress(1.0, "Idle")
-    if image_examples(samples_index.pret, 3):
-        queue_auto_submit("rpcauto")
+st.title("TripletMix Demo")
+st.caption("For faster inference without waiting in queue, you may clone the space and run it yourself.")
+prog = st.progress(0.0, "Idle")
+tab_cls, tab_pc, tab_img, tab_text, tab_sd, tab_cap = st.tabs([
+    "Classification",
+    "Retrieval w/ 3D",
+    "Retrieval w/ Image",
+    "Retrieval w/ Text",
+    "Image Generation",
+    "Captioning",
+])
 
 
+f32 = numpy.float32
+half = torch.float16 if torch.cuda.is_available() else torch.bfloat16
+clip_model, clip_prep = load_openclip()
+
 try:
     with tab_cls:
         demo_classification()
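
The filter_fn logic inside retrieval_filter_expand is carried over unchanged by this commit; only its position in the file moves. A minimal sketch of that logic as a pure function, runnable without Streamlit (make_filter_fn and the sample entries are illustrative, not part of the commit):

def make_filter_fn(tag='', face_min=0, face_max=34985808, anim_min=0, anim_max=563):
    # A check stays disabled while its inputs remain at the UI defaults,
    # mirroring the tag_n/anim_n/face_n flags in retrieval_filter_expand.
    tag_n = not bool(tag.strip())
    anim_n = not (anim_min > 0 or anim_max < 563)
    face_n = not (face_min > 0 or face_max < 34985808)
    return lambda x: (
        (anim_n or anim_min <= x['anims'] <= anim_max)
        and (face_n or face_min <= x['faces'] <= face_max)
        and (tag_n or tag in x['tags'])
    )

if __name__ == '__main__':
    entries = [
        {'anims': 0, 'faces': 1200, 'tags': ['chair']},
        {'anims': 4, 'faces': 900000, 'tags': ['character', 'rigged']},
    ]
    f = make_filter_fn(tag='rigged')
    print([f(e) for e in entries])  # -> [False, True]: only the second entry carries the tag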
 
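Both the re-added retrieval_results and the removed image_examples size their grid with floor division (len(results) // 4), so when the result count is not a multiple of four the trailing partial row is skipped and the idx >= len(results) guard never fires. A hedged variant using ceiling division that renders the remainder (show_grid is an illustrative name, not code from this commit):

import streamlit as st

def show_grid(results, ncols=4):
    # Ceiling division keeps the final, partially filled row.
    nrows = (len(results) + ncols - 1) // ncols
    for i in range(nrows):
        cols = st.columns(ncols)
        for j in range(ncols):
            idx = i * ncols + j
            if idx >= len(results):
                continue  # now reachable: trailing cells of the last row stay empty
            with cols[j]:
                st.image(results[idx]['img'])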