jinlinyi committed
Commit 30f88b5
1 Parent(s): 7e05a22
Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -36,7 +36,7 @@ description = """
 <p>Try our Gradio demo for Perspective Fields for single image camera calibration. You can click on one of the provided examples or upload your own image.</p>
 <h3>Available Models:</h3>
 <ol>
-<li>[NEW!!!]<strong>Paramnet-360Cities-edina:</strong> PerspectiveNet+ParamNet trained on 360cities and edina dataset.</li>
+<li><span style="color:red;">[NEW!!!]</span><strong>Paramnet-360Cities-edina:</strong> Our latest model trained on <a href="https://www.360cities.net/">360cities</a> and <a href="https://github.com/tien-d/EgoDepthNormal/tree/main#egocentric-depth-on-everyday-indoor-activities-edina-dataset">EDINA</a> dataset.</li>
 <li><strong>PersNet-360Cities:</strong> PerspectiveNet trained on the 360Cities dataset. This model predicts perspective fields and is designed to be robust and generalize well to both indoor and outdoor images.</li>
 <li><strong>PersNet_Paramnet-GSV-uncentered:</strong> A combination of PerspectiveNet and ParamNet trained on the Google Street View (GSV) dataset. This model predicts camera Roll, Pitch, and Field of View (FoV), as well as the Principal Point location.</li>
 <li><strong>PersNet_Paramnet-GSV-centered:</strong> PerspectiveNet+ParamNet trained on the GSV dataset. This model assumes the principal point is at the center of the image and predicts camera Roll, Pitch, and FoV.</li>
@@ -139,7 +139,7 @@ def inference(img, model_type):
             pred['pred_rel_cx'].cpu().item(),
             pred['pred_rel_cy'].cpu().item(),
         ]
-        param = f"roll {pred['pred_roll'].cpu().item() :.2f}\npitch {pred['pred_pitch'].cpu().item() :.2f}\nvirtical fov {pred['pred_general_vfov'].cpu().item() :.2f}\nfocal_length {pred['pred_rel_focal'].cpu().item()*img_h.shape[0] :.2f}\n"
+        param = f"roll {pred['pred_roll'].cpu().item() :.2f}\npitch {pred['pred_pitch'].cpu().item() :.2f}\nvirtical fov {pred['pred_general_vfov'].cpu().item() :.2f}\nfocal_length {pred['pred_rel_focal'].cpu().item()*img_h :.2f}\n"
         param += f"principal point {pred['pred_rel_cx'].cpu().item() :.2f} {pred['pred_rel_cy'].cpu().item() :.2f}"
         pred_vis = draw_from_r_p_f_cx_cy(
             img[:,:,::-1],
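The functional change in this hunk drops .shape[0] from the focal-length term, which suggests img_h already holds the image height in pixels as a plain number (calling .shape[0] on it would raise an AttributeError). ParamNet reports focal length relative to image height, so multiplying by the pixel height recovers it in pixel units. A minimal sketch of that conversion and its pinhole relation to vertical FoV; the function names here are illustrative, not taken from app.py:

import math

def focal_px_from_rel(rel_focal: float, img_h: int) -> float:
    # The model predicts focal length as a fraction of image height,
    # so a plain integer height (no .shape attribute) is all we need.
    return rel_focal * img_h

def vfov_deg(focal_px: float, img_h: int) -> float:
    # Pinhole relation: vfov = 2 * atan(h / (2 * f)).
    return math.degrees(2 * math.atan(img_h / (2 * focal_px)))

f = focal_px_from_rel(0.92, 480)                # ~441.6 px on a 480-px-tall image
print(round(f, 1), round(vfov_deg(f, 480), 1))  # 441.6 57.0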
@@ -158,14 +158,14 @@ print(examples)
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 model_zoo = {
 
-    'Paramnet-360Cities-edina-centered': {
+    'NEW:Paramnet-360Cities-edina-centered': {
         'weights': ['https://www.dropbox.com/s/z2dja70bgy007su/paramnet_360cities_edina_rpf.pth'],
         'opts': ['MODEL.WEIGHTS', 'models/paramnet_360cities_edina_rpf.pth', 'MODEL.DEVICE', device,],
         'config_file': 'models/paramnet_360cities_edina_rpf.yaml',
         'param': True,
     },
 
-    'Paramnet-360Cities-edina-uncentered': {
+    'NEW:Paramnet-360Cities-edina-uncentered': {
         'weights': ['https://www.dropbox.com/s/nt29e1pi83mm1va/paramnet_360cities_edina_rpfpp.pth'],
         'opts': ['MODEL.WEIGHTS', 'models/paramnet_360cities_edina_rpfpp.pth', 'MODEL.DEVICE', device,],
         'config_file': 'models/paramnet_360cities_edina_rpfpp.yaml',
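Each zoo entry bundles a Dropbox checkpoint URL, detectron2-style KEY VALUE override pairs under 'opts', a YAML config file, and (for the centered model) a 'param' flag marking models that regress camera parameters. The renamed 'NEW:'-prefixed keys are the labels shown in the Gradio model selector, so any lookup into model_zoo must now use the new strings. A minimal sketch of how one entry might be resolved, assuming detectron2's config API; the helper name and the download step are illustrative, not code from app.py:

import os
import urllib.request
from detectron2.config import get_cfg

def resolve_model(name: str, zoo: dict):
    # Hypothetical helper; app.py may wire this differently.
    entry = zoo[name]
    local_path = entry['opts'][1]  # the value paired with 'MODEL.WEIGHTS'
    if not os.path.exists(local_path):
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        # '?dl=1' asks Dropbox for a direct download rather than the preview page.
        urllib.request.urlretrieve(entry['weights'][0] + '?dl=1', local_path)
    cfg = get_cfg()
    # Project-specific keys may need to be registered on cfg before this merge.
    cfg.merge_from_file(entry['config_file'])
    cfg.merge_from_list(entry['opts'])  # applies MODEL.WEIGHTS and MODEL.DEVICE
    return cfg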