Andyx committed
Commit f353300
1 Parent(s): 39188ed
Files changed (1)
  1. app.py +16 -8
app.py CHANGED
@@ -35,29 +35,37 @@ def predict1(img):
     b, c, h, w = in_img.size()
     # pad image such that the resolution is a multiple of 32
     w_pad = (math.ceil(w / 32) * 32 - w) // 2
+    w_odd_pad = w_pad
     h_pad = (math.ceil(h / 32) * 32 - h) // 2
-    in_img = img_pad(in_img, w_r=w_pad, h_r=h_pad)
+    h_odd_pad = h_pad
+
+    if w % 2 == 1:
+        w_odd_pad += 1
+    if h % 2 == 1:
+        h_odd_pad += 1
+
+    in_img = img_pad(in_img, w_pad=w_pad, h_pad=h_pad, w_odd_pad=w_odd_pad, h_odd_pad=h_odd_pad)
     with torch.no_grad():
         out_1, out_2, out_3 = model1(in_img)
     if h_pad != 0:
-        out_1 = out_1[:, :, h_pad:-h_pad, :]
+        out_1 = out_1[:, :, h_pad:-h_odd_pad, :]
     if w_pad != 0:
-        out_1 = out_1[:, :, :, w_pad:-w_pad]
+        out_1 = out_1[:, :, :, w_pad:-w_odd_pad]
     out_1 = out_1.squeeze(0)
     out_1 = PIL.Image.fromarray(torch.clamp(out_1 * 255, min=0, max=255
                                 ).byte().permute(1, 2, 0).cpu().numpy())
 
     return out_1
 
-def img_pad(x, h_r=0, w_r=0):
+def img_pad(x, w_pad, h_pad, w_odd_pad, h_odd_pad):
     '''
     Here the padding values are determined by the average r, g, b values across the training set
     of the FHDMi dataset. For evaluation on UHDM, you can also try the commented lines, where
     the mean values are calculated from the UHDM training set, yielding similar performance.
     '''
-    x1 = F.pad(x[:, 0:1, ...], (w_r, w_r, h_r, h_r), value=0.3827)
-    x2 = F.pad(x[:, 1:2, ...], (w_r, w_r, h_r, h_r), value=0.4141)
-    x3 = F.pad(x[:, 2:3, ...], (w_r, w_r, h_r, h_r), value=0.3912)
+    x1 = F.pad(x[:, 0:1, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.3827)
+    x2 = F.pad(x[:, 1:2, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.4141)
+    x3 = F.pad(x[:, 2:3, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.3912)
 
     y = torch.cat([x1, x2, x3], dim=1)
 
@@ -87,7 +95,7 @@ iface1 = gr.Interface(fn=predict1,
                      '003.jpg',
                      '004.jpg',
                      '005.jpg'],
-                     title = title,
+                     title = title,
                      description = description,
                      article = article
                      )
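Why the change: `math.ceil(w / 32) * 32 - w` is the total padding needed to round the width up to a multiple of 32, and since multiples of 32 are even, that total is odd exactly when `w` is odd. The old code put `w_pad = total // 2` on both sides, so for odd inputs a pixel was silently dropped and the padded resolution was no longer a multiple of 32. The commit keeps `w_pad` on the left/top, pads one extra pixel on the right/bottom (`w_odd_pad`, `h_odd_pad`), and crops the output asymmetrically to recover the original resolution. A standalone sketch of the arithmetic (illustration only, not from the repo):

import math

def pad_amounts(n, multiple=32):
    # Total padding needed to round n up to the next multiple.
    total = math.ceil(n / multiple) * multiple - n
    left = total // 2        # w_pad / h_pad in the commit
    right = left + (n % 2)   # w_odd_pad / h_odd_pad: one extra pixel when n is odd
    return left, right

# w = 1001 needs 23 pixels of padding in total: 11 + 11 = 1023 is not a
# multiple of 32, while 11 + 12 = 1024 is.
left, right = pad_amounts(1001)
assert (1001 + left + right) % 32 == 0
print(left, right)   # 11 12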
 
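For reference, the post-commit `img_pad` reassembled from the hunk above into a runnable sketch. The hunk's visible context ends at the `torch.cat`, so the final `return y` is assumed, and the shape check at the bottom is a hypothetical harness, not part of the app. The per-channel constants are the mean r, g, b values over the FHDMi training set, so the border resembles an average image rather than a black frame:

import torch
import torch.nn.functional as F

def img_pad(x, w_pad, h_pad, w_odd_pad, h_odd_pad):
    # F.pad's tuple is (left, right, top, bottom) for the last two dims,
    # so the extra odd pixel lands on the right/bottom edge.
    x1 = F.pad(x[:, 0:1, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.3827)
    x2 = F.pad(x[:, 1:2, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.4141)
    x3 = F.pad(x[:, 2:3, ...], (w_pad, w_odd_pad, h_pad, h_odd_pad), value=0.3912)
    y = torch.cat([x1, x2, x3], dim=1)
    return y  # assumed; the hunk's context cuts off after the cat

# Hypothetical check: an odd-sized 999x1001 input pads to 1024x1024,
# a multiple of 32 on both sides.
x = torch.rand(1, 3, 999, 1001)
y = img_pad(x, w_pad=11, h_pad=12, w_odd_pad=12, h_odd_pad=13)
print(y.shape)   # torch.Size([1, 3, 1024, 1024])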