ahmedbrs committed on
Commit
fb9fd37
1 Parent(s): 20d921e

update style

Browse files
__pycache__/utils.cpython-38.pyc ADDED
Binary file (2.76 kB). View file
 
app.py CHANGED
@@ -36,13 +36,20 @@ def run(sketch, caption, threshold, seed):
36
  colors = plt.get_cmap("tab10").colors
37
  classes_colors = colors[3:len(classes) + 3]
38
 
39
- sketch2 = sketch['composite']
40
- # sketch2 = sketch2[:, :, 1:4]
 
 
 
 
 
 
 
 
 
41
  sketch2 = np.array(sketch2)
42
-
43
  pil_img = Image.fromarray(sketch2).convert('RGB')
44
  sketch_tensor = preprocess(pil_img).unsqueeze(0).to(device)
45
-
46
  # torchvision.utils.save_image(sketch_tensor, 'sketch_tensor.png')
47
 
48
  with torch.no_grad():
@@ -317,12 +324,12 @@ input[type=number][disabled] {
317
 
318
 
319
  with gr.Blocks(js=scripts, css=css, theme='gstaff/xkcd') as demo:
320
- gr.HTML("<h1 class='black-text'>Open Vocabulary Scene Sketch Semantic Understanding</div>")
321
- # gr.HTML("<div class='black-text'></div>")
322
  gr.HTML("<div class='black-text'></div>")
323
- gr.HTML("<div class='black-text'>Ahmed Bourouis, Judith Ellen Fan, Yulia Gryaditskaya</div>")
324
- gr.HTML("<div class='black-text'>CVPR, 2024</p>")
325
- gr.HTML("<a >Project page</p>")
 
326
 
327
 
328
  # gr.Markdown( "Scene Sketch Semantic Segmentation.", elem_classes=["black-txt" , "h1"] )
@@ -335,7 +342,7 @@ with gr.Blocks(js=scripts, css=css, theme='gstaff/xkcd') as demo:
335
  with gr.Column():
336
  # in_image = gr.Image( label="Sketch", type="pil", sources="upload" , height=512 )
337
  in_canvas_image = gr.Sketchpad( brush=gr.Brush(colors=["#000000"], color_mode="fixed" , default_size=2),
338
- elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
339
  label="Sketch" , canvas_size=(512,512) , sources=['upload'],
340
  interactive=True , layers= False, transforms=[] )
341
  query_selector = 'button[aria-label="Upload button"]'
@@ -349,19 +356,23 @@ with gr.Blocks(js=scripts, css=css, theme='gstaff/xkcd') as demo:
349
  <button class="sm black-text white-bg gray-border border-radius own-shadow svelte-cmf5ev bold" id="draw_btn" onclick="return document.querySelector('.controls-wrap button:nth-child(3)').click()"> Draw a new sketch</button>
350
  </div>
351
  """)
352
- in_textbox = gr.Textbox( lines=3 , elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,label="Caption your Sketch!", placeholder="Include the categories that you want the AI to segment. \n e.g. 'giraffe, clouds' or 'a boy flying a kite' ")
353
 
354
  with gr.Column():
355
- out_image = gr.Image(elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
 
356
  type="pil", label="Segmented Sketch" ) #, height=512, width=512)
 
 
357
  in_slider = gr.Slider(elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
358
- label="Confidence: Adjust AI agent confidence in guessing categories",
359
- value=0.6 , interactive=True, step=0.05, minimum=0, maximum=1)
 
360
 
361
  with gr.Row():
362
- segment_btn = gr.Button( 'Segment it !' , elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" , 'bold' , 'mt-mb-1' ] , size="sm")
363
  segment_btn.click(fn=run, inputs=[in_canvas_image , in_textbox , in_slider ], outputs=[out_image])
364
- gallery_label = gr.HTML("<h3 class='black-text'> <span class='black-text underline'>Gallery :</span> you can drag and drop any of the example sketches below into the sketch field above </div>")
365
 
366
  gallery= gr.HTML(f"""
367
  <div>
@@ -389,6 +400,7 @@ with gr.Blocks(js=scripts, css=css, theme='gstaff/xkcd') as demo:
389
  """)
390
 
391
  examples = gr.Examples(
 
392
  examples=[
393
  ['demo/sketch_1.png', 'giraffe looking at you', 0.6],
394
  ['demo/sketch_2.png', 'tree on the right', 0.6],
 
36
  colors = plt.get_cmap("tab10").colors
37
  classes_colors = colors[3:len(classes) + 3]
38
 
39
+ sketch2 = sketch['composite']
40
+
41
+ # when the drawing tool is used
42
+ if sketch2[:,:,0:3].sum() == 0:
43
+ temp = sketch2[:,:,3]
44
+ # invert it
45
+ temp = 255 - temp
46
+ sketch2 = np.repeat(temp[:, :, np.newaxis], 3, axis=2)
47
+ temp2= np.full_like(temp, 255)
48
+ sketch2 = np.dstack((sketch2, temp2))
49
+
50
  sketch2 = np.array(sketch2)
 
51
  pil_img = Image.fromarray(sketch2).convert('RGB')
52
  sketch_tensor = preprocess(pil_img).unsqueeze(0).to(device)
 
53
  # torchvision.utils.save_image(sketch_tensor, 'sketch_tensor.png')
54
 
55
  with torch.no_grad():
 
324
 
325
 
326
  with gr.Blocks(js=scripts, css=css, theme='gstaff/xkcd') as demo:
327
+ gr.HTML("<h1 class='black-text' style='text-align: center;'>Open Vocabulary Scene Sketch Semantic Understanding</div>")
 
328
  gr.HTML("<div class='black-text'></div>")
329
+ # gr.HTML("<div class='black-text' style='text-align: center;'><a href='https://ahmedbourouis.github.io/ahmed-bourouis/'>Ahmed Bourouis</a>,<a href='https://profiles.stanford.edu/judith-fan'>Judith Ellen Fan</a>, <a href='https://yulia.gryaditskaya.com/'>Yulia Gryaditskaya</a></div>")
330
+ gr.HTML("<div class='black-text' style='text-align: center;'>Ahmed Bourouis,Judith Ellen Fan, Yulia Gryaditskaya</div>")
331
+ gr.HTML("<div class='black-text' style='text-align: center;' >CVPR, 2024</p>")
332
+ gr.HTML("<div style='text-align: center;'><p><a href='https://ahmedbourouis.github.io/Scene_Sketch_Segmentation/'>Project page</a></p></div>")
333
 
334
 
335
  # gr.Markdown( "Scene Sketch Semantic Segmentation.", elem_classes=["black-txt" , "h1"] )
 
342
  with gr.Column():
343
  # in_image = gr.Image( label="Sketch", type="pil", sources="upload" , height=512 )
344
  in_canvas_image = gr.Sketchpad( brush=gr.Brush(colors=["#000000"], color_mode="fixed" , default_size=2),
345
+ image_mode="RGBA",elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
346
  label="Sketch" , canvas_size=(512,512) , sources=['upload'],
347
  interactive=True , layers= False, transforms=[] )
348
  query_selector = 'button[aria-label="Upload button"]'
 
356
  <button class="sm black-text white-bg gray-border border-radius own-shadow svelte-cmf5ev bold" id="draw_btn" onclick="return document.querySelector('.controls-wrap button:nth-child(3)').click()"> Draw a new sketch</button>
357
  </div>
358
  """)
359
+ in_textbox = gr.Textbox( lines=2, elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,label="Caption your Sketch!", placeholder="Include the categories that you want the AI to segment. \n e.g. 'giraffe, clouds' or 'a boy flying a kite' ")
360
 
361
  with gr.Column():
362
+ out_image = gr.Image( value=Image.new('RGB', (512, 512), color=(255, 255, 255)),
363
+ elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
364
  type="pil", label="Segmented Sketch" ) #, height=512, width=512)
365
+
366
+ # gr.HTML("<h3 class='black-text'> <span class='black-text underline'>Confidence:</span> Adjust AI agent confidence in guessing categories </div>")
367
  in_slider = gr.Slider(elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" ] ,
368
+ info="Adjust AI agent confidence in guessing categories",
369
+ label="Confidence:",
370
+ value=0.5 , interactive=True, step=0.05, minimum=0, maximum=1)
371
 
372
  with gr.Row():
373
+ segment_btn = gr.Button( 'Segment it !' , elem_classes=["white-bg", "gray-border" , "border-radius" ,"own-shadow" , 'bold' , 'mt-mb-1' ] , size="sm")
374
  segment_btn.click(fn=run, inputs=[in_canvas_image , in_textbox , in_slider ], outputs=[out_image])
375
+ gallery_label = gr.HTML("<h3 class='black-text'> <span class='black-text underline'>Gallery:</span> <span style='color: grey;'>you can click on any of the example sketches below</span> </div>")
376
 
377
  gallery= gr.HTML(f"""
378
  <div>
 
400
  """)
401
 
402
  examples = gr.Examples(
403
+ examples_per_page=30,
404
  examples=[
405
  ['demo/sketch_1.png', 'giraffe looking at you', 0.6],
406
  ['demo/sketch_2.png', 'tree on the right', 0.6],
models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (179 Bytes). View file
 
models/__pycache__/auxilary.cpython-38.pyc ADDED
Binary file (12.2 kB). View file
 
models/__pycache__/build_model.cpython-38.pyc ADDED
Binary file (3.62 kB). View file
 
models/__pycache__/ca.cpython-38.pyc ADDED
Binary file (6.23 kB). View file
 
models/__pycache__/clip.cpython-38.pyc ADDED
Binary file (13.6 kB). View file
 
models/__pycache__/clip_model.cpython-38.pyc ADDED
Binary file (13.9 kB). View file
 
models/__pycache__/our_model.cpython-38.pyc ADDED
Binary file (18.2 kB). View file
 
models/__pycache__/simple_tokenizer.cpython-38.pyc ADDED
Binary file (5.81 kB). View file
 
output.png CHANGED
vpt/__pycache__/launch.cpython-38.pyc ADDED
Binary file (786 Bytes). View file
 
vpt/src/configs/__pycache__/config.cpython-38.pyc ADDED
Binary file (2.51 kB). View file
 
vpt/src/configs/__pycache__/config_node.cpython-38.pyc ADDED
Binary file (1.03 kB). View file
 
vpt/src/utils/__pycache__/file_io.cpython-38.pyc ADDED
Binary file (406 Bytes). View file