Removed print statements from model file
models/GroundingDINO/groundingdino.py
CHANGED
@@ -328,7 +328,6 @@ class GroundingDINO(nn.Module):
         )

         tokenized["input_ids"] = torch.stack(new_input_ids)
-        print(tokenized["input_ids"])

         (
             text_self_attention_masks,
@@ -398,7 +397,6 @@ class GroundingDINO(nn.Module):
                            dictionnaries containing the two above keys for each decoder layer.
         """

-        print("inside forward")
         if targets is None:
             captions = kw["captions"]
         else:
@@ -406,11 +404,9 @@ class GroundingDINO(nn.Module):

         # encoder texts

-        print("moving text to device")
         tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to(
             samples.device
         )
-        print("done moving text to device")

         one_hot_token = tokenized

@@ -479,8 +475,6 @@ class GroundingDINO(nn.Module):
         # Get visual exemplar tokens.
         bs = len(exemplars)
         num_exemplars = exemplars[0].shape[0]
-        print(exemplars)
-        print(num_exemplars)
         if num_exemplars > 0:
             exemplar_tokens = (
                 roi_align(