Commit 04048d3 (1 parent: 86e26e9) by Ammar-alhaj-ali: Update app.py
app.py
CHANGED
@@ -1,7 +1,7 @@
 import os
 os.system('pip install pyyaml==5.1')
 # workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
-
+os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')



@@ -79,7 +79,7 @@ def process_image(image):
 title = "Interactive demo: LayoutLMv3"
 description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740' target='_blank'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
-examples =[['
+examples =[['img.png']]

 css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
 #css = "@media screen and (max-width: 600px) { .output_image, .input_image {height:20rem !important; width: 100% !important;} }"
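The strings edited in the second hunk (title, description, article, examples, css) are the usual keyword arguments of a Gradio Interface, and the hunk header shows they sit just below a process_image(image) function. The sketch below is a rough guide to how the rest of app.py presumably wires them together; the component types, the gr.Interface call, and the launch options are assumptions, since those lines are not part of this diff.

import gradio as gr

# Sketch only: process_image and the title/description/article/examples/css
# strings are defined earlier in app.py (see the diff above); how they are
# passed to Gradio is not shown there, so this wiring is an assumption.
iface = gr.Interface(
    fn=process_image,                        # labels words as QUESTION/ANSWER/HEADER/OTHER
    inputs=gr.Image(type="pil"),             # assumed: a single document-image input
    outputs=gr.Image(type="pil", label="annotated image"),
    title=title,
    description=description,
    article=article,
    examples=examples,                       # [['img.png']] after this commit
    css=css,
)

iface.launch()

Note that Gradio's examples argument expects a list of example rows, one inner list per set of inputs, which is why the new value is the nested list [['img.png']] rather than a bare ['img.png']; the referenced img.png has to exist in the Space's repository for the example to load.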