Spaces:
Running
Running
Pedro Cuenca
committed on
Commit
•
9f85e8c
1
Parent(s):
720a5df
Reuse text field for the various messages.
Browse files
Change some strings.
Do not give details about BACKEND_SERVER unless in DEBUG mode.
Former-commit-id: f4760718fae25053b42fe0d47906b667ceb33e0f
- app/app.py +20 -14
app/app.py
CHANGED
@@ -3,7 +3,6 @@
|
|
3 |
|
4 |
import random
|
5 |
from dalle_mini.backend import ServiceError, get_images_from_backend
|
6 |
-
from dalle_mini.helpers import captioned_strip
|
7 |
|
8 |
import streamlit as st
|
9 |
|
@@ -12,24 +11,26 @@ st.sidebar.title("DALL-E Mini")
|
|
12 |
sc = st.sidebar.beta_columns(2)
|
13 |
sc[0].image('../img/logo.png', width=150)
|
14 |
sc[1].write(" ")
|
15 |
-
sc[1].markdown("Generate images from
|
16 |
st.sidebar.markdown("""
|
17 |
##### Dall-E Mini
|
18 |
___
|
19 |
-
Dall-E Mini is an AI model that generates images
|
20 |
|
21 |
Created by Boris Dayma et al. 2021 | [GitHub](https://github.com/borisdayma/dalle-mini) | See [Report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA)
|
22 |
""")
|
23 |
|
24 |
st.header('DALL-E mini Demo')
|
25 |
-
st.subheader('Generate images from
|
26 |
|
27 |
prompt = st.text_input("What do you want to see?")
|
28 |
|
29 |
#TODO: I think there's an issue where we can't run twice the same inference (not due to caching) - may need to use st.form
|
30 |
|
|
|
31 |
if prompt != "":
|
32 |
-
st.
|
|
|
33 |
|
34 |
try:
|
35 |
backend_url = st.secrets["BACKEND_SERVER"]
|
@@ -39,15 +40,20 @@ if prompt != "":
|
|
39 |
cols = st.beta_columns(4)
|
40 |
for i, img in enumerate(selected):
|
41 |
cols[i%4].image(img)
|
|
|
|
|
42 |
|
43 |
except ServiceError as error:
|
44 |
-
|
45 |
except KeyError:
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
3 |
|
4 |
import random
|
5 |
from dalle_mini.backend import ServiceError, get_images_from_backend
|
|
|
6 |
|
7 |
import streamlit as st
|
8 |
|
|
|
11 |
sc = st.sidebar.beta_columns(2)
|
12 |
sc[0].image('../img/logo.png', width=150)
|
13 |
sc[1].write(" ")
|
14 |
+
sc[1].markdown("Generate images from text")
|
15 |
st.sidebar.markdown("""
|
16 |
##### Dall-E Mini
|
17 |
___
|
18 |
+
Dall-E Mini is an AI model that generates images from any prompt you give it!
|
19 |
|
20 |
Created by Boris Dayma et al. 2021 | [GitHub](https://github.com/borisdayma/dalle-mini) | See [Report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA)
|
21 |
""")
|
22 |
|
23 |
st.header('DALL-E mini Demo')
|
24 |
+
st.subheader('Generate images from text')
|
25 |
|
26 |
prompt = st.text_input("What do you want to see?")
|
27 |
|
28 |
#TODO: I think there's an issue where we can't run twice the same inference (not due to caching) - may need to use st.form
|
29 |
|
30 |
+
DEBUG = False
|
31 |
if prompt != "":
|
32 |
+
container = st.empty()
|
33 |
+
container.markdown(f"Generating predictions for: **{prompt}**")
|
34 |
|
35 |
try:
|
36 |
backend_url = st.secrets["BACKEND_SERVER"]
|
|
|
40 |
cols = st.beta_columns(4)
|
41 |
for i, img in enumerate(selected):
|
42 |
cols[i%4].image(img)
|
43 |
+
|
44 |
+
container.markdown(f"**{prompt}**")
|
45 |
|
46 |
except ServiceError as error:
|
47 |
+
container.text(f"Service unavailable, status: {error.status_code}")
|
48 |
except KeyError:
|
49 |
+
if DEBUG:
|
50 |
+
container.markdown("""
|
51 |
+
**Error: BACKEND_SERVER unset**
|
52 |
+
|
53 |
+
Please, create a file called `.streamlit/secrets.toml` inside the app's folder and include a line to configure the server URL:
|
54 |
+
```
|
55 |
+
BACKEND_SERVER="<server url>"
|
56 |
+
```
|
57 |
+
""")
|
58 |
+
else:
|
59 |
+
container.markdown('Error -5, please try again or [report it](mailto:pcuenca-dalle@guenever.net).')
|