ritutweets46 Aditibaheti committed on
Commit 58a0d29
1 Parent(s): ac65255

- Updates (95a3df2b577e3a6554f01a3e1bbe8046401e91da)


Co-authored-by: Aditi Baheti <Aditibaheti@users.noreply.huggingface.co>

Files changed (1)
  1. app.py +20 -26
app.py CHANGED
@@ -22,20 +22,19 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator
     ).images[0]
 
     return image
@@ -46,7 +45,7 @@ examples = [
     "A delicious ceviche cheesecake slice",
 ]
 
-css="""
+css = """
 #col-container {
     margin: 0 auto;
     max-width: 520px;
@@ -59,7 +58,6 @@ else:
     power_device = "CPU"
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
         # Text-to-Image Gradio Template
@@ -67,8 +65,7 @@ with gr.Blocks(css=css) as demo:
         """)
 
         with gr.Row():
-
-            prompt = gr.Text(
+            prompt = gr.Textbox(
                 label="Prompt",
                 show_label=False,
                 max_lines=1,
@@ -81,12 +78,11 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
+            negative_prompt = gr.Textbox(
                 label="Negative prompt",
                 max_lines=1,
                 placeholder="Enter a negative prompt",
-                visible=False,
+                visible=True,
             )
 
             seed = gr.Slider(
@@ -100,7 +96,6 @@ with gr.Blocks(css=css) as demo:
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
             with gr.Row():
-
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
@@ -118,32 +113,31 @@ with gr.Blocks(css=css) as demo:
                 )
 
             with gr.Row():
-
                 guidance_scale = gr.Slider(
                     label="Guidance scale",
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=0.0,
+                    value=7.5,
                 )
 
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=12,
+                    maximum=100,
                     step=1,
-                    value=2,
+                    value=50,
                 )
 
         gr.Examples(
-            examples = examples,
-            inputs = [prompt]
+            examples=examples,
+            inputs=[prompt]
        )
 
     run_button.click(
-        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result]
+        fn=infer,
+        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result]
     )
 
-demo.queue().launch()
+demo.queue().launch()
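
The hunks above use several names that are defined in the unchanged parts of app.py and therefore do not appear in this diff: the imports (`numpy`, `random`, `torch`, `gradio`), the `pipe` object, `power_device`, `MAX_SEED`, and the `run_button` (presumably a `gr.Button` created next to the prompt textbox). The sketch below shows one plausible version of that module-level context, assuming a `diffusers` pipeline; the checkpoint name, dtype choice, and loading options are assumptions for illustration and are not part of this commit.

```python
# Minimal sketch of the module-level context the diff assumes (not part of this commit).
# The checkpoint name and loading options below are placeholders.
import random

import gradio as gr
import numpy as np
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical model; the checkpoint actually used by this Space is not shown in the diff.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# ... def infer(...), examples, and css follow here (see the hunks above) ...

# Reported in the UI header; the "CPU" branch appears as context in the third hunk.
if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"
```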
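
Since the commit raises the advanced-setting defaults (guidance scale 7.5, 50 inference steps, step ceiling 100) and makes the negative-prompt box visible, a quick way to sanity-check the updated `infer` outside the Gradio UI is to call it directly with matching values. A small sketch, assuming the context above; the 512×512 size and the example negative prompt are assumptions, not values taken from this diff.

```python
# Exercise the updated infer() directly, mirroring the new UI defaults.
image = infer(
    prompt="A delicious ceviche cheesecake slice",
    negative_prompt="blurry, low quality",  # now exposed in the UI (visible=True)
    seed=0,
    randomize_seed=True,                    # the seed above is replaced by a random one
    width=512,                              # assumed slider default, not shown in the diff
    height=512,                             # assumed slider default, not shown in the diff
    guidance_scale=7.5,                     # new default from this commit
    num_inference_steps=50,                 # new default from this commit
)
image.save("output.png")                    # the pipeline returns PIL images
```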