gstaff committed
Commit
e6202b4
1 Parent(s): 23e32a3

Remove imgkit dependency, update example commands, update LCM client URL.

Files changed (2)
  1. app.py +10 -23
  2. requirements.txt +0 -1
app.py CHANGED
@@ -1,11 +1,9 @@
-# pip install imgkit
 # pip install html2image
 import base64
 import random
-import uuid
 from io import BytesIO
 
-import imgkit
+from html2image import Html2Image
 import os
 import pathlib
 import re
@@ -16,8 +14,6 @@ from gradio_client import Client
 import torch
 from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, Pipeline
 
-
-
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 if not HF_TOKEN:
@@ -26,7 +22,7 @@ if not HF_TOKEN:
 API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
-client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/")
+client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/--replicas/0867lltlv/")
 
 
 def init_speech_to_text_model() -> Pipeline:
@@ -334,22 +330,13 @@ def html_to_png(card_name, html):
 
     path = os.path.join('rendered_cards', save_name)
     try:
-        css = ['./css/mana.css', './css/keyrune.css',
-               './css/mtg_custom.css']
-        imgkit.from_string(html, path, {"xvfb": "", "enable-local-file-access": ""}, css=css)
-    except Exception as e:
-        try:
-            # For Windows local, requires 'html2image' package from pip.
-            from html2image import Html2Image
-            rendered_card_dir = 'rendered_cards'
-            hti = Html2Image(output_path=rendered_card_dir)
-            paths = hti.screenshot(html_str=html,
-                                   css_file='monstermaker.css',
-                                   save_as=save_name, size=(800, 1440))
-            print(paths)
-            path = paths[0]
-        except:
-            pass
+        rendered_card_dir = 'rendered_cards'
+        hti = Html2Image(output_path=rendered_card_dir)
+        paths = hti.screenshot(html_str=html, css_file='monstermaker.css', save_as=save_name, size=(800, 1440))
+        print(paths)
+        path = paths[0]
+    except:
+        pass
     print('OPENING IMAGE FROM FILE')
     img = Image.open(path).convert("RGB")
     print('CROPPING BACKGROUND')
@@ -448,7 +435,7 @@ with gr.Blocks(title='MonsterGen') as demo:
 audio_in = gr.Microphone(label="Record a voice request (click or press ctrl + ` to start/stop)",
                          type='filepath', elem_classes=["record-btn"])
 prompt_in = gr.Textbox(label="Or type a text request and press Enter", interactive=True,
-                       placeholder="Need an idea? Try one of these:\n- Create a creature card named 'WiFi Elemental'\n- Make it an instant\n- Change the color")
+                       placeholder="Need an idea? Try one of these:\n- Create a monster named 'WiFi Elemental'\n- Make it legendary\n- Change the description")
 with gr.Accordion(label='🤖 Chat Assistant Response', open=False):
     bot_text = gr.TextArea(label='Response', interactive=False)
 with gr.Row():
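
Note: the rewritten html_to_png body above boils down to the html2image pattern shown below. This is a minimal standalone sketch of that flow, assuming an already-built HTML card string and a local monstermaker.css next to the script; the helper name and the makedirs call are illustrative additions, not part of the app.

import os
from html2image import Html2Image

def render_card_png(html: str, save_name: str = "card.png") -> str:
    """Render an HTML card string to a PNG with html2image (headless Chrome/Chromium)."""
    rendered_card_dir = "rendered_cards"
    os.makedirs(rendered_card_dir, exist_ok=True)  # app.py assumes this folder already exists
    hti = Html2Image(output_path=rendered_card_dir)
    # screenshot() writes the file(s) and returns their paths; the app keeps the first one.
    paths = hti.screenshot(html_str=html,
                           css_file="monstermaker.css",
                           save_as=save_name,
                           size=(800, 1440))
    return paths[0]
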
requirements.txt CHANGED
@@ -4,4 +4,3 @@ ftfy
 gradio-client
 torch
 transformers
-imgkit
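
Note: for context on the LCM client URL change in app.py, here is a minimal sketch of talking to that Space through gradio_client. view_api() lists the Space's real endpoints; the commented predict() call is a hypothetical example (the prompt and api_name are assumptions, not taken from this commit).

from gradio_client import Client

# Replica-pinned Space URL introduced by this commit.
client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/--replicas/0867lltlv/")

# Print the Space's endpoints and their parameters; these vary per Space.
client.view_api()

# Hypothetical call; verify the endpoint name and argument order against view_api() first.
# result = client.predict("a watercolor sea monster", api_name="/predict")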