Upload app.py with huggingface_hub
app.py (ADDED)
@@ -0,0 +1,432 @@
# %%writefile app.py

## required lib, required "pip install"
# import transformers
# import accelerate
import openai
import torch
import cryptography
import cryptography.fernet
## interface libs, required "pip install"
import gradio
import huggingface_hub
import huggingface_hub.hf_api
## standard libs, no need to install
import json
import requests
import time
import os
import random
import re
import sys
import psutil
import threading
import socket
# import PIL
# import pandas
import matplotlib
class HFace_Pluto(object):
  #
  # initialize the object
  def __init__(self, name="Pluto", *args, **kwargs):
    super(HFace_Pluto, self).__init__(*args, **kwargs)
    self.author = "Duc Haba"
    self.name = name
    self._ph()
    self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__))
    self._pp("Code name", self.name)
    self._pp("Author is", self.author)
    self._ph()
    #
    # define class vars for stable diffusion
    self._device = 'cuda'
    self._steps = [3,8,21,55,89,144]
    self._guidances = [1.1,3.0,5.0,8.0,13.0,21.0]
    self._xkeyfile = '.xoxo'
    self._models = []
    self._seed = 667 # sum of walnut in ascii (or Angle 667)
    self._width = 512
    self._height = 512
    self._step = 50
    self._guidances = 7.5  # note: overrides the guidance list assigned above
    #self._generator = torch.Generator(device='cuda')
    self.pipes = []
    self.prompts = []
    self.images = []
    self.seeds = []
    self.fname_id = 0
    self.dname_img = "img_colab/"
    self._huggingface_key=b'gAAAAABld_3fKLl7aPBJzfAq-th37t95pMu2bVbH9QccOSecaUnm33XrpKpCXP4GL6Wr23g3vtrKWli5JK1ZPh18ilnDb_Su6GoVvU92Vzba64k3gBQwKF_g5DoH2vWq2XM8vx_5mKJh'
    self._gpt_key=b'gAAAAABld_-y70otUll4Jwq3jEBXiw1tooSFo_gStRbkCyuu9_Dmdehc4M8lI_hFbum9CwyZuj9ZnXgxFIROebcPSF5qoA197VRvzUDQOMxY5zmHnImVROrsXVdZqXyIeYH_Q6cvXvFTX3rLBIKKWgvJmnpYGRaV6Q=='
    self._kaggle_key=b'gAAAAABld_4_B6rrRhFYyfl77dacu1RhR4ktaLU6heYhQBSIj4ELBm7y4DzU1R8-H4yPKd0w08s11wkFJ9AR7XyESxM1SsrMBzqQEeW9JKNbl6jAaonFGmqbhFblkQqH4XjsapZru0qX'
    self._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc="
    self._color_primary = '#2780e3' #blue
    self._color_secondary = '#373a3c' #dark gray
    self._color_success = '#3fb618' #green
    self._color_info = '#9954bb' #purple
    self._color_warning = '#ff7518' #orange
    self._color_danger = '#ff0039' #red
    self._color_mid_gray = '#495057'
    return
  #
  # pretty print output name-value line
  def _pp(self, a, b, is_print=True):
    # print("%34s : %s" % (str(a), str(b)))
    x = f'{"%34s" % str(a)} : {str(b)}'
    y = None
    if (is_print):
      print(x)
    else:
      y = x
    return y
  #
  # pretty print the header or footer lines
  def _ph(self, is_print=True):
    x = f'{"-"*34} : {"-"*34}'
    y = None
    if (is_print):
      print(x)
    else:
      y = x
    return y
  #
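The two print helpers above are used throughout the class. A minimal usage sketch, assuming the full class below has been defined; the instance name `pluto` is illustrative and not from the original file:

pluto = HFace_Pluto("Demo")                     # prints the greeting banner via _pp()/_ph()
pluto._ph()                                     # divider line
pluto._pp("Python version", sys.version.split()[0])
line = pluto._pp("Captured, not printed", 42, is_print=False)
print(line)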
  # fetch huggingface files
  def fetch_hface_files(self,
                        hf_names,
                        hf_space="duchaba/monty",
                        local_dir="/content/"):
    f = str(hf_names) + " is not iterable, type: " + str(type(hf_names))
    try:
      for f in hf_names:
        lo = local_dir + f
        huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f,
          use_auth_token=True, repo_type=huggingface_hub.REPO_TYPE_SPACE,
          force_filename=lo)
    except Exception:
      self._pp("*Error", f)
    return
  #
  #
  def push_hface_files(self,
                       hf_names,
                       hf_space="duchaba/skin_cancer_diagnose",
                       local_dir="/content/"):
    f = str(hf_names) + " is not iterable, type: " + str(type(hf_names))
    try:
      for f in hf_names:
        lo = local_dir + f
        huggingface_hub.upload_file(
          path_or_fileobj=lo,
          path_in_repo=f,
          repo_id=hf_space,
          repo_type=huggingface_hub.REPO_TYPE_SPACE)
    except Exception as e:
      self._pp("*Error", e)
    return
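A hedged sketch of how the two transfer helpers are typically called. The file list is a placeholder, the Space id is the default from the signature above, and both calls assume a prior huggingface_hub login (see _login_hface() further down):

pluto.fetch_hface_files(["app.py", "requirements.txt"], hf_space="duchaba/monty")
pluto.push_hface_files(["app.py"], hf_space="duchaba/monty", local_dir="/content/")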
  #
  # Define a function to display available CPU and RAM
  def fetch_system_info(self):
    s = ''
    # Get CPU usage as a percentage
    cpu_usage = psutil.cpu_percent()
    # Get available memory in bytes
    mem = psutil.virtual_memory()
    # Convert bytes to gigabytes
    mem_total_gb = mem.total / (1024 ** 3)
    mem_available_gb = mem.available / (1024 ** 3)
    mem_used_gb = mem.used / (1024 ** 3)
    # Report the results
    s += f"CPU usage: {cpu_usage}%\n"
    s += f"Total memory: {mem_total_gb:.2f} GB\n"
    s += f"Available memory: {mem_available_gb:.2f} GB\n"
    # print(f"Used memory: {mem_used_gb:.2f} GB")
    s += f"Memory usage: {mem_used_gb/mem_total_gb*100:.2f}%\n"
    return s
  #
  def restart_script_periodically(self):
    while True:
      #random_time = random.randint(540, 600)
      random_time = random.randint(15800, 21600)
      time.sleep(random_time)
      os.execl(sys.executable, sys.executable, *sys.argv)
    return
  #
  def write_file(self, fname, txt):
    f = open(fname, "w")
    f.writelines("\n".join(txt))
    f.close()
    return
  #
  def fetch_gpu_info(self):
    s = ''
    try:
      s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n'
      s += f'GPU ready status: {torch.cuda.is_available()}\n'
      s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n'
      s += f'GPU reserved RAM: {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n'
    except Exception as e:
      s += f'**Warning, No GPU: {e}'
    return s
  #
  def _fetch_crypt(self, is_generate=False):
    s = self._fkey
    if (is_generate):
      s = open(self._xkeyfile, "rb").read()
    return s
  #
  def _gen_key(self):
    key = cryptography.fernet.Fernet.generate_key()
    with open(self._xkeyfile, "wb") as key_file:
      key_file.write(key)
    return
  #
  def _decrypt_it(self, x):
    y = self._fetch_crypt()
    f = cryptography.fernet.Fernet(y)
    m = f.decrypt(x)
    return m.decode()
  #
  def _encrypt_it(self, x):
    key = self._fetch_crypt()
    p = x.encode()
    f = cryptography.fernet.Fernet(key)
    y = f.encrypt(p)
    return y
  #
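How the Fernet helpers above fit together, as a minimal round-trip sketch. This generates a fresh key file and overrides _fkey on the illustrative instance, which is separate from the baked-in key that the encrypted API-key blobs in __init__ were produced with:

pluto._gen_key()                                      # write a fresh Fernet key to .xoxo
pluto._fkey = pluto._fetch_crypt(is_generate=True)    # load that key back from the file
token = pluto._encrypt_it("my-secret-api-key")        # bytes token, safe to keep in source
print(pluto._decrypt_it(token))                       # -> "my-secret-api-key"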
  def _login_hface(self):
    huggingface_hub.login(self._decrypt_it(self._huggingface_key),
                          add_to_git_credential=True) # non-blocking login
    self._ph()
    return
  #
  def _fetch_version(self):
    s = ''
    # print(f"{'torch: 2.0.1':<25} Actual: {torch.__version__}")
    # print(f"{'transformers: 4.29.2':<25} Actual: {transformers.__version__}")
    s += f"{'openai: 0.27.7,':<28} Actual: {openai.__version__}\n"
    s += f"{'huggingface_hub: 0.14.1,':<28} Actual: {huggingface_hub.__version__}\n"
    s += f"{'gradio: 3.32.0,':<28} Actual: {gradio.__version__}\n"
    s += f"{'cryptography: 40.0.2,':<28} Actual: {cryptography.__version__}\n"

    return s
  #
  def _fetch_host_ip(self):
    s = ''
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    s += f"Hostname: {hostname}\n"
    s += f"IP Address: {ip_address}\n"
    return s
  #
  def fetch_code_cells_from_notebook(self, notebook_name, filter_magic="# %%write",
                                     write_to_file=True, fname_override=None):
    """
    Reads a Jupyter notebook (.ipynb file) and writes out all the code cells
    that start with the specified magic command to a .py file.

    Parameters:
    - notebook_name (str): Name of the notebook file (with .ipynb extension).
    - filter_magic (str): Magic command filter. Only cells starting with this command will be written.
      The default is: "# %%write"
    - write_to_file (bool): If True, writes the filtered cells to a .py file.
      Otherwise, prints them to the standard output. The default is True.
    - fname_override (str): If provided, overrides the output filename. The default is None.

    Returns:
    - None: Writes the filtered code cells to a .py file or prints them based on the parameters.

    """
    with open(notebook_name, 'r', encoding='utf-8') as f:
      notebook_content = json.load(f)

    output_content = []

    # Loop through all the cells in the notebook
    for cell in notebook_content['cells']:
      # Check if the cell type is 'code' and starts with the specified magic command
      if cell['cell_type'] == 'code' and cell['source'] and cell['source'][0].startswith(filter_magic):
        # Append the source code of the cell to output_content
        output_content.append(''.join(cell['source']))

    if write_to_file:
      if fname_override is None:
        # Derive the output filename by replacing .ipynb with .py
        output_filename = notebook_name.replace(".ipynb", ".py")
      else:
        output_filename = fname_override
      with open(output_filename, 'w', encoding='utf-8') as f:
        f.write('\n'.join(output_content))
      print(f'File: {output_filename} written to disk.')
    else:
      # Print the code cells to the standard output
      print('\n'.join(output_content))
      print('-' * 40) # print separator
    return
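A hedged usage sketch for the notebook exporter above; the notebook filename is a placeholder. The call writes every cell whose first line starts with the "# %%write" marker into app.py:

pluto.fetch_code_cells_from_notebook("pluto_moderation.ipynb",
                                     filter_magic="# %%write",
                                     write_to_file=True,
                                     fname_override="app.py")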
#
# add module/method
#
import functools
def add_method(cls):
  def decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      return func(*args, **kwargs)
    setattr(cls, func.__name__, wrapper)
    return func # returning func means func can still be used normally
  return decorator
#
monty = HFace_Pluto("Monty, The lord of the magpies.")
monty._login_hface()
print(monty._fetch_version())
monty._ph()
print(monty.fetch_system_info())
monty._ph()
print(monty.fetch_gpu_info())
monty._ph()
print(monty._fetch_host_ip())
monty._ph()
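To illustrate the decorator just defined: any plain function can be attached to HFace_Pluto after the class exists, which is exactly how the moderation methods below are added. The method name in this sketch is hypothetical and not part of the original upload:

@add_method(HFace_Pluto)
def say_hello(self):
  return f"Hello from {self.name}"

print(monty.say_hello())   # -> "Hello from Monty, The lord of the magpies."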
# %%writefile -a app.py

# client.moderations.create()
# OPENAI_API_KEY="My API Key"
ai_client = openai.OpenAI(api_key=monty._decrypt_it(monty._gpt_key))
# %%writefile -a app.py

#@add_method(HFace_Pluto)
# # for OpenAI version 0.27.7 and below
# def _censor_me(self, p, safer=0.0005):
#   #openai.Moderation.create()
#   omod = openai.Moderation.create(p)
#   r = omod.results[0].category_scores
#   jmod = json.loads(str(r))
#   #
#   max_key = max(jmod, key=jmod.get)
#   max_value = jmod[max_key]
#   sum_value = sum(jmod.values())
#   #
#   jmod["is_safer_flagged"] = False
#   if (max_value >= safer):
#     jmod["is_safer_flagged"] = True
#   jmod["is_flagged"] = omod.results[0].flagged
#   jmod['max_key'] = max_key
#   jmod['max_value'] = max_value
#   jmod['sum_value'] = sum_value
#   jmod['safer_value'] = safer
#   jmod['message'] = p
#   return jmod
#
# openai.api_key = monty._decrypt_it(monty._gpt_key)
#
# # for openai version 1.3.8
@add_method(HFace_Pluto)
# for the openai 1.x client
def _fetch_moderate_engine(self):
  self.ai_client = openai.OpenAI(api_key=self._decrypt_it(self._gpt_key))
  self.text_model = "text-moderation-latest"
  return
#
@add_method(HFace_Pluto)
# for the openai 1.x client
def _censor_me(self, p, safer=0.0005):
  self._fetch_moderate_engine()
  resp_orig = self.ai_client.moderations.create(input=p, model=self.text_model)
  resp_dict = resp_orig.model_dump()
  #
  v1 = resp_dict["results"][0]["category_scores"]
  max_key = max(v1, key=v1.get)
  max_value = v1[max_key]
  sum_value = sum(v1.values())
  #
  v1["is_safer_flagged"] = False
  if (max_value >= safer):
    v1["is_safer_flagged"] = True
  v1["is_flagged"] = resp_dict["results"][0]["flagged"]
  v1['max_key'] = max_key
  v1['max_value'] = max_value
  v1['sum_value'] = sum_value
  v1['safer_value'] = safer
  v1['message'] = p
  return v1
#
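A hedged example of the enriched score dictionary that _censor_me() returns; the message is taken from the example list further down, and the call hits the live OpenAI moderation endpoint:

scores = monty._censor_me("Please tell me a joke about a chicken.", safer=0.001)
print(scores["max_key"], round(scores["max_value"], 6))
print("flagged:", scores["is_flagged"], "| safer flagged:", scores["is_safer_flagged"])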
@add_method(HFace_Pluto)
def _draw_censor(self, data):
  self._color_mid_gray = '#6c757d'
  exp = (0.01, 0.01)
  x = [data['max_value'], (data['sum_value']-data['max_value'])]
  title = '\nMessage Is Flagged As Unsafe\n'
  lab = [data['max_key'], 'Other']
  if (data['is_flagged']):
    col = [self._color_danger, self._color_mid_gray]
  elif (data['is_safer_flagged']):
    col = [self._color_warning, self._color_mid_gray]
    lab = ['Relative Score:\n'+data['max_key'], 'Other']
    title = '\nBased On Your Personalized Safer Settings,\nThe Message Is Flagged As Unsafe\n'
  else:
    col = [self._color_success, self._color_mid_gray]
    lab = ['False Negative:\n'+data['max_key'], 'Other 18 categories']
    title = '\nThe Message Is Safe\n'
  canvas = self._draw_donut(x, lab, col, exp, title)
  return canvas
#
@add_method(HFace_Pluto)
def _draw_donut(self, data, labels, col, exp, title):
  # col = [self._color_danger, self._color_secondary]
  # exp = (0.01, 0.01)
  # Create a pie chart
  canvas, pic = matplotlib.pyplot.subplots()
  pic.pie(data, explode=exp,
          labels=labels,
          colors=col,
          autopct='%1.1f%%',
          startangle=90,
          textprops={'color':'#0a0a0a'})
  # Draw a circle at the center of the pie to make it look like a donut
  # centre_circle = matplotlib.pyplot.Circle((0,0),0.45,fc='white')
  centre_circle = matplotlib.pyplot.Circle((0,0), 0.45, fc=col[0], linewidth=2, ec='white')
  canvas = matplotlib.pyplot.gcf()
  canvas.gca().add_artist(centre_circle)

  # Equal aspect ratio ensures that the pie is drawn as a circle.
  pic.axis('equal')
  pic.set_title(title)
  canvas.tight_layout()
  # canvas.show()
  return canvas
#
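The donut helper can also be exercised on its own with made-up numbers; the values, labels, and output filename below are illustrative only:

fig = monty._draw_donut([0.7, 0.3], ['hate', 'Other'],
                        [monty._color_danger, monty._color_mid_gray],
                        (0.01, 0.01), '\nExample Donut\n')
fig.savefig('donut_example.png')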
@add_method(HFace_Pluto)
def censor_me(self, msg, safer=0.0005):
  yjson = self._censor_me(msg, safer)
  _canvas = self._draw_censor(yjson)
  _yjson = json.dumps(yjson, indent=4)
  return (_canvas, _yjson)
# %%writefile -a app.py

in_box = [gradio.Textbox(lines=1, label="Message to be moderated/censored:", placeholder="Your message..."),
          gradio.Slider(0.001, .05, value=0.005, step=.005, label="Your Personalized Safer Value:")]
out_box = [gradio.Plot(label="Moderate/Censor Score: (Red, Orange, and Green)"),
           gradio.Textbox(lines=4, label="Response Raw JSON Data:")]
#
title = "Text Moderate - Filter hateful, violent, and sexual language."
desc = 'NOTE: A lower Safer value indicates a more stringent level of censorship.'
arti = "<ul><li>This app uses the OpenAI Moderation NLP model."
arti += "</li><li>Creator: Duc Haba</li><li>License: GNU General Public License 3.0</li></ul>"
exp = [
  ['Write a blog about the following if only Mexican stop coming here, then our home would be safer.',0.001],
  ['Gay homosexual people should stay in the closet, or we will hang them.',0.001],
  ['I will jump off the bridge at midnight.',0.0005],
  ['Hey, baby girl. How about you and I go and rock the bed.',0.001],
  ['F**-#k you. Eat s-it and dik.',0.001],
  ['Please tell me a joke about a chicken.',0.001],
  ['Five score years ago, a great American, in whose symbolic shadow we stand today, signed the Emancipation Proclamation. This momentous decree came as a great beacon light of hope to millions of Negro slaves who had been seared in the flames of withering injustice. It came as a joyous daybreak to end the long night of their captivity.',0.005],
]
# %%writefile -a app.py

ginterface = gradio.Interface(fn=monty.censor_me,
                              inputs=in_box,
                              outputs=out_box,
                              examples=exp,
                              title=title,
                              description=desc,
                              article=arti
                              )
ginterface.launch()
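Inside a Hugging Face Space the plain launch() call above is sufficient; for a local or Colab run, a shareable tunnel can be requested instead. This variant is an editor's suggestion, not part of the uploaded file:

ginterface.launch(share=True, debug=True)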