measmonysuon committed on
Commit 44ec828
1 Parent(s): af1f292

Update app.py

Files changed (1)
  1. app.py +19 -7
app.py CHANGED
@@ -28,7 +28,6 @@ client_image = Client("measmonysuon/DALLE-4K")
 # Get user information to confirm authentication
 user_info = whoami(token=API_TOKEN)
 print("User Info:", user_info)
-print(f"Is CUDA available: {torch.cuda.is_available()}")

 # Define resolutions and default style
 resolutions = {
@@ -70,11 +69,7 @@ def gradio_interface(prompt, resolution_key, user_chat_id):
     api.request_space_hardware(repo_id='measmonysuon/DALLE-4K', hardware='a10g-large')

     # Get GPU info
-    gpu_info = {
-        "is_cuda_available": torch.cuda.is_available(),
-        "device_name": torch.cuda.get_device_name(0) if torch.cuda.is_available() else "N/A",
-        "total_memory": torch.cuda.get_device_properties(0).total_memory if torch.cuda.is_available() else "N/A"
-    }
+    gpu_info = get_gpu_info()

     try:
         # Generate the image
@@ -131,7 +126,24 @@ def handle_generate_image(prompt, resolution_key, user_chat_id):
         return None, "There was an error processing your photo. Please try again later."

     return None, "Insufficient points. Please get more points before generating an image."
-
+
+def get_gpu_info():
+    if torch.cuda.is_available():
+        device_name = torch.cuda.get_device_name(0)
+        device_properties = torch.cuda.get_device_properties(0)
+        total_memory = f"{device_properties.total_memory // (1024 ** 2)} MB"
+        return {
+            'is_cuda_available': True,
+            'device_name': device_name,
+            'total_memory': total_memory
+        }
+    else:
+        return {
+            'is_cuda_available': False,
+            'device_name': 'N/A',
+            'total_memory': 'N/A'
+        }
+
 def request_otp(user_chat_id):
     try:
         response = requests.post(f"{webhook_server}/request_otp", json={"user_chat_id": user_chat_id})
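For context, the new helper introduced by this commit can be exercised on its own. The snippet below is a minimal, standalone sketch, not part of the commit: only torch is required, and the __main__ driver is illustrative. It shows the dictionary shape get_gpu_info returns on CPU-only versus CUDA machines.

import torch

def get_gpu_info():
    # Mirrors the helper added in this commit: report whether CUDA is available,
    # the first device's name, and its total memory rounded down to whole MB.
    if torch.cuda.is_available():
        device_properties = torch.cuda.get_device_properties(0)
        return {
            'is_cuda_available': True,
            'device_name': torch.cuda.get_device_name(0),
            'total_memory': f"{device_properties.total_memory // (1024 ** 2)} MB",
        }
    return {
        'is_cuda_available': False,
        'device_name': 'N/A',
        'total_memory': 'N/A',
    }

if __name__ == "__main__":
    # On a CPU-only machine this prints:
    # {'is_cuda_available': False, 'device_name': 'N/A', 'total_memory': 'N/A'}
    # On the requested a10g-large hardware it would report the A10G's name and memory instead.
    print(get_gpu_info())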