Tuchuanhuhuhu committed
Commit a363f1b
1 Parent(s): 007cc3d

chore: print the error when fetching the conversation fails

Files changed (1): modules/models/models.py (+18 -20)
modules/models/models.py CHANGED
@@ -1,31 +1,26 @@
 from __future__ import annotations
-from typing import TYPE_CHECKING, List
 
-import logging
+import base64
 import json
-import commentjson as cjson
+import logging
 import os
-import sys
-import requests
-import urllib3
 import platform
-import base64
+import traceback
+import uuid
 from io import BytesIO
-from PIL import Image
 
-from tqdm import tqdm
 import colorama
-import asyncio
-import aiohttp
-from enum import Enum
-import uuid
+import commentjson as cjson
+import requests
+from PIL import Image
 
-from ..presets import *
+from modules import config
+
+from .. import shared
+from ..config import retrieve_proxy, sensitive_id, usage_limit
 from ..index_func import *
+from ..presets import *
 from ..utils import *
-from .. import shared
-from ..config import retrieve_proxy, usage_limit, sensitive_id
-from modules import config
 from .base_model import BaseLLMModel, ModelType
 
 
@@ -97,6 +92,7 @@ class OpenAIClient(BaseLLMModel):
         rounded_usage = round(usage_data["total_usage"] / 100, 5)
         usage_percent = round(usage_data["total_usage"] / usage_limit, 2)
         from ..webui import get_html
+
         # return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
         return get_html("billing_info.html").format(
             label = i18n("本月使用金额"),
@@ -175,6 +171,7 @@ class OpenAIClient(BaseLLMModel):
                 timeout=timeout,
             )
         except:
+            traceback.print_exc()
             return None
         return response
 
@@ -286,8 +283,8 @@
 class ChatGLM_Client(BaseLLMModel):
     def __init__(self, model_name, user_name="") -> None:
         super().__init__(model_name=model_name, user=user_name)
-        from transformers import AutoTokenizer, AutoModel
         import torch
+        from transformers import AutoModel, AutoTokenizer
         global CHATGLM_TOKENIZER, CHATGLM_MODEL
         if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:
             system_name = platform.system()
@@ -363,10 +360,11 @@ class LLaMA_Client(BaseLLMModel):
         user_name=""
     ) -> None:
         super().__init__(model_name=model_name, user=user_name)
+        from lmflow.args import (DatasetArguments, InferencerArguments,
+                                 ModelArguments)
         from lmflow.datasets.dataset import Dataset
-        from lmflow.pipeline.auto_pipeline import AutoPipeline
         from lmflow.models.auto_model import AutoModel
-        from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
+        from lmflow.pipeline.auto_pipeline import AutoPipeline
 
         self.max_generation_token = 1000
         self.end_string = "\n\n"
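
The functional change in this commit is the `traceback.print_exc()` call in the second `OpenAIClient` hunk: the bare `except` previously swallowed any failure of the completion request and returned `None` with no diagnostic output. A minimal sketch of the pattern, assuming a `requests.post`-based wrapper like the one the hunk patches (the function name, URL, and payload below are placeholders; only `timeout=timeout`, the bare `except`, and `return None` appear in the diff):

import traceback

import requests


def post_completion(url, headers, payload, timeout):
    # Hypothetical stand-in for the patched OpenAIClient request method.
    try:
        response = requests.post(
            url,
            headers=headers,
            json=payload,
            timeout=timeout,
        )
    except:
        # The line this commit adds: the caller still receives None on
        # failure, but the traceback is now printed instead of vanishing.
        traceback.print_exc()
        return None
    return response

The matching `import traceback` is added in the header hunk. The remaining hunks appear to be isort-style housekeeping: imports grouped as stdlib / third-party / local and alphabetized, with unused ones (`typing`, `sys`, `urllib3`, `tqdm`, `asyncio`, `aiohttp`, `enum`) dropped.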