aiqcamp commited on
Commit
7714bff
·
verified ·
1 Parent(s): 5077161

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -6
app.py CHANGED
@@ -1,8 +1,16 @@
1
  import os,sys
2
  from huggingface_hub import HfApi
 
3
 
4
- # Hugging Face 토큰 설정
5
- HF_TOKEN = "hf_[REDACTED]" # SECURITY: a real access token was committed here and is exposed in history — revoke/rotate it on huggingface.co; (original Korean comment: "replace with actual token")
 
 
 
 
 
 
 
6
 
7
  # install required packages
8
  os.system('pip install -q transformers')
@@ -13,7 +21,7 @@ os.environ["DGLBACKEND"] = "pytorch"
13
  print('Modules installed')
14
 
15
  # 필수 라이브러리 임포트
16
- from transformers import pipeline # transformers 임포트 추가
17
  from datasets import load_dataset
18
  import plotly.graph_objects as go
19
  import numpy as np
@@ -28,10 +36,16 @@ from utils.parsers_inference import parse_pdb
28
  from model.util import writepdb
29
  from utils.inpainting_util import *
30
 
31
- # LLM과 데이터셋 로드
 
 
 
32
  pipe = pipeline("text-generation",
33
- model="CohereForAI/c4ai-command-r7b-12-2024",
34
- token=HF_TOKEN)
 
 
 
35
  ds = load_dataset("lamm-mit/protein_secondary_structure_from_PDB",
36
  token=HF_TOKEN)
37
 
 
1
  import os,sys
2
  from huggingface_hub import HfApi
3
+ from dotenv import load_dotenv
4
 
5
+ # 환경 변수 로드 및 토큰 설정
6
+ load_dotenv()
7
+ HF_TOKEN = os.getenv("HF_TOKEN")
8
+ if not HF_TOKEN:
9
+ raise ValueError("HF_TOKEN not found in environment variables. Please set it in .env file")
10
+
11
+ # Hugging Face API 설정
12
+ os.environ["HUGGINGFACE_TOKEN"] = HF_TOKEN
13
+ os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
14
 
15
  # install required packages
16
  os.system('pip install -q transformers')
 
21
  print('Modules installed')
22
 
23
  # 필수 라이브러리 임포트
24
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
25
  from datasets import load_dataset
26
  import plotly.graph_objects as go
27
  import numpy as np
 
36
  from model.util import writepdb
37
  from utils.inpainting_util import *
38
 
39
+ # Cohere 모델 사용 (토큰 인증 포함)
40
+ model_name = "CohereForAI/c4ai-command-r-plus-08-2024"
41
+ tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
42
+ model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
43
  pipe = pipeline("text-generation",
44
+ model=model,
45
+ tokenizer=tokenizer,
46
+ trust_remote_code=True)
47
+
48
+ # 데이터셋 로드
49
  ds = load_dataset("lamm-mit/protein_secondary_structure_from_PDB",
50
  token=HF_TOKEN)
51