Update README.md
README.md (CHANGED)

@@ -15,31 +15,46 @@ This model is released under Apache v2.0 license.

[PLaMo-13B Release blog (Japanese)](https://tech.preferred.jp/ja/blog/llm-plamo/)
## Usage
Install the necessary libraries as follows:

```bash
python -m pip install numpy safetensors sentencepiece torch transformers
```
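The code below uses `tokenizer` and `model` objects that must already be loaded. A minimal loading sketch, using the standard `transformers` auto classes; the checkpoint id `pfnet/plamo-13b-instruct` and the loading options are assumptions, not stated in this README:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the checkpoint id below is illustrative; use the id of the model
# card this README belongs to. PLaMo ships custom modeling code, so
# trust_remote_code=True is required.
tokenizer = AutoTokenizer.from_pretrained(
    "pfnet/plamo-13b-instruct", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "pfnet/plamo-13b-instruct",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```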
Execute the Python code as follows:
```python
from typing import Any


def completion(prompt: str, max_new_tokens: int = 128) -> Any:
    # Tokenize the prompt and sample a continuation on the GPU.
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    generated_ids = model.generate(
        inputs.input_ids,
        eos_token_id=2,
        pad_token_id=3,
        max_new_tokens=max_new_tokens,
        temperature=1,
        top_p=0.95,
        do_sample=True,
    )
    return tokenizer.decode(
        generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
    )


def generate_prompt(messages: list) -> str:
    # Alpaca-style template. The two Japanese lines read: "Below is an
    # instruction that describes a task, paired with an input that provides
    # context. Write a response that appropriately completes the request."
    sep = "\n\n### "
    prompt = [
        "以下はタスクを説明する指示で、文脈を説明した入力とペアになっています。",
        "要求を適切に補完するよう応答を書いてください。",
    ]
    # Section headers: 指示 = instruction, 応答 = response, 入力 = input.
    roles = {"instruction": "指示", "response": "応答", "input": "入力"}
    for msg in messages:
        prompt.append(sep + roles[msg["role"]] + ":\n" + msg["content"])
    prompt.append(sep + roles["response"] + ":\n")
    return "".join(prompt)


################################

# "日本の首都はどこですか?" = "What is the capital of Japan?"
prompt = generate_prompt([
    {"role": "instruction", "content": "日本の首都はどこですか?"},
    # {"role": "input", "content": "..."}  # An extra input (optional)
])
print(completion(prompt, max_new_tokens=128))
```
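For reference, the `generate_prompt` call in this example produces the following string (derived directly from the template code; the instruction asks for the capital of Japan):

```
以下はタスクを説明する指示で、文脈を説明した入力とペアになっています。要求を適切に補完するよう応答を書いてください。

### 指示:
日本の首都はどこですか?

### 応答:
```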
## Model Details