Upload README.md with huggingface_hub
README.md (CHANGED)
@@ -21,7 +21,7 @@ model-index:
       type: OpenAI/Gym/Box2d-LunarLander-v2
     metrics:
     - type: mean_reward
-      value:
+      value: 163.0 +/- 77.34
       name: mean_reward
 ---
 
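The `value` field reports the evaluation return in the `mean +/- deviation` format used on these DI-engine model cards, presumably the mean and standard deviation of episode returns over the eight evaluator episodes configured later in this card. The sketch below illustrates how such a figure is computed; the `episode_returns` numbers are placeholders, not the actual evaluation data behind `163.0 +/- 77.34`.

```python
import numpy as np

# Placeholder returns for n_evaluator_episode = 8 evaluation runs
# (illustrative only, not the data behind the reported 163.0 +/- 77.34).
episode_returns = np.array([210.4, 95.1, 187.6, 42.3, 231.0, 155.8, 198.2, 142.9])

mean_reward = episode_returns.mean()
std_reward = episode_returns.std()
print(f"value: {mean_reward:.1f} +/- {std_reward:.2f}")
```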
@@ -60,23 +60,7 @@ python3 -u run.py
 ```
 **run.py**
 ```python
-
-from ding.config import Config
-from easydict import EasyDict
-import torch
-
-# Pull model from files which are git cloned from huggingface
-policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
-cfg = EasyDict(Config.file_to_dict("policy_config.py"))
-# Instantiate the agent
-agent = C51Agent(
-    env="lunarlander_discrete", exp_name="Lunarlander-v2-C51", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
-)
-# Continue training
-agent.train(step=5000)
-# Render the new agent performance
-agent.deploy(enable_save_replay=True)
-
+# [More Information Needed]
 ```
 </details>
 
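The deployment snippet removed above calls `C51Agent` without ever importing it. A runnable version needs that import; the sketch below assumes the agent wrapper lives in `ding.bonus` (an assumption, since the original block does not show the import) and otherwise mirrors the removed code.

```python
import torch
from easydict import EasyDict

from ding.config import Config
from ding.bonus import C51Agent  # assumed import path; not shown in the original snippet

# Load the checkpoint and config placed alongside this script by `git clone`
policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
cfg = EasyDict(Config.file_to_dict("policy_config.py"))

# Instantiate the agent, fine-tune briefly, then record a replay video
agent = C51Agent(
    env="lunarlander_discrete",
    exp_name="Lunarlander-v2-C51",
    cfg=cfg.exp_config,
    policy_state_dict=policy_state_dict,
)
agent.train(step=5000)
agent.deploy(enable_save_replay=True)
```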
@@ -91,20 +75,7 @@ python3 -u run.py
 ```
 **run.py**
 ```python
-
-from huggingface_ding import pull_model_from_hub
-
-# Pull model from Hugggingface hub
-policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/Lunarlander-v2-C51")
-# Instantiate the agent
-agent = C51Agent(
-    env="lunarlander_discrete", exp_name="Lunarlander-v2-C51", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
-)
-# Continue training
-agent.train(step=5000)
-# Render the new agent performance
-agent.deploy(enable_save_replay=True)
-
+# [More Information Needed]
 ```
 </details>
 
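`pull_model_from_hub` comes from DI-engine's `huggingface_ding` helper package and is not shown here. Judging from the git-clone variant above, which loads `pytorch_model.bin` and `policy_config.py` by hand, a rough equivalent using the standard `huggingface_hub` API might look like the following; this is a sketch of the expected behaviour, not the helper's actual implementation.

```python
import torch
from easydict import EasyDict
from huggingface_hub import hf_hub_download

from ding.config import Config

repo_id = "OpenDILabCommunity/Lunarlander-v2-C51"

# Download the same two artifacts the git-clone workflow reads from disk
ckpt_path = hf_hub_download(repo_id=repo_id, filename="pytorch_model.bin")
cfg_path = hf_hub_download(repo_id=repo_id, filename="policy_config.py")

policy_state_dict = torch.load(ckpt_path, map_location=torch.device("cpu"))
cfg = EasyDict(Config.file_to_dict(cfg_path))
# The agent is then built exactly as in the previous block:
# agent = C51Agent(env="lunarlander_discrete", exp_name="Lunarlander-v2-C51",
#                  cfg=cfg.exp_config, policy_state_dict=policy_state_dict)
```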
@@ -121,30 +92,7 @@ python3 -u train.py
 ```
 **train.py**
 ```python
-
-from huggingface_ding import push_model_to_hub
-
-# Instantiate the agent
-agent = C51Agent(env="lunarlander_discrete", exp_name="Lunarlander-v2-C51")
-# Train the agent
-return_ = agent.train(step=int(4000000), collector_env_num=8, evaluator_env_num=8, debug=False)
-# Push model to huggingface hub
-push_model_to_hub(
-    agent=agent.best,
-    env_name="OpenAI/Gym/Box2d",
-    task_name="LunarLander-v2",
-    algo_name="C51",
-    wandb_url=return_.wandb_url,
-    github_repo_url="https://github.com/opendilab/DI-engine",
-    github_doc_model_url="https://di-engine-docs.readthedocs.io/en/latest/12_policies/c51.html",
-    github_doc_env_url="https://di-engine-docs.readthedocs.io/en/latest/13_envs/lunarlander.html",
-    installation_guide="pip3 install DI-engine[common_env]",
-    usage_file_by_git_clone="./c51/lunarlander_c51_deploy.py",
-    usage_file_by_huggingface_ding="./c51/lunarlander_c51_download.py",
-    train_file="./c51/lunarlander_c51.py",
-    repo_id="OpenDILabCommunity/Lunarlander-v2-C51"
-)
-
+# [More Information Needed]
 ```
 </details>
 
@@ -167,10 +115,10 @@ exp_config = {
             'cfg_type': 'BaseEnvManagerDict'
         },
         'stop_value': 200,
+        'n_evaluator_episode': 8,
         'collector_env_num': 8,
         'evaluator_env_num': 8,
-        'env_id': 'LunarLander-v2'
-        'n_evaluator_episode': 8
+        'env_id': 'LunarLander-v2'
     },
     'policy': {
         'model': {
@@ -215,9 +163,10 @@ exp_config = {
                     'render_freq': -1,
                     'mode': 'train_iter'
                 },
+                'figure_path': None,
                 'cfg_type': 'InteractionSerialEvaluatorDict',
-                '
-                '
+                'stop_value': 200,
+                'n_episode': 8
             }
         },
         'other': {
@@ -243,7 +192,7 @@ exp_config = {
         'nstep': 3,
         'cfg_type': 'C51PolicyDict'
     },
-    'exp_name': '
+    'exp_name': 'LunarLander-v2-C51',
     'seed': 0,
     'wandb_logger': {
         'gradient_logger': True,
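The configuration changes in the three hunks above add `n_evaluator_episode` and `env_id` under `env`, add `figure_path`, `stop_value` and `n_episode` to the evaluator block, and fill in `exp_name`. A quick way to confirm the updated fields after downloading `policy_config.py` is sketched below; the attribute paths are inferred from the nesting shown in the diff, and `Config.file_to_dict` is the loader already used in the deployment snippet.

```python
from easydict import EasyDict
from ding.config import Config

# Load the config file shipped with the model repository
cfg = EasyDict(Config.file_to_dict("policy_config.py"))

# Fields added or renamed in this update (paths inferred from the diff)
print(cfg.exp_config.env.env_id)               # 'LunarLander-v2'
print(cfg.exp_config.env.n_evaluator_episode)  # 8
print(cfg.exp_config.exp_name)                 # 'LunarLander-v2-C51'
```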
@@ -259,23 +208,23 @@ exp_config = {
 
 **Training Procedure**
 <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-- **Weights & Biases (wandb):** [monitor link](https://wandb.ai/
+- **Weights & Biases (wandb):** [monitor link](https://wandb.ai/anony-moose-281353441759581725/LunarLander-v2-C51?apiKey=d148cead9d59fbdabf4ef34f646a7ed95795e5bb)
 
 ## Model Information
 <!-- Provide the basic links for the model. -->
 - **Github Repository:** [repo link](https://github.com/opendilab/DI-engine)
 - **Doc**: [DI-engine-docs Algorithm link](https://di-engine-docs.readthedocs.io/en/latest/12_policies/c51.html)
-- **Configuration:** [config link](https://huggingface.co/OpenDILabCommunity/
-- **Demo:** [video](https://huggingface.co/OpenDILabCommunity/
+- **Configuration:** [config link](https://huggingface.co/OpenDILabCommunity/LunarLander-v2-C51/blob/main/policy_config.py)
+- **Demo:** [video](https://huggingface.co/OpenDILabCommunity/LunarLander-v2-C51/blob/main/replay.mp4)
 <!-- Provide the size information for the model. -->
 - **Parameters total size:** 214.3 KB
-- **Last Update Date:** 2023-
+- **Last Update Date:** 2023-07-23
 
 ## Environments
 <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->
 - **Benchmark:** OpenAI/Gym/Box2d
 - **Task:** LunarLander-v2
 - **Gym version:** 0.25.1
-- **DI-engine version:** v0.4.
-- **PyTorch version:**
+- **DI-engine version:** v0.4.8
+- **PyTorch version:** 2.0.1+cu117
 - **Doc**: [DI-engine-docs Environments link](https://di-engine-docs.readthedocs.io/en/latest/13_envs/lunarlander.html)