Push to Hub
- args.yml +2 -2
- dqn-CartPole-v1.zip +2 -2
- dqn-CartPole-v1/data +22 -22
- dqn-CartPole-v1/policy.optimizer.pth +2 -2
- dqn-CartPole-v1/policy.pth +2 -2
- dqn-CartPole-v1/pytorch_variables.pth +2 -2
- dqn-CartPole-v1/system_info.txt +6 -6
- replay.mp4 +2 -2
- results.json +1 -1
- train_eval_metrics.zip +2 -2
args.yml
CHANGED
@@ -54,13 +54,13 @@
 - - save_replay_buffer
 - false
 - - seed
--
+- 265022033
 - - storage
 - null
 - - study_name
 - null
 - - tensorboard_log
-- runs/CartPole-
+- runs/CartPole-v1__dqn__265022033__1710223885
 - - track
 - true
 - - trained_agent
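The new seed and tensorboard_log values line up with each other: the run directory name appears to follow an env__algo__seed__unix-timestamp pattern used when tracking is enabled. A small Python sketch reconstructing the name from the values visible in this diff (the naming convention itself is an inference, not read from rl-zoo3 source):

# Sketch only: rebuild the run name shown above from its visible parts.
env_id, algo, seed = "CartPole-v1", "dqn", 265022033
launch_unix_time = 1710223885  # trailing component of the run name above

run_name = f"{env_id}__{algo}__{seed}__{launch_unix_time}"
assert run_name == "CartPole-v1__dqn__265022033__1710223885"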
dqn-CartPole-v1.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d7deef6fff64c9f67be8f3d0800361d59da4b6259c82885317ad3ab0ac4a17d1
+size 1108823
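dqn-CartPole-v1.zip is the complete Stable-Baselines3 model archive stored via Git LFS; the dqn-CartPole-v1/ entries below mirror its unpacked contents. A minimal loading sketch, assuming the huggingface_sb3 helper package and a placeholder repo id (not taken from this diff):

# Hedged sketch: pull the checkpoint pushed in this commit and run one step.
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN

checkpoint_path = load_from_hub(
    repo_id="<user>/dqn-CartPole-v1",   # placeholder repo id
    filename="dqn-CartPole-v1.zip",     # the LFS file changed in this commit
)
model = DQN.load(checkpoint_path)

env = gym.make("CartPole-v1")
obs, _ = env.reset()
action, _ = model.predict(obs, deterministic=True)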
dqn-CartPole-v1/data
CHANGED
@@ -5,15 +5,15 @@
"__module__": "stable_baselines3.dqn.policies",
"__annotations__": "{'q_net': <class 'stable_baselines3.dqn.policies.QNetwork'>, 'q_net_target': <class 'stable_baselines3.dqn.policies.QNetwork'>}",
"__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function DQNPolicy.__init__ at
-"_build": "<function DQNPolicy._build at
-"make_q_net": "<function DQNPolicy.make_q_net at
-"forward": "<function DQNPolicy.forward at
-"_predict": "<function DQNPolicy._predict at
-"_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at
-"set_training_mode": "<function DQNPolicy.set_training_mode at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at
},
"verbose": 1,
"policy_kwargs": {
@@ -27,12 +27,12 @@
"_num_timesteps_at_start": 0,
"seed": 0,
"action_noise": null,
-"start_time":
"learning_rate": {
":type:": "<class 'function'>",
-":serialized:": "
},
-"tensorboard_log": "runs/CartPole-
"_last_obs": null,
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",
@@ -40,16 +40,16 @@
},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
},
-"_episode_num":
"use_sde": false,
"sde_sample_freq": -1,
"_current_progress_remaining": 0.40002000000000004,
"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
-":serialized:": "
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
@@ -93,13 +93,13 @@
":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
"__module__": "stable_baselines3.common.buffers",
"__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-"__init__": "<function ReplayBuffer.__init__ at
-"add": "<function ReplayBuffer.add at
-"sample": "<function ReplayBuffer.sample at
-"_get_samples": "<function ReplayBuffer._get_samples at
-"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at
},
"replay_buffer_kwargs": {},
"train_freq": {
@@ -116,12 +116,12 @@
"exploration_rate": 0.04,
"lr_schedule": {
":type:": "<class 'function'>",
-":serialized:": "
},
"batch_norm_stats": [],
"batch_norm_stats_target": [],
"exploration_schedule": {
":type:": "<class 'function'>",
-":serialized:": "
}
}

@@ -5,15 +5,15 @@
"__module__": "stable_baselines3.dqn.policies",
"__annotations__": "{'q_net': <class 'stable_baselines3.dqn.policies.QNetwork'>, 'q_net_target': <class 'stable_baselines3.dqn.policies.QNetwork'>}",
"__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+"__init__": "<function DQNPolicy.__init__ at 0x7dd0f85fd6c0>",
+"_build": "<function DQNPolicy._build at 0x7dd0f85fd750>",
+"make_q_net": "<function DQNPolicy.make_q_net at 0x7dd0f85fd7e0>",
+"forward": "<function DQNPolicy.forward at 0x7dd0f85fd870>",
+"_predict": "<function DQNPolicy._predict at 0x7dd0f85fd900>",
+"_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x7dd0f85fd990>",
+"set_training_mode": "<function DQNPolicy.set_training_mode at 0x7dd0f85fda20>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc._abc_data object at 0x7dd0f85f6340>"
},
"verbose": 1,
"policy_kwargs": {
@@ -27,12 +27,12 @@
"_num_timesteps_at_start": 0,
"seed": 0,
"action_noise": null,
+"start_time": 1710223887059016239,
"learning_rate": {
":type:": "<class 'function'>",
+
":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuDQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz9i13MY/FBIhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"
},
+"tensorboard_log": "runs/CartPole-v1__dqn__265022033__1710223885/CartPole-v1",
"_last_obs": null,
"_last_episode_starts": {
":type:": "<class 'numpy.ndarray'>",

@@ -40,16 +40,16 @@
},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWVhQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAJAvPj2qEXQ/Ho7rPXCMAL+UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLBIaUjAFDlHSUUpQu"
},
+"_episode_num": 480,
"use_sde": false,
"sde_sample_freq": -1,
"_current_progress_remaining": 0.40002000000000004,
"_stats_window_size": 100,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWV4QsAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGBAAAAAAACMAWyUS4KMAXSUR0BF/8EV32VWdX2UKGgGR0BbwAAAAAAAaAdLb2gIR0BGCzt9hJAddX2UKGgGR0BawAAAAAAAaAdLa2gIR0BGFzpHI6sAdX2UKGgGR0BXQAAAAAAAaAdLXWgIR0BGXCJoCdSVdX2UKGgGR0BVwAAAAAAAaAdLV2gIR0BGZsJpnHvMdX2UKGgGR0BXAAAAAAAAaAdLXGgIR0BGct8NQTEjdX2UKGgGR0BYgAAAAAAAaAdLYmgIR0BGq6xoqTbGdX2UKGgGR0BYwAAAAAAAaAdLY2gIR0BGs5F5OafBdX2UKGgGR0BaAAAAAAAAaAdLaGgIR0BG5y+xnnMddX2UKGgGR0AxAAAAAAAAaAdLEWgIR0BG6IHC4z7/dX2UKGgGR0BZgAAAAAAAaAdLZmgIR0BG8Lf1pTMrdX2UKGgGR0AoAAAAAAAAaAdLDGgIR0BG8ZlWfbsXdX2UKGgGR0BawAAAAAAAaAdLa2gIR0BHI/XPJJXhdX2UKGgGR0BgwAAAAAAAaAdLhmgIR0BHLuHFglWwdX2UKGgGR0BhIAAAAAAAaAdLiWgIR0BHZgbADaGpdX2UKGgGR0BZgAAAAAAAaAdLZmgIR0BHbZBC2MKkdX2UKGgGR0BYQAAAAAAAaAdLYWgIR0BHdStV7x/edX2UKGgGR0BgAAAAAAAAaAdLgGgIR0BHqBnBciW3dX2UKGgGR0BOgAAAAAAAaAdLPWgIR0BHrK+Jxeb/dX2UKGgGR0AzAAAAAAAAaAdLE2gIR0BHriosI3R5dX2UKGgGR0BXgAAAAAAAaAdLXmgIR0BH4ohQm/nGdX2UKGgGR0AzAAAAAAAAaAdLE2gIR0BH5BbW3BpIdX2UKGgGR0AwAAAAAAAAaAdLEGgIR0BH5VZcLSeAdX2UKGgGR0AyAAAAAAAAaAdLEmgIR0BH5yaEzwc6dX2UKGgGR0AsAAAAAAAAaAdLDmgIR0BH6E9+w1R+dX2UKGgGR0AsAAAAAAAAaAdLDmgIR0BH6UrsjVx0dX2UKGgGR0AqAAAAAAAAaAdLDWgIR0BH6lOO801qdX2UKGgGR0A0AAAAAAAAaAdLFGgIR0BH689fTkQxdX2UKGgGR0AsAAAAAAAAaAdLDmgIR0BH7M052hZhdX2UKGgGR0A1AAAAAAAAaAdLFWgIR0BH7k9ECvHMdX2UKGgGR0BAgAAAAAAAaAdLIWgIR0BH8SGBWgezdX2UKGgGR0AwAAAAAAAAaAdLEGgIR0BH8lar3j+8dX2UKGgGR0A1AAAAAAAAaAdLFWgIR0BH9F9Brvb5dX2UKGgGR0AyAAAAAAAAaAdLEmgIR0BIHq2BreqJdX2UKGgGR0BYAAAAAAAAaAdLYGgIR0BIJtk4FRpDdX2UKGgGR0BYQAAAAAAAaAdLYWgIR0BILmXokiUxdX2UKGgGR0BYQAAAAAAAaAdLYWgIR0BIYCeVcD8tdX2UKGgGR0BKAAAAAAAAaAdLNGgIR0BIZRSHdoFndX2UKGgGR0BMAAAAAAAAaAdLOGgIR0BIabngYP5IdX2UKGgGR0AxAAAAAAAAaAdLEWgIR0BIa3t8eCCjdX2UKGgGR0AwAAAAAAAAaAdLEGgIR0BIbKXF98Z2dX2UKGgGR0AzAAAAAAAAaAdLE2gIR0BIbgDaGpMpdX2UKGgGR0AwAAAAAAAAaAdLEGgIR0BIbzXBguyvdX2UKGgGR0BiAAAAAAAAaAdLkGgIR0BIpGGdqcmTdX2UKGgGR0BfQAAAAAAAaAdLfWgIR0BIrkf9xZMddX2UKGgGR0BegAAAAAAAaAdLemgIR0BI4Wkadc0MdX2UKGgGR0BegAAAAAAAaAdLemgIR0BI7GDlHSWrdX2UKGgGR0BbgAAAAAAAaAdLbmgIR0BJHhcqvvBrdX2UKGgGR0BYgAAAAAAAaAdLYmgIR0BJJd1uBMBZdX2UKGgGR0BYQAAAAAAAaAdLYWgIR0BJWEQGwA2idX2UKGgGR0BPAAAAAAAAaAdLPmgIR0BJXa0IC2c8dX2UKGgGR0A5AAAAAAAAaAdLGWgIR0BJX2tMfzSUdX2UKGgGR0BMAAAAAAAAaAdLOGgIR0BJY7OeJ53UdX2UKGgGR0AkAAAAAAAAaAdLCmgIR0BJZHrIHTqjdX2UKGgGR0BKAAAAAAAAaAdLNGgIR0BJak+X7cfvdX2UKGgGR0BXgAAAAAAAaAdLXmgIR0BJmf8uSOindX2UKGgGR0BgYAAAAAAAaAdLg2gIR0BJpOPeYUnHdX2UKGgGR0BfAAAAAAAAaAdLfGgIR0BJ2HDaXa8IdX2UKGgGR0BJgAAAAAAAaAdLM2gIR0BJ3ODBdld1dX2UKGgGR0BKgAAAAAAAaAdLNWgIR0BJ4aW5Yoy9dX2UKGgGR0BFAAAAAAAAaAdLKmgIR0BJ5SQgcLjQdX2UKGgGR0BFgAAAAAAAaAdLK2gIR0BJ6T8xbjcVdX2UKGgGR0BYAAAAAAAAaAdLYGgIR0BKG3UYsNDudX2UKGgGR0BZgAAAAAAAaAdLZmgIR0BKI/x+az/qdX2UKGgGR0BZAAAAAAAAaAdLZGgIR0BKVsdLg4wRdX2UKGgGR0BrIAAAAAAAaAdL2WgIR0BKZ/9YOlO5dX2UKGgGR0BmAAAAAAAAaAdLsGgIR0BKn/6oESuhdX2UKGgGR0BjwAAAAAAAaAdLnmgIR0BK1vaURnOCdX2UKGgGR0AmAAAAAAAAaAdLC2gIR0BK18twrDqGdX2UKGgGR0BZQAAAAAAAaAdLZWgIR0BK3/9YOlO5dX2UKGgGR0BZwAAAAAAAaAdLZ2gIR0BLE+w9q1w6dX2UKGgGR0A2AAAAAAAAaAdLFmgIR0BLFZGBnSOSdX2UKGgGR0A4AAAAAAAAaAdLGGgIR0BLF3BHkLhKdX2UKGgGR0BYgAAAAAAAaAdLYmgIR0BLH6/IsAeadX2UKGgGR0BXAAAAAAAAaAdLXGgIR0BLJzRhMJyAdX2UKGgGR0BtQAAAAAAAaAdL6mgIR0BLZPt2LYPHdX2UKGgGR0BlwAAAAAAAaAdLrmgIR0BLpjCHh0hedX2UKGgGR0BeQAAAAAAAaAdLeWgIR0BL5dDpkf9xdX2UKGgGR0A3AAAAAAAAaAdLF2gIR0BL6LD63y7PdX2UKGgGR0A4AAAAAAAAaAdLGGgIR0BL64GMXJo1dX2UKGgGR0BegAAAAAAAaAdLemgIR0BL+VPN3W4FdX2UKGgGR0BfAAAAAAAAaAdLfGgIR0BMPPoV2zOYdX2UKGgGR0BiAAAAAAAAaAdLkGgIR0BMTeY2Kl54dX2UKGgGR0BhIAAAAAAAaAdLiWgIR0BMmeJxeb/fdX2UKGgGR0BgoAAAAAAAaAdLhWgIR0BMp5BcAzYVdX2UKGgGR0BhoAAAAAAAaAdLjWgIR0BM3Am7aqS6dX2UKGgGR0BjYAAAAAAAaAdLm2gIR0BM6E
aESM99dX2UKGgGR0Bj4AAAAAAAaAdLn2gIR0BNHo2n889wdX2UKGgGR0BjQAAAAAAAaAdLmmgIR0BNVLNGEwnIdX2UKGgGR0BgoAAAAAAAaAdLhWgIR0BNXwI2OyVwdX2UKGgGR0BjoAAAAAAAaAdLnWgIR0BNlWYWtU4rdX2UKGgGR0BmQAAAAAAAaAdLsmgIR0BNzU0m+j/NdX2UKGgGR0AwAAAAAAAAaAdLEGgIR0BNzpXyRSxadX2UKGgGR0Bg4AAAAAAAaAdLh2gIR0BN2Ye1a4c4dX2UKGgGR0BgQAAAAAAAaAdLgmgIR0BODvSlWOp9dX2UKGgGR0BqQAAAAAAAaAdL0mgIR0BOHwu/UONHdX2UKGgGR0BpAAAAAAAAaAdLyGgIR0BOV5PuXu3MdX2UKGgGR0B7wAAAAAAAaAdNvAFoCEdATtAAOrhisnV9lChoBkdAaYAAAAAAAGgHS8xoCEdATwnnQpnYhHV9lChoBkdAaiAAAAAAAGgHS9FoCEdAT0Yh0Qsf73VlLg=="
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",

@@ -93,13 +93,13 @@
":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
"__module__": "stable_baselines3.common.buffers",
"__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
+"__init__": "<function ReplayBuffer.__init__ at 0x7dd0f85ddbd0>",
+"add": "<function ReplayBuffer.add at 0x7dd0f85ddc60>",
+"sample": "<function ReplayBuffer.sample at 0x7dd0f85ddcf0>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x7dd0f85ddd80>",
+"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x7dd0f85dde10>)>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc._abc_data object at 0x7dd0f8558380>"
},
"replay_buffer_kwargs": {},
"train_freq": {

@@ -116,12 +116,12 @@
"exploration_rate": 0.04,
"lr_schedule": {
":type:": "<class 'function'>",
+
":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuDQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz9i13MY/FBIhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"
},
"batch_norm_stats": [],
"batch_norm_stats_target": [],
"exploration_schedule": {
":type:": "<class 'function'>",
+
":serialized:": "gAWVZQMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLBEsTQyhkAXwAGACIAWsEcgiIAFMAiAJkAXwAGACIAIgCGAAUAIgBGwAXAFMAlE5LAYaUKYwScHJvZ3Jlc3NfcmVtYWluaW5nlIWUjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLcUMGDAEEARgClIwDZW5klIwMZW5kX2ZyYWN0aW9ulIwFc3RhcnSUh5QpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flIxJL3Vzci9sb2NhbC9saWIvcHl0aG9uMy4xMC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlGgdKVKUaB0pUpSHlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoI32UfZQoaBhoDYwMX19xdWFsbmFtZV9flIwbZ2V0X2xpbmVhcl9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UKGgKjAhidWlsdGluc5SMBWZsb2F0lJOUjAZyZXR1cm6UaC91jA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoGYwHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/pHrhR64Ue4WUUpRoN0c/xHrhR64Ue4WUUpRoN0c/8AAAAAAAAIWUUpSHlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
}
}
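The serialized lr_schedule and exploration_schedule above are cloudpickled closures; their qualified names inside the base64 payloads point at SB3's constant_fn and get_linear_fn helpers. A Python sketch of how such schedules are built; the numeric arguments are illustrative assumptions, except that a linear ramp ending at 0.04 is consistent with the stored exploration_rate at _current_progress_remaining = 0.40002:

# Sketch, not decoded from the payloads above: SB3 schedule helpers that
# produce closures like the ones serialized in this file.
from stable_baselines3.common.utils import constant_fn, get_linear_fn

lr_schedule = constant_fn(2.3e-3)  # assumed learning rate; any constant behaves the same

# Assumed ramp: 1.0 -> 0.04 over the first 16% of training.
exploration_schedule = get_linear_fn(start=1.0, end=0.04, end_fraction=0.16)

# SB3 calls schedules with progress_remaining going from 1.0 down to 0.0.
# At the stored _current_progress_remaining the ramp is already finished:
print(exploration_schedule(0.40002000000000004))  # -> 0.04, the stored exploration_rate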
dqn-CartPole-v1/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:613daf5d8605a9279ed56325728d44c63c3b8c35a3f80a9655aa0fb4576fd0b5
+size 546144
dqn-CartPole-v1/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:74aaf968892e0fe4b02559763e622b0fe46a5d027025afaac8dad8a9a1394a62
+size 545202
dqn-CartPole-v1/pytorch_variables.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0c35cea3b2e60fb5e7e162d3592df775cd400e575a31c72f359fb9e654ab00c5
+size 864
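Each of the .pth entries above is a Git LFS pointer: the oid is the SHA-256 of the real file and size is its length in bytes. A small Python sketch for checking a downloaded copy against its pointer (the local path is a placeholder):

# Sketch: verify a locally downloaded artifact against its LFS pointer values.
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# oid and size taken from the pytorch_variables.pth pointer above; the path is hypothetical.
print(matches_lfs_pointer(
    "dqn-CartPole-v1/pytorch_variables.pth",
    "0c35cea3b2e60fb5e7e162d3592df775cd400e575a31c72f359fb9e654ab00c5",
    864,
))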
dqn-CartPole-v1/system_info.txt
CHANGED
@@ -1,9 +1,9 @@
-- OS: Linux-
+- OS: Linux-6.1.58+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Sat Nov 18 15:31:17 UTC 2023
-- Python: 3.10.
+- Python: 3.10.12
 - Stable-Baselines3: 2.1.0
-- PyTorch: 2.0
+- PyTorch: 2.1.0+cu121
-- GPU Enabled:
+- GPU Enabled: True
-- Numpy: 1.25.
+- Numpy: 1.25.2
 - Cloudpickle: 2.2.1
-- Gymnasium: 0.
+- Gymnasium: 0.29.1
 - OpenAI Gym: 0.26.2
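system_info.txt is the environment report SB3 writes alongside a saved model. The same fields can be reproduced locally; a sketch, assuming get_system_info is available at the package level as in recent SB3 releases:

# Sketch: regenerate the kind of report stored in system_info.txt.
import stable_baselines3 as sb3

info_dict, info_str = sb3.get_system_info(print_info=True)
# Expect entries such as OS, Python, Stable-Baselines3, PyTorch, GPU Enabled,
# Numpy, Cloudpickle and Gymnasium versions, matching the fields above.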
replay.mp4
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:257d52ecebfdb03f9abc85a5a8c06b02e1b13d40cb5be661dbd31213c82dfa77
+size 76982
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward": 500.0, "std_reward": 0.0, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "
+{"mean_reward": 500.0, "std_reward": 0.0, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-03-12T06:14:37.762770"}
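results.json records the evaluation performed at push time: 10 deterministic episodes with a mean return of 500, the CartPole-v1 episode cap. A sketch of the equivalent evaluation call, assuming a local copy of the model archive above:

# Sketch: reproduce the numbers stored in results.json for a loaded model.
import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy

model = DQN.load("dqn-CartPole-v1.zip")   # local copy of the archive above
eval_env = gym.make("CartPole-v1")

mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)
print(mean_reward, std_reward)  # the commit stores 500.0 and 0.0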
train_eval_metrics.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f760c59501c7dcc2ea7a61faf869a5eb6841225ba50f556fc17a70b72c3f6c58
+size 12208
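The remaining LFS artifacts, such as replay.mp4 and train_eval_metrics.zip, can be fetched individually instead of cloning the whole repository. A sketch using huggingface_hub, with the repo id again a placeholder:

# Sketch: download single artifacts from this commit's repository.
from huggingface_hub import hf_hub_download

video_path = hf_hub_download(repo_id="<user>/dqn-CartPole-v1", filename="replay.mp4")
metrics_path = hf_hub_download(repo_id="<user>/dqn-CartPole-v1", filename="train_eval_metrics.zip")
print(video_path, metrics_path)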