uditimadan committed
Commit 24ae5f1
Parent: 763c631

Initial commit

dqn-PongNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:162bf865ac03dfb43d1bb65fd55a36e86c812a03a14c2498fd48dba21cc7a8d9
+oid sha256:b207bed7995f1cffbf7f4ec72ac94a44f329703b9f3abac202d798117d53cee9
 size 13711939
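
The .zip is tracked with Git LFS: the repository stores only a pointer (object hash and byte size), and the binary itself lives in LFS storage, which is why the diff touches only the oid line. A minimal sketch of fetching the archive with huggingface_hub, assuming the repo id is uditimadan/dqn-PongNoFrameskip-v4 (inferred from the committer name, not stated in this diff):

from huggingface_hub import hf_hub_download

# Assumed repo id, inferred from the committer name; adjust if the repo lives elsewhere.
zip_path = hf_hub_download(
    repo_id="uditimadan/dqn-PongNoFrameskip-v4",
    filename="dqn-PongNoFrameskip-v4.zip",
)
print(zip_path)  # local path to the 13711939-byte archive referenced by the pointer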
dqn-PongNoFrameskip-v4/data CHANGED
@@ -4,9 +4,9 @@
     ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCUNublBvbGljeZSTlC4=",
     "__module__": "stable_baselines3.dqn.policies",
     "__doc__": "\n Policy class for DQN when using images as input.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-    "__init__": "<function CnnPolicy.__init__ at 0x79666a9eeb00>",
+    "__init__": "<function CnnPolicy.__init__ at 0x7f88729eeb00>",
     "__abstractmethods__": "frozenset()",
-    "_abc_impl": "<_abc._abc_data object at 0x79666a800e40>"
+    "_abc_impl": "<_abc._abc_data object at 0x7f8872800d80>"
   },
   "verbose": 1,
   "policy_kwargs": {},
@@ -84,13 +84,13 @@
     "__module__": "stable_baselines3.common.buffers",
     "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'next_observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'dones': <class 'numpy.ndarray'>, 'timeouts': <class 'numpy.ndarray'>}",
     "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-    "__init__": "<function ReplayBuffer.__init__ at 0x79666a933eb0>",
-    "add": "<function ReplayBuffer.add at 0x79666a933f40>",
-    "sample": "<function ReplayBuffer.sample at 0x79666a938040>",
-    "_get_samples": "<function ReplayBuffer._get_samples at 0x79666a9380d0>",
-    "_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x79666a938160>)>",
+    "__init__": "<function ReplayBuffer.__init__ at 0x7f8872933eb0>",
+    "add": "<function ReplayBuffer.add at 0x7f8872933f40>",
+    "sample": "<function ReplayBuffer.sample at 0x7f8872938040>",
+    "_get_samples": "<function ReplayBuffer._get_samples at 0x7f88729380d0>",
+    "_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x7f8872938160>)>",
     "__abstractmethods__": "frozenset()",
-    "_abc_impl": "<_abc._abc_data object at 0x79666aaad780>"
+    "_abc_impl": "<_abc._abc_data object at 0x7f8872aac200>"
   },
   "replay_buffer_kwargs": {},
   "train_freq": {
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": -21.0, "std_reward": 0.0, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-05-16T11:34:16.296638"}
+{"mean_reward": -21.0, "std_reward": 0.0, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-05-16T11:39:12.916544"}