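"""Evaluate a trained sender/receiver pair on a BabyAI environment and write the
per-episode message corpus plus summary metrics to ../../data/<name>."""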
from argparse import Namespace
import json
from pathlib import Path
from typing import Tuple, List, Dict
import gym
import torch
import numpy as np
import babyai.utils as utils
import babyai_sr.utils as utils_sr
from babyai_sr.arguments import ArgumentParser
from babyai_sr.rl.algos import BaseAlgo
from babyai_sr.rl.utils import ParallelEnv
import util as lib_util
##### based on repo/babyai_sr/rl/algos/test.py #####
SENDER = 0
RECEIVER = 1
def extract_messages(message, done, active) -> List[List[List[int]]]:
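    """Split the rollout tensors into episodes and return, for each episode, the
    argmax-decoded sender message at every frame where the sender was active."""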
argmax_messages = message.argmax(-1)[:, :, SENDER]
done_np = done.cpu().numpy()
done_idxs = np.argwhere(done_np)
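    # Each row of done_idxs is a (proc, frame) pair marking a frame at which an episode ended.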
message_list = []
for i in range(done_idxs.shape[0]):
prev_end = done_idxs[i - 1] if i > 0 else [0, -1]
end = done_idxs[i]
# If we are spanning the border between procs, adjust the prev_end
# to start at the end's proc.
if prev_end[0] != end[0]:
prev_end = [end[0], -1]
        # Take the messages from the given proc over this episode's frame range.
indexer = np.s_[prev_end[0], prev_end[1] + 1 : (end[1] + 1)]
ep_messages = argmax_messages[indexer]
# We only want messages for when the sender is active since the
# message is just repeated for the receiver until the sender is
# active again.
ep_active = active[indexer][:, SENDER]
final_ep_messages = ep_messages[ep_active]
message_list.append(final_ep_messages.cpu().tolist())
return message_list
class TestAlgo(BaseAlgo):
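    """Rollout algorithm used at test time: collects experience for a sender/receiver
    pair via BaseAlgo and aggregates it into whole episodes."""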
def __init__(
self,
env,
models,
num_frames_per_proc=40,
discount=0.99,
gae_lambda=0.99,
preprocess_obss=None,
reshape_reward=None,
use_comm=True,
conventional=False,
argmax=True,
):
super().__init__(
env,
models,
num_frames_per_proc,
discount,
gae_lambda,
preprocess_obss,
reshape_reward,
use_comm,
conventional,
argmax,
)
def collect_episodes(self, episodes):
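        """Roll out experience until `episodes` episodes have finished, returning the
        per-episode sender messages and a log of per-episode returns."""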
# Collect experiences.
exps, _ = self.collect_experiences()
batch = 1
shape_prefix = (self.num_procs, self.num_frames_per_proc)
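        # View the flat experience tensors as (proc, frame, ...) so that episode
        # boundaries can be traced per process.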
active = exps.active.view(*shape_prefix, *exps.active.shape[1:])
done = exps.done.view(*shape_prefix, *exps.done.shape[1:])
message = exps.message.view(*shape_prefix, *exps.message.shape[1:])
reward = exps.reward.view(*shape_prefix, *exps.reward.shape[1:])
log = {
"return_per_episode": [],
}
proc = 0
frame = [0] * self.num_procs
episodes_done = 0
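        # Walk through one proc's frames until an episode ends, log the receiver's
        # return, then rotate to the next proc; collect another batch of rollouts
        # whenever the already-collected frames run out.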
while True:
if done[proc, frame[proc]]:
episodes_done += 1
log["return_per_episode"].append(
reward[proc, frame[proc], RECEIVER].item()
)
if episodes_done == episodes:
break
frame[proc] += 1
proc = (proc + 1) % self.num_procs
else:
frame[proc] += 1
if frame[proc] == batch * self.num_frames_per_proc:
exps, _ = self.collect_experiences()
batch += 1
next_active = exps.active.view(*shape_prefix, *exps.active.shape[1:])
next_done = exps.done.view(*shape_prefix, *exps.done.shape[1:])
next_message = exps.message.view(*shape_prefix, *exps.message.shape[1:])
next_reward = exps.reward.view(*shape_prefix, *exps.reward.shape[1:])
active = torch.cat((active, next_active), 1)
done = torch.cat((done, next_done), 1)
message = torch.cat((message, next_message), 1)
reward = torch.cat((reward, next_reward), 1)
message_list = extract_messages(message, done, active)
return message_list, log
##### based on repo/babyai_sr/scripts/test_rl.py #####
def parse_args() -> Namespace:
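    """Parse test-time options on top of babyai_sr's ArgumentParser defaults."""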
parser = ArgumentParser()
parser.add_argument("--name", help="name of the configuration")
parser.add_argument("--sender", default=None, help="name of the sender (REQUIRED)")
parser.add_argument(
"--receiver", default=None, help="name of the receiver (REQUIRED)"
)
parser.add_argument(
"--sample",
action="store_true",
default=False,
help="sample messages instead of using argmax",
)
parser.add_argument(
"--episodes",
type=int,
default=1000,
help="number of episodes to test on (default: 1000)",
)
parser.add_argument("--len-message", default=None, help="dummy")
parser.add_argument("--num-symbols", default=None, help="dummy")
return parser.parse_args()
def get_episodes(args: Namespace) -> Tuple[List, Dict]:
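    """Build the parallel environments, load the sender and receiver models, and
    collect `args.episodes` evaluation episodes with TestAlgo."""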
utils.seed(args.seed)
envs = []
for i in range(args.procs):
env = gym.make(args.env)
env.seed(100 * args.seed + i)
envs.append(env)
penv = ParallelEnv(
envs, args.n, args.conventional, args.archimedean, args.informed_sender
)
sender = utils.load_model(args.sender)
receiver = utils.load_model(args.receiver)
sender.eval()
receiver.eval()
if torch.cuda.is_available():
sender.cuda()
receiver.cuda()
reshape_reward = lambda _0, _1, reward, _2: args.reward_scale * reward
obss_preprocessor = utils_sr.MultiObssPreprocessor(
[args.sender, args.receiver], [envs[0].observation_space] * 2
)
test_algo = TestAlgo(
penv,
[sender, receiver],
args.frames_per_proc,
args.discount,
args.gae_lambda,
obss_preprocessor,
reshape_reward,
not args.no_comm,
args.conventional,
not args.sample,
)
return test_algo.collect_episodes(args.episodes)
def write_results(args: Namespace, message_list: List, logs: Dict) -> None:
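    """Write summary metrics plus flat and structured message corpora as JSON-lines
    files under the run's output directory."""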
out_path = Path(f"../../data/{args.name}")
out_path.mkdir(exist_ok=True, parents=True)
return_per_episode = utils.synthesize(logs["return_per_episode"])
success_per_episode = utils.synthesize(
[1 if r > 0 else 0 for r in logs["return_per_episode"]]
)
metrics = {
"success_rate": success_per_episode["mean"],
"success_rate_std": success_per_episode["std"],
"reward_per_episode": return_per_episode["mean"],
"reward_per_episode_std": return_per_episode["std"],
}
lib_util.write_system_metrics(out_path, metrics)
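    # corpus.jsonl stores each episode's symbols as one flat list per line;
    # corpus.structured.jsonl keeps the per-step message nesting.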
with (out_path / "corpus.jsonl").open("w") as fo:
for message in message_list:
flat = [x for y in message for x in y]
json.dump(flat, fo, indent=None)
fo.write("\n")
with (out_path / "corpus.structured.jsonl").open("w") as fo:
for message in message_list:
json.dump(message, fo, indent=None)
fo.write("\n")
def main() -> None:
args = parse_args()
message_list, logs = get_episodes(args)
write_results(args, message_list, logs)
if __name__ == "__main__":
main()