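# Correctness and speed test of the HPC UPGO loss (hpc_rll.rl_utils.upgo.UPGO)
# against the reference implementation (hpc_rll.origin.upgo.upgo_loss).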
import time
import torch
from hpc_rll.origin.upgo import upgo_loss
from hpc_rll.rl_utils.upgo import UPGO
from testbase import mean_relative_error, times
assert torch.cuda.is_available()
use_cuda = True
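
# Problem size: T time steps, batch size B, N candidate actions per step.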
T = 256
B = 256
N = 256
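

# Validation: run forward and backward through both implementations on identical
# random inputs and report the mean relative error of the loss (fp) and gradient (bp).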
def upgo_val():
    # Random test inputs for the reference (origin) implementation.
    ori_target_output = torch.randn(T, B, N)
    ori_rhos = torch.randn(T, B)
    ori_action = torch.randint(0, N, size=(T, B))
    ori_rewards = torch.randn(T, B)
    ori_bootstrap_values = torch.randn(T + 1, B)
    # Independent copies of the same data for the HPC kernel.
    hpc_target_output = ori_target_output.clone().detach()
    hpc_rhos = ori_rhos.clone().detach()
    hpc_action = ori_action.clone().detach()
    hpc_rewards = ori_rewards.clone().detach()
    hpc_bootstrap_values = ori_bootstrap_values.clone().detach()
    hpc_upgo = UPGO(T, B, N)

    if use_cuda:
        ori_target_output = ori_target_output.cuda()
        ori_rhos = ori_rhos.cuda()
        ori_action = ori_action.cuda()
        ori_rewards = ori_rewards.cuda()
        ori_bootstrap_values = ori_bootstrap_values.cuda()
        hpc_target_output = hpc_target_output.cuda()
        hpc_rhos = hpc_rhos.cuda()
        hpc_action = hpc_action.cuda()
        hpc_rewards = hpc_rewards.cuda()
        hpc_bootstrap_values = hpc_bootstrap_values.cuda()
        hpc_upgo = hpc_upgo.cuda()

    # Forward and backward through the origin implementation.
    ori_target_output.requires_grad_(True)
    ori_loss = upgo_loss(ori_target_output, ori_rhos, ori_action, ori_rewards, ori_bootstrap_values)
    ori_loss = ori_loss.mean()
    ori_loss.backward()
    if use_cuda:
        torch.cuda.synchronize()

    # Forward and backward through the HPC implementation.
    hpc_target_output.requires_grad_(True)
    hpc_loss = hpc_upgo(hpc_target_output, hpc_rhos, hpc_action, hpc_rewards, hpc_bootstrap_values)
    hpc_loss = hpc_loss.mean()
    hpc_loss.backward()
    if use_cuda:
        torch.cuda.synchronize()

    # Compare the forward (fp) loss values and the backward (bp) gradients.
    mre = mean_relative_error(
        torch.flatten(ori_loss).cpu().detach().numpy(),
        torch.flatten(hpc_loss).cpu().detach().numpy()
    )
    print("upgo fp mean_relative_error: " + str(mre))
    mre = mean_relative_error(
        torch.flatten(ori_target_output.grad).cpu().detach().numpy(),
        torch.flatten(hpc_target_output.grad).cpu().detach().numpy()
    )
    print("upgo bp mean_relative_error: " + str(mre))
def upgo_perf():
    # Random test inputs for the reference (origin) implementation.
    ori_target_output = torch.randn(T, B, N)
    ori_rhos = torch.randn(T, B)
    ori_action = torch.randint(0, N, size=(T, B))
    ori_rewards = torch.randn(T, B)
    ori_bootstrap_values = torch.randn(T + 1, B)
    # Independent copies of the same data for the HPC kernel.
    hpc_target_output = ori_target_output.clone().detach()
    hpc_rhos = ori_rhos.clone().detach()
    hpc_action = ori_action.clone().detach()
    hpc_rewards = ori_rewards.clone().detach()
    hpc_bootstrap_values = ori_bootstrap_values.clone().detach()
    hpc_upgo = UPGO(T, B, N)

    if use_cuda:
        ori_target_output = ori_target_output.cuda()
        ori_rhos = ori_rhos.cuda()
        ori_action = ori_action.cuda()
        ori_rewards = ori_rewards.cuda()
        ori_bootstrap_values = ori_bootstrap_values.cuda()
        hpc_target_output = hpc_target_output.cuda()
        hpc_rhos = hpc_rhos.cuda()
        hpc_action = hpc_action.cuda()
        hpc_rewards = hpc_rewards.cuda()
        hpc_bootstrap_values = hpc_bootstrap_values.cuda()
        hpc_upgo = hpc_upgo.cuda()

    # Time forward + backward of the origin implementation.
    ori_target_output.requires_grad_(True)
    for i in range(times):
        t = time.time()
        ori_loss = upgo_loss(ori_target_output, ori_rhos, ori_action, ori_rewards, ori_bootstrap_values)
        ori_loss = ori_loss.mean()
        ori_loss.backward()
        if use_cuda:
            torch.cuda.synchronize()
        print('epoch: {}, original upgo cost time: {}'.format(i, time.time() - t))

    # Time forward + backward of the HPC implementation.
    hpc_target_output.requires_grad_(True)
    for i in range(times):
        t = time.time()
        hpc_loss = hpc_upgo(hpc_target_output, hpc_rhos, hpc_action, hpc_rewards, hpc_bootstrap_values)
        hpc_loss = hpc_loss.mean()
        hpc_loss.backward()
        if use_cuda:
            torch.cuda.synchronize()
        print('epoch: {}, hpc upgo cost time: {}'.format(i, time.time() - t))
if __name__ == '__main__':
print("target problem: T = {}, B = {}, N = {}".format(T, B, N))
print("================run upgo validation test================")
upgo_val()
print("================run upgo performance test================")
upgo_perf()