import numpy as np
import os
import torch
from isaacgym import gymtorch
from isaacgym import gymapi
from isaacgymenvs.utils.torch_jit_utils import (
    to_torch,
    get_axis_params,
    torch_rand_float,
    quat_rotate,
    quat_rotate_inverse,
)
from isaacgymenvs.tasks.base.vec_task import VecTask

from typing import Tuple, Dict
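
# IsaacGym VecTask for the "Eve Beta" exoskeleton: 12 position-controlled
# joints, left/right mirroring of actions and observations over the second half
# of a one-second gait clock, and a Cassie-style periodic contact reward.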

# PERMUTATION_MATRIX = torch.tensor([
#     [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
#         [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
#         [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
#         [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
#         [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]]
# , dtype=torch.float32)

# KP = torch.tensor(2 * [2.0, 2.0, 34.0, 34.0, 0.75, 0.75]) * torch.square(
#     torch.tensor(2 * [100, 122, 24.42, 24.42, 102, 125])
# )  # from PACT_dimensioning_beta.pdf

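# PD gains applied per DOF in _create_envs (stiffness = KP, damping = KD ** 2).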
KP = torch.tensor([15000.0, 15000.0, 15000.0, 15000.0, 5000.0, 5000.0] * 2) 
#KD = torch.tensor(2 * [0.000638, 0.0029, 0.000638, 0.001, 0.0007, 0.0007])
KD = torch.tensor([0.01, 0.01, 0.01, 0.01, 0.01, 0.01] * 2) 

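# Signed permutation swapping left- and right-leg actions; frontal and
# transverse joints flip sign, sagittal joints keep it.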
ACTIONS_MIRROR_MAT = torch.tensor([
       [ 0.,  0.,  0.,  0.,  0.,  0., -1.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0., -1.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., -1.],
       [-1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0., -1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0., -1.,  0.,  0.,  0.,  0.,  0.,  0.]
       ], dtype=torch.float32)

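# 65x65 signed permutation applied to the observation vector whenever the gait
# clock is in its second half (see compute_eve_beta_observation_jiminy).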
OBSERVATION_MIRROR_MAT = torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]],dtype = torch.float32)


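# Joint position limits in radians, left leg first, in the same order as the
# action and observation vectors.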
JOINTS = {
    "LeftFrontalHipJoint": {"lower": -0.244346, "upper": 0.279253},
    "LeftTransverseHipJoint": {"lower": -0.174533, "upper": 0.523599},
    "LeftSagittalHipJoint": {"lower": -2.268928, "upper": 0.261799},
    "LeftSagittalKneeJoint": {"lower": 0.0, "upper": 2.268928},
    "LeftSagittalAnkleJoint": {"lower": -0.436332, "upper": 0.261799},
    "LeftFrontalAnkleJoint": {"lower": -0.279253, "upper": 0.331613},
    "RightFrontalHipJoint": {"lower": -0.279253, "upper": 0.244346},
    "RightTransverseHipJoint": {"lower": -0.523599, "upper": 0.174533},
    "RightSagittalHipJoint": {"lower": -2.268928, "upper": 0.261799},
    "RightSagittalKneeJoint": {"lower": 0.0, "upper": 2.268928},
    "RightSagittalAnkleJoint": {"lower": -0.436332, "upper": 0.261799},
    "RightFrontalAnkleJoint": {"lower": -0.331613, "upper": 0.279253},
}


class EveBeta(VecTask):
    def __init__(
        self,
        cfg,
        rl_device,
        sim_device,
        graphics_device_id,
        headless,
        virtual_screen_capture,
        force_render,
    ):
        self.cfg = cfg

        self.low_limit = torch.tensor([
            -0.244346, -0.174533, -2.268928, 0.0, -0.436332, -0.279253,
            -0.279253, -0.523599, -2.268928, 0.0, -0.436332, -0.331613,
        ]).to(rl_device)

        self.high_limit = torch.tensor([
            0.279253, 0.523599, 0.261799, 2.268928, 0.261799, 0.331613,
            0.244346, 0.174533, 0.261799, 2.268928, 0.261799, 0.279253,
        ]).to(rl_device)

        self._action_orig_mean = (self.high_limit + self.low_limit) / 2.0
        self._action_orig_dev = (self.high_limit - self.low_limit) / 2.0

        self.log_freq = self.cfg["env"]["learn"]["logFrequency"]
        
        # robot weight and foot dimensions, used to normalize the measured
        # contact wrenches
        self.robot_weight = 1151.34696144
        self.foot_width, self.foot_length = 0.131, 0.2811

        self.contact_wrenches_norm = torch.tile(
            self.robot_weight * torch.tensor([1.0, self.foot_width, self.foot_length]),
            (2,),
        ).to(rl_device)

        # normalization
        self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
        self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
        self.dof_pos_scale = self.cfg["env"]["learn"]["dofPositionScale"]
        self.dof_vel_scale = self.cfg["env"]["learn"]["dofVelocityScale"]
        self.action_scale = self.cfg["env"]["control"]["actionScale"]
        self.reward_config = self.cfg["env"]["learn"]["cassie_reward"]
        self.original_push_freq = self.cfg["env"]["learn"]["pushFrequency"]
        self.push_freq = self.cfg["env"]["learn"]["pushFrequency"]
        for key in self.reward_config:
            for mini_key in self.reward_config[key]:
                self.reward_config[key][mini_key] = torch.tensor(
                    self.reward_config[key][mini_key], device=rl_device
                )
        self.push_time = self.cfg["sim"]["pushTime"]
        # reward scales
        self.rew_scales = {}
        self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"][
            "linearVelocityXYRewardScale"
        ]
        self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"][
            "angularVelocityZRewardScale"
        ]
        self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]

        # randomization
        self.randomization_params = self.cfg["task"]["randomization_params"]
        self.randomize = self.cfg["task"]["randomize"]

        # command ranges
        self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"][
            "linear_x"
        ]
        self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"][
            "linear_y"
        ]
        self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]

        # plane params
        self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
        self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
        self.plane_restitution = self.cfg["env"]["plane"]["restitution"]

        # base init state
        pos = self.cfg["env"]["baseInitState"]["pos"]
        rot = self.cfg["env"]["baseInitState"]["rot"]
        v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
        v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
        state = pos + rot + v_lin + v_ang

        self.base_init_state = state

        # Near-zero default joint angles: the limits midpoint is already added
        # to the actions via self._action_orig_mean in pre_physics_step, so the
        # defaults are the midpoints scaled down to effectively zero.
        near_zero_joint_angles = [
            (joint["lower"] + joint["upper"]) / 1000000.0 for joint in JOINTS.values()
        ]

        # default joint positions
        # self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]
        self.named_default_joint_angles = dict(zip(JOINTS.keys(), near_zero_joint_angles))
        self.cfg["env"]["numObservations"] = 65
        self.cfg["env"]["numActions"] = 12
        self.use_new_reward = self.cfg["env"]["learn"]["useNewReward"]
        
        self.stepper_state = 0
        
        super().__init__(
            config=self.cfg,
            rl_device=rl_device,
            sim_device=sim_device,
            graphics_device_id=graphics_device_id,
            headless=headless,
            virtual_screen_capture=virtual_screen_capture,
            force_render=force_render,
        )

        # other
        self.dt = self.sim_params.dt
        self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
        self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)

        for key in self.rew_scales.keys():
            self.rew_scales[key] *= self.dt

        if self.viewer is not None:
            p = self.cfg["env"]["viewer"]["pos"]
            lookat = self.cfg["env"]["viewer"]["lookat"]
            cam_pos = gymapi.Vec3(p[0], p[1], p[2])
            cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # get gym state tensors
        actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
        rigid_body_state = self.gym.acquire_rigid_body_state_tensor(self.sim)
        dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
        net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
        torques = self.gym.acquire_dof_force_tensor(self.sim)
        sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)
        sensors_per_env = 2

        self.gym.refresh_dof_state_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_net_contact_force_tensor(self.sim)
        self.gym.refresh_force_sensor_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)
        self.change_freq = False
        
        # create some wrapper tensors for different slices
        self.clocks = torch.rand(
            self.num_envs, dtype=torch.float, device=self.device, requires_grad=False
        )
        
        # round to the closest multiple of dt
        self.clocks -= self.clocks % self.dt
        self.root_states = gymtorch.wrap_tensor(actor_root_state)
        self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_state).view(
            self.num_envs, -1, 13
        )
        self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
        self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
        self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
        self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(
            self.num_envs, -1, 3
        )  # shape: num_envs, num_bodies, xyz axis
        self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof)
        self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(
            self.num_envs, sensors_per_env * 6
        )

        self.commands = torch.zeros(
            self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False
        )
        
        self.commands_y = self.commands.view(self.num_envs, 3)[..., 1]
        self.commands_x = self.commands.view(self.num_envs, 3)[..., 0]
        self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2]
        self.default_dof_pos = torch.zeros_like(
            self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False
        )
        self.cmd_buffer = torch.zeros(
            self.num_envs, 12, dtype=torch.float, device=self.device, requires_grad=False
        )
        self.used_quantities = {
            "odometry_x": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "odometry_y": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "odometry_roll": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "odometry_pitch": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "odometry_yaw": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "left_contact_cost": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "right_contact_cost": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "velocity_left": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "velocity_right": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "torque": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
            "friction": (
                torch.tensor(0.0, device=self.device),
                torch.tensor(1.0, device=self.device),
            ),
        }
        
        self.first_call = True

        for i in range(self.cfg["env"]["numActions"]):
            name = self.dof_names[i]
            angle = self.named_default_joint_angles[name]
            self.default_dof_pos[:, i] = angle

        # initialize some data used later on
        self.extras = {}
        self.initial_root_states = self.root_states.clone()
        self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
        self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
        self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
        self._initialize_observation_space()
        self.reset_idx(torch.arange(self.num_envs, device=self.device))


        
    def _initialize_observation_space(self) -> None:
        # Extract some proxies
        nflex = 12
        nmotors = 12

        # Per-group [lower; upper] bounds, concatenated in observation order and
        # used to clamp each slice of the observation vector.
        self.observation_space_bounds = [
                torch.tensor([[-1.0], [1.0]]),  # Cosine
                torch.tensor([[-1.0], [1.0]]),  # Sine
                torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]),  # Base orientation
                torch.tensor([[-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]]),  # Base angular velocity
                torch.tensor([[0.0,-1.0,-1.0], [2.0,1.0,1.0]]),  # Contact wrenches
                torch.tensor([[0.0,-1.0,-1.0], [2.0,1.0,1.0]]),  # Contact wrenches
                torch.tensor([self.low_limit.tolist(), self.high_limit.tolist()]),  # Joint positions
                torch.tensor([[-torch.inf] * nmotors, [torch.inf] * nmotors]),  # Joint velocities
                torch.tensor([[-np.pi]* nflex , [np.pi]* nflex]),  # flexes
                torch.tensor([self.low_limit.tolist(), self.high_limit.tolist()]),  # cmd_positions
                torch.tensor([[-np.inf,-np.inf, -np.pi], [np.inf,np.inf, np.pi]]),  # cmd_xyy
            ]
        for i in range(len(self.observation_space_bounds)):
            self.observation_space_bounds[i] = self.observation_space_bounds[i].to(self.device)
        
    def create_sim(self):
        self.up_axis_idx = 2  # index of up axis: Y=1, Z=2
        self.sim = super().create_sim(
            self.device_id,
            self.graphics_device_id,
            self.physics_engine,
            self.sim_params,
        )
        self._create_ground_plane()
        self._create_envs(
            self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))
        )

        # If randomizing, apply once immediately on startup before the first sim step
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

    def _create_ground_plane(self):
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
        plane_params.static_friction = self.plane_static_friction
        plane_params.dynamic_friction = self.plane_dynamic_friction
        self.gym.add_ground(self.sim, plane_params)

    def _create_envs(self, num_envs, spacing, num_per_row):
        asset_root = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "../../assets"
        )
        asset_file = "urdf/eve/eve_beta/exo_with_patient.urdf"

        asset_options = gymapi.AssetOptions()
        asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE
        asset_options.collapse_fixed_joints = True
        asset_options.replace_cylinder_with_capsule = True
        asset_options.flip_visual_attachments = False
        asset_options.fix_base_link = self.cfg["env"]["urdfAsset"]["fixBaseLink"]
        # asset_options.density = 0.001
        # asset_options.thickness = 0.01
        asset_options.disable_gravity = False

        beta_asset = self.gym.load_asset(
            self.sim, asset_root, asset_file, asset_options
        )
        self.num_dof = self.gym.get_asset_dof_count(beta_asset)
        self.num_bodies = self.gym.get_asset_rigid_body_count(beta_asset)

        start_pose = gymapi.Transform()
        start_pose.p = gymapi.Vec3(*self.base_init_state[:3])

        body_names = self.gym.get_asset_rigid_body_names(beta_asset)
        self.dof_names = self.gym.get_asset_dof_names(beta_asset)
        
        extremity_name = "FrontalAnkle"
        feet_names = [s for s in body_names if extremity_name in s]

        self.feet_indices = torch.zeros(
            len(feet_names), dtype=torch.long, device=self.device, requires_grad=False
        )
        knee_names = [s for s in body_names if "Knee" in s]
        self.knee_indices = torch.zeros(
            len(knee_names), dtype=torch.long, device=self.device, requires_grad=False
        )
        self.base_index = 0
        # add feet force sensors, using asset-local body indices
        # (self.feet_indices is only filled in after the actors are created)
        sensor_pose = gymapi.Transform()
        for foot_name in feet_names:
            foot_index = self.gym.find_asset_rigid_body_index(beta_asset, foot_name)
            self.gym.create_asset_force_sensor(beta_asset, foot_index, sensor_pose)

        dof_props = self.gym.get_asset_dof_properties(beta_asset)
        for i in range(self.num_dof):
            dof_props["driveMode"][i] = gymapi.DOF_MODE_POS
            dof_props["stiffness"][i] = KP[i]
            dof_props["damping"][i] = KD[i] * KD[i]
        env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        env_upper = gymapi.Vec3(spacing, spacing, spacing)
        self.beta_handles = []

        self.envs = []

        for i in range(self.num_envs):
            # create env instance
            env_ptr = self.gym.create_env(self.sim, env_lower, env_upper, num_per_row)
            eve_beta_handle = self.gym.create_actor(
                env_ptr, beta_asset, start_pose, "beta", i, 1, 0
            )
            self.gym.set_actor_dof_properties(env_ptr, eve_beta_handle, dof_props)
            self.gym.enable_actor_dof_force_sensors(env_ptr, eve_beta_handle)
            self.envs.append(env_ptr)
            self.beta_handles.append(eve_beta_handle)

        for i in range(len(feet_names)):
            self.feet_indices[i] = self.gym.find_actor_rigid_body_handle(
                self.envs[0], self.beta_handles[0], feet_names[i]
            )
        for i in range(len(knee_names)):
            self.knee_indices[i] = self.gym.find_actor_rigid_body_handle(
                self.envs[0], self.beta_handles[0], knee_names[i]
            )

        self.base_index = self.gym.find_actor_rigid_body_handle(
            self.envs[0], self.beta_handles[0], "PelvisLink"
        )

    def pre_physics_step(self, actions):
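        # Mirror, denormalize, and derivative-compensate the policy actions,
        # then send them to the simulator as PD position targets.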
        
        actions = actions.to(self.rl_device)

        # modified_actions = actions + KD.to(self.rl_device) * (actions - self.normalize_actions(self.cmd_buffer)) / self.dt
        # actions = modified_actions.clone()

        # mirror the actions of environments in the second half of the gait cycle
        actions[self.clocks >= 0.5] = actions[self.clocks >= 0.5] @ ACTIONS_MIRROR_MAT.to(self.rl_device)

        self.actions = actions.clone()

        # map the normalized actions to joint-space position targets
        targets = (
            self._action_orig_mean
            + self.actions * self._action_orig_dev
            + self.default_dof_pos
        )

        # add a derivative term so the PD controller anticipates the target motion
        modified_targets = targets + KD.to(self.rl_device) * (targets - self.cmd_buffer) / self.dt

        self.cmd_buffer = targets.clone()

        self.gym.set_dof_position_target_tensor(
            self.sim, gymtorch.unwrap_tensor(modified_targets)
        )

    def post_physics_step(self):
        self.progress_buf += 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(env_ids) > 0:
            self.reset_idx(env_ids)
        if self.stepper_state % self.push_freq <= self.push_time:
            self.push_robots()
        elif self.stepper_state % self.push_freq == self.push_time + 1:
            self.change_freq = True
        if self.change_freq:
            # resample the push frequency within ±50% of its original value
            self.push_freq = self.original_push_freq * (1 + torch_rand_float(-0.5, 0.5, (1, 1), device=self.device))
            self.push_freq = self.push_freq.clamp(1, 1000).int().item()
            self.change_freq = False
            print("new push freq ", self.push_freq)
        if self.stepper_state % self.log_freq == 0:
            print("action sample ", self.actions[0].cpu().numpy() )
            print("observation sample ", self.obs_buf[0])
        self.compute_observations()
        self.compute_reward(self.actions)
        self.clocks += self.dt
        self.clocks %= 1.0

    def compute_reward(self, actions):
        if not self.use_new_reward:
            self.rew_buf[:], (r1, r2, r3) = compute_eve_reward(
                self.root_states,
                self.commands,
                self.torques,
                self.rew_scales,
            )
            if self.stepper_state % self.log_freq == 0:
                print(
                    "partial rewards samples ",
                    r1.mean().item(),
                    r2.mean().item(),
                    r3.mean().item(),
                    "total reward ",
                    self.rew_buf.mean().item(),
                )

        else:
            (
                self.rew_buf[:],
                self.used_quantities,
                rewards,
            ) = compute_eve_cassie_reward(
                self.root_states,
                self.commands,
                self.rigid_body_states,
                self.torques,
                self.contact_forces,
                self.vec_sensor_tensor,
                self.clocks,
                self.feet_indices,
                self.reward_config,
                self.used_quantities,
                self.first_call,
            )
            self.first_call = False
            if self.stepper_state % self.log_freq == 0:
                print("rewards : ")
                for key in rewards:
                    print(f"{key} : {np.round(rewards[key].mean().item(), 3)}", end=" ")
                print("total reward ", self.rew_buf.mean().item())


        self.reset_buf[:] = compute_resets(
            self.contact_forces,
            self.base_index,
            self.knee_indices,
            self.root_states,
            self.progress_buf,
            self.max_episode_length,
            self.rigid_body_states,
            self.feet_indices,
        )

    def push_robots(self):
        if self.stepper_state % self.push_freq == 0:
            # sample a new random horizontal push on the base body of every robot
            self.forces = torch.zeros((self.num_envs, self.num_bodies, 3), device=self.device)
            self.forces[:, 0, :2] = torch_rand_float(-50, 50, (self.num_envs, 2), device=self.device)
            print(f"pushing robots {self.push_freq}")

            self.forces = self.forces.reshape((self.num_envs * self.num_bodies, 3))
        self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.forces), None, gymapi.LOCAL_SPACE)



    def compute_observations(self):

        self.gym.refresh_dof_state_tensor(self.sim)  # done in step
        self.gym.refresh_actor_root_state_tensor(self.sim)
        self.gym.refresh_net_contact_force_tensor(self.sim)
        self.gym.refresh_dof_force_tensor(self.sim)
        self.gym.refresh_force_sensor_tensor(self.sim)
        self.gym.refresh_rigid_body_state_tensor(self.sim)
        
        self.stepper_state += 1
        self.obs_buf[:] = compute_eve_beta_observation_jiminy(
            self.root_states,
            self.commands,
            self.dof_pos,
            self.dof_vel,
            self.clocks,
            self.vec_sensor_tensor,
            self.contact_forces[:, self.feet_indices],
            self.contact_wrenches_norm,
            self.observation_space_bounds,
            OBSERVATION_MIRROR_MAT.to(self.rl_device),
            self.gravity_vec,
            self.cmd_buffer,
        )

    def reset_idx(self, env_ids):
        # Randomization can happen only at reset time, since it can reset actor positions on GPU
        if self.randomize:
            self.apply_randomizations(self.randomization_params)

        positions_offset = torch_rand_float(
            0.5, 1.5, (len(env_ids), self.num_dof), device=self.rl_device
        )
        velocities = torch_rand_float(
            -0.1, 0.1, (len(env_ids), self.num_dof), device=self.rl_device
        )

        self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
        self.dof_vel[env_ids] = velocities

        env_ids_int32 = env_ids.to(dtype=torch.int32)
        self.clocks[env_ids] = torch_rand_float(
            0, 1, (len(env_ids), 1), device=self.device
        ).squeeze()
        self.clocks[env_ids] -= self.clocks[env_ids] % self.dt
        self.gym.set_actor_root_state_tensor_indexed(
            self.sim,
            gymtorch.unwrap_tensor(self.initial_root_states),
            gymtorch.unwrap_tensor(env_ids_int32),
            len(env_ids_int32),
        )

        self.gym.set_dof_state_tensor_indexed(
            self.sim,
            gymtorch.unwrap_tensor(self.dof_state),
            gymtorch.unwrap_tensor(env_ids_int32),
            len(env_ids_int32),
        )

        self.commands_x[env_ids] = torch_rand_float(
            self.command_x_range[0],
            self.command_x_range[1],
            (len(env_ids), 1),
            device=self.device,
        ).squeeze()
        self.commands_y[env_ids] = torch_rand_float(
            self.command_y_range[0],
            self.command_y_range[1],
            (len(env_ids), 1),
            device=self.device,
        ).squeeze()
        self.commands_yaw[env_ids] = torch_rand_float(
            self.command_yaw_range[0],
            self.command_yaw_range[1],
            (len(env_ids), 1),
            device=self.device,
        ).squeeze()

        self.progress_buf[env_ids] = 0
        self.reset_buf[env_ids] = 1


#####################################################################
###=========================jit functions=========================###
#####################################################################


@torch.jit.script
def rpy_from_quat(quats):
    # Convert the base quaternions, shape (num_envs, 4), to roll-pitch-yaw
    # angles, shape (num_envs, 3).

    rpy = torch.zeros(quats.shape[0], 3, device=quats.device)

    rpy[:, 0] = torch.atan2(
        2 * (quats[:, 0] * quats[:, 1] + quats[:, 2] * quats[:, 3]),
        1 - 2 * (quats[:, 1] * quats[:, 1] + quats[:, 2] * quats[:, 2]),
    )
    rpy[:, 1] = torch.asin(2 * (quats[:, 0] * quats[:, 2] - quats[:, 3] * quats[:, 1]))
    rpy[:, 2] = torch.atan2(
        2 * (quats[:, 0] * quats[:, 3] + quats[:, 1] * quats[:, 2]),
        1 - 2 * (quats[:, 2] * quats[:, 2] + quats[:, 3] * quats[:, 3]),
    )

    return rpy


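# Smooth periodic stance-phase indicator: approximately 1 while the gait clock
# x lies in the stance window [phi_stance_start, phi_stance_end] (mod 1) and
# approximately 0 otherwise; kappa sets the sharpness of the tanh transitions.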
@torch.jit.script
def I_weight_force(x, kappa, phi_stance_start, phi_stance_end):
    # Compute Stance Phase
    # type: (Tensor, float, float, float) -> Tensor
    dphi = (
        phi_stance_end - phi_stance_start
        if phi_stance_end > phi_stance_start
        else 1 - phi_stance_start + phi_stance_end
    )

    scale = kappa * torch.sin(-torch.pi * ((dphi) + 0.5))
    theta = (torch.asin(scale / kappa) / (2 * torch.pi) - phi_stance_end) % 1
    return 0.5 * torch.tanh(kappa * torch.sin(2 * torch.pi * (x + theta)) - scale) + 0.5


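# Min-max normalize x relative to the running bounds [mini, maxi].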
@torch.jit.script
def normalize_component(x, mini, maxi):
    # type: (Tensor, Tensor, Tensor) -> Tensor
    return (x - mini) / (maxi - mini)


@torch.jit.script
def compute_eve_reward(
    # tensors
    root_states,
    commands,
    torques,
    # Dict
    rew_scales,
    # other
):
    # (reward, reset, feet_in air, feet_air_time, episode sums)
    # type: (Tensor, Tensor, Tensor, Dict[str, float]) -> Tuple[Tensor , Tuple[Tensor,Tensor,Tensor]]

    # Prepare quantities (TODO: return from obs ?)
    base_quat = root_states[:, 3:7]
    base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10])
    base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13])

    # Velocity tracking reward
    lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
    ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])
    ang_vel_error = torch.zeros_like(ang_vel_error)  # yaw-rate tracking disabled: its reward term is always maximal
    rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * rew_scales["lin_vel_xy"]
    rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * rew_scales["ang_vel_z"]

    # Torque penalty
    rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"]

    total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque

    total_reward.clamp_(0.0)  # In-place clipping

    return total_reward, (rew_lin_vel_xy, rew_ang_vel_z, rew_torque)


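# Cassie-style clocked reward: velocity and heading tracking, plus contact-force
# and swing-velocity terms alternated between the feet by the gait clock; each
# cost is min-max normalized with running statistics kept in used_quantities.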
@torch.jit.script
def compute_eve_cassie_reward(
    # tensors
    root_states,
    commands,
    rigid_body_states,
    torques,
    contact_forces,
    sensor_tensor,
    clocks,
    feet_indices,
    # Dict
    reward_config,
    # other
    used_quantities,
    first_call=False,
):
    # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, Dict[str,Tensor]], Dict[str,Tuple[Tensor,Tensor]], bool) -> Tuple[Tensor, Dict[str,Tuple[Tensor,Tensor]], Dict[str,Tensor]]
    base_quat = root_states[:, 3:7]
    base_velocity_mean_local = quat_rotate_inverse(base_quat, root_states[:, 7:10])
    kappa = reward_config["bipedal"]["kappa_equivalent"]

    x = clocks

    component_odometry_x = normalize_component(
        torch.square(base_velocity_mean_local[:, 0] - commands[:, 0]),
        used_quantities["odometry_x"][0],
        used_quantities["odometry_x"][1],
    )

    component_odometry_y = normalize_component(
        torch.square(base_velocity_mean_local[:, 1] - commands[:, 1]),
        used_quantities["odometry_y"][0],
        used_quantities["odometry_y"][1],
    )

    reward_xy = torch.exp(
        -reward_config["factor"]["omega"] * (component_odometry_x + component_odometry_y) / 2.0
    )
    
    base_rpy = rpy_from_quat(base_quat)
    roll, pitch, yaw = (
        base_rpy[:, 0],
        base_rpy[:, 1],
        base_rpy[:, 2],
    )
    
    component_odometry_roll = normalize_component(
            torch.square(
                roll - reward_config["odometry_roll"]["target"]
            ),
            used_quantities["odometry_roll"][0],
            used_quantities["odometry_roll"][1],
        )
    
    component_odometry_pitch = normalize_component(
            torch.square(
                pitch - reward_config["odometry_pitch"]["target"]
            ),
            used_quantities["odometry_pitch"][0],
            used_quantities["odometry_pitch"][1],
        )
    
    component_odometry_yaw = normalize_component(
            torch.square((yaw - commands[:, 2])),
            used_quantities["odometry_yaw"][0],
            used_quantities["odometry_yaw"][1],
        )
    reward_orientation = torch.exp(
        - reward_config["factor"]["omega"] * (component_odometry_roll + component_odometry_pitch + component_odometry_yaw) / 3.0
    )
    
    phi_stance_start_l = reward_config["bipedal"]["theta_left"]
    phi_stance_start_r = reward_config["bipedal"]["theta_right"]
    
    stance_times = (
        reward_config["bipedal"]["stance_time_left"],
        reward_config["bipedal"]["stance_time_right"],
    )

    left_phase_weight_force = I_weight_force(
        x, kappa, phi_stance_start_l, phi_stance_start_l + stance_times[0]
    )
    left_phase_weight_speed = 1 - left_phase_weight_force
    right_phase_weight_force = I_weight_force(
        x, kappa, phi_stance_start_r, phi_stance_start_r + stance_times[1]
    )
    right_phase_weight_speed = 1 - right_phase_weight_force

    foot_forces = contact_forces[:, feet_indices].view(-1, 2, 3)

    ######## Alternating contact reward ########
    left_contact_cost = torch.square(sensor_tensor[:, [2]].view(-1))
    right_contact_cost = torch.square(sensor_tensor[:, [8]].view(-1))

    reward_contact_left = left_phase_weight_force * torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
            left_contact_cost,
            used_quantities["left_contact_cost"][0],
            used_quantities["left_contact_cost"][1],
        )
    )

    reward_contact_right = right_phase_weight_force * torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
            right_contact_cost,
            used_quantities["right_contact_cost"][0],
            used_quantities["right_contact_cost"][1],
        )
    )

    foot_vel = rigid_body_states[:, feet_indices, 7:10]
    left_foot_velocity_local = torch.sum(torch.square(foot_vel[:, 0, :]),dim = 1)
    right_foot_velocity_local = torch.sum(torch.square(foot_vel[:, 1, :]),dim = 1)
    
    reward_velocity_left = left_phase_weight_speed * torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
            left_foot_velocity_local,
            used_quantities["velocity_left"][0],
            used_quantities["velocity_left"][1],
        )
    )
    reward_velocity_right = right_phase_weight_speed * torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
            right_foot_velocity_local,
            used_quantities["velocity_right"][0],
            used_quantities["velocity_right"][1],
        )
    )

    reward_bipedal = (reward_contact_left + reward_contact_right + reward_velocity_left + reward_velocity_right) / 4.0

    reward_torque = - torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
             torch.sum(torch.square(torques), dim=1),
            used_quantities["torque"][0],
            used_quantities["torque"][1],
        )
    )
    
    reward_friction = torch.exp(
        -reward_config["factor"]["omega"]
        * normalize_component(
            torch.sum(torch.square(foot_forces[:, 0, 0:2]), dim=1),
            used_quantities["friction"][0],
            used_quantities["friction"][1],
        )
    )

    used_quantities_temp = {
        "odometry_x": torch.square(
            (base_velocity_mean_local[:, 0] - commands[:, 0])
        ),
        "odometry_y": torch.square(
            (base_velocity_mean_local[:, 1] - commands[:, 1])
        ),
        "odometry_roll": torch.square(
            roll - reward_config["odometry_roll"]["target"]
        ),
        "odometry_pitch": torch.square(
            pitch - reward_config["odometry_pitch"]["target"]
        ),
        "odometry_yaw": torch.square(
            yaw - reward_config["odometry_yaw"]["target"]
        ),
        "left_contact_cost": left_contact_cost,
        "right_contact_cost": right_contact_cost,
        "velocity_left": left_foot_velocity_local,
        "velocity_right": right_foot_velocity_local,
        "torque": torch.sum(torch.square(torques), dim=1),
        "friction": torch.sum(torch.square(foot_forces[:, 0, 0:2]), dim=1),
    }

    # Track a running (min, max) for each quantity: expand toward new extrema
    # with exponential smoothing (factor 1/5), never shrink.
    for key in used_quantities_temp:
        if key not in used_quantities or first_call:
            used_quantities[key] = (
                used_quantities_temp[key].min(),
                used_quantities_temp[key].max(),
            )
        new_min = used_quantities_temp[key].min()
        new_max = used_quantities_temp[key].max()
        old_min, old_max = used_quantities[key]
        used_quantities[key] = (
            (new_min + 4.0 * old_min) / 5.0 if new_min < old_min else old_min,
            (new_max + 4.0 * old_max) / 5.0 if new_max > old_max else old_max,
        )

    rewards = {
        "reward_xy": reward_xy,
        "reward_orientation": reward_orientation,
        "bipedal": reward_bipedal,
        "torque": reward_torque,
        "friction": reward_friction,
    }

        

    # fixed baseline blend of the reward terms, normalized again below together
    # with the config-weighted sum
    total_reward = (
        20.0 * rewards["reward_xy"]
        + 4.0 * rewards["reward_orientation"]
        + 1.0 * rewards["bipedal"]
        + 1.0 * rewards["torque"]
        + 1.0 * rewards["friction"]
    ) / 27.0

    weights_sum = torch.tensor(0.0, device=total_reward.device)
    for key in rewards:
        total_reward += reward_config[key]["weight"] * rewards[key]
        weights_sum += reward_config[key]["weight"]

    total_reward /= weights_sum
    
    return total_reward, used_quantities, rewards



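# Episode termination: base or knee contact, pelvis below 0.6 m, laterally
# crossed feet, or timeout.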
@torch.jit.script
def compute_resets(
    contact_forces,
    base_index,
    knee_indices,
    root_states,
    episode_lengths,
    max_episode_length,
    rigid_body_states,
    feet_indices,
):
    # type: (Tensor, int, Tensor, Tensor, Tensor, int, Tensor, Tensor) -> Tensor
    base_quat = root_states[:, 3:7]
    # reset agents
    reset = torch.norm(contact_forces[:, base_index, :], dim=1) > 1.0
    reset = reset | torch.any(
        torch.norm(contact_forces[:, knee_indices, :], dim=2) > 1.0, dim=1
    )

    # pelvis height can't be lower than 0.6
    reset = reset | (root_states[:, 2] < 0.6)

    # base_rpy = rpy_from_quat(base_quat)

    # # make sure pitch and roll are within 0.3 radians
    # reset = (
    #     reset | (torch.abs(base_rpy[:, 0]) > 0.3) | (torch.abs(base_rpy[:, 1]) > 0.3)
    # )

    time_out = (
        episode_lengths >= max_episode_length - 1
    )  # no terminal reward for time-outs

    reset = reset | time_out

    # print("contacts on feet : ", contact_forces[0, feet_indices, :])
    # reset when the feet come laterally closer than 0.15 m (crossed legs)
    feet_pos = rigid_body_states[:, feet_indices, :2]
    reset = reset | (
        torch.abs(feet_pos[:, 0, 1] - feet_pos[:, 1, 1]) < 0.15
    )  # | (torch.abs(feet_pos[:, 0, 1] - feet_pos[:, 1, 1]) > 0.5)

    # print(f"reward_xy: {  gymtorch.unwrap_tensor(rew_lin_vel_xy).mean()} reward_z: {gymtorch.unwrap_tensor(rew_ang_vel_z).mean()} reward_torque: {gymtorch.unwrap_tensor(rew_torque).mean()}")

    return reset


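# Build the 65-dimensional observation:
#   0-1   gait clock (cos, sin)
#   2-4   projected gravity
#   5-7   base angular velocity
#   8-13  left/right foot contact wrenches (normalized)
#   14-25 joint positions
#   26-37 joint velocities
#   38-49 flex placeholder (random noise)
#   50-61 previous position targets (cmd_buffer)
#   62-64 velocity commands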
@torch.jit.script
def compute_eve_beta_observation_jiminy(
    root_states,
    commands,
    dof_pos,
    dof_vel,
    clock,
    foot_sensors,
    foot_forces,
    contact_wrenches_norm,
    spaces_bounds,
    obs_mirror_mat,
    gravity_vec,
    command_buffer
):
    # type: (Tensor, Tensor, Tensor,Tensor,Tensor,Tensor, Tensor ,Tensor, List[Tensor] ,Tensor,Tensor, Tensor) -> Tensor
    base_quat = root_states[:, 3:7]
    base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13])
#    base_rotation = rpy_from_quat(base_quat)
    projected_gravity = quat_rotate(base_quat, gravity_vec)


    obs = torch.cat(
            (
                torch.cos(clock.view(-1,1) * 2 * torch.pi),
                torch.sin(clock.view(-1,1) * 2 * torch.pi),
                projected_gravity.view(-1, 3),
                base_ang_vel,
                foot_forces[:,0,2].view(-1,1) / contact_wrenches_norm[0],
                foot_sensors[:,[3, 4]] / contact_wrenches_norm[1:3],
                foot_forces[:,1,2].view(-1,1) / contact_wrenches_norm[3],
                foot_sensors[:,[9, 10]] / contact_wrenches_norm[4:6],
                dof_pos,
                dof_vel,
                torch.randn((root_states.shape[0], 12), device=root_states.device),  # placeholder noise standing in for flex measurements
                command_buffer, 
                commands,

            ),
            dim=-1,
        )

    # Clip each observation group to its bounds
    u = 0
    for bound in spaces_bounds:
        lower_bound, upper_bound = bound[0], bound[1]
        obs[:, u: u + lower_bound.shape[0]].clamp_(lower_bound, upper_bound)
        u += lower_bound.shape[0]

    # Mirror the observations in the second half of the gait cycle, matching
    # the action mirroring in pre_physics_step
    obs[clock >= 0.5] = obs[clock >= 0.5] @ obs_mirror_mat

    return obs