Increased memory consumption during IsaacGym simulation #74
-
Hello, below is my main script: I copied and pasted it from the FrankaCabinet example and changed some hyperparameters. Thanks.

```python
import isaacgym
import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_isaacgym_env_preview4
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# Define the shared model (stochastic and deterministic models) for the agent using mixins.
class Shared(GaussianMixin, DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
                                 nn.ELU(),
                                 nn.Linear(256, 128),
                                 nn.ELU(),
                                 nn.Linear(128, 64),
                                 nn.ELU())

        self.mean_layer = nn.Linear(64, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

        self.value_layer = nn.Linear(64, 1)

    def act(self, inputs, role):
        if role == "policy":
            return GaussianMixin.act(self, inputs, role)
        elif role == "value":
            return DeterministicMixin.act(self, inputs, role)

    def compute(self, inputs, role):
        if role == "policy":
            return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
        elif role == "value":
            return self.value_layer(self.net(inputs["states"])), {}
# Load and wrap the Isaac Gym environment
env = load_isaacgym_env_preview4(task_name="FrankaCubeStack") # preview 3 and 4 use the same loader
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Shared(env.observation_space, env.action_space, device)
models_ppo["value"] = models_ppo["policy"] # same instance: shared model
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 32 # memory_size
cfg_ppo["learning_epochs"] = 5
cfg_ppo["mini_batches"] = 16
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
cfg_ppo["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 1.
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# log to TensorBoard every 120 timesteps and write checkpoints every 1200 timesteps
cfg_ppo["experiment"]["write_interval"] = 120
cfg_ppo["experiment"]["checkpoint_interval"] = 1200
cfg_ppo["experiment"]["wandb"] = True
cfg_ppo["experiment"]["wandb_kwargs"] = {
"project": "skrl",
"name": "test"
}
agent = PPO(models=models_ppo,
            memory=memory,
            cfg=cfg_ppo,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 320000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
```
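To put a number on the growth, a check along these lines could be run around the training call (the `log_cuda_memory` helper below is just an illustration, not part of skrl or IsaacGym):

```python
import torch

# illustrative helper: report PyTorch's CUDA allocator statistics for one device
def log_cuda_memory(tag, dev="cuda:0"):
    allocated = torch.cuda.memory_allocated(dev) / 2**20
    reserved = torch.cuda.memory_reserved(dev) / 2**20
    print(f"[{tag}] allocated: {allocated:.1f} MiB | reserved: {reserved:.1f} MiB")

log_cuda_memory("before training")
trainer.train()
log_cuda_memory("after training")
```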
-
Hi @LucaZanatta, thank you very much for giving skrl a try.
-
Hi @LucaZanatta, sorry for the late response. I have found the problem.

Problem: For some reason, the IsaacGymEnvs' `FrankaCubeStack` task is enabling gradient computation for the `rew_buf`, which generates a constant increase in GPU memory. You can test it yourself with the following snippet:

```python
import isaacgym
import isaacgymenvs
import torch
num_envs = 2
envs = isaacgymenvs.make(seed=0,
                         task="FrankaCubeStack",
                         num_envs=num_envs,
                         sim_device="cuda:0",
                         rl_device="cuda:0",
                         headless=True)

obs = envs.reset()

for _ in range(20):
    random_actions = 2.0 * torch.rand((num_envs,) + envs.action_space.shape, device="cuda:0") - 1.0
    obs_dict, rew_buf, reset_buf, extras = envs.step(random_actions)
print("rew_buf.requires_grad:", rew_buf.requires_grad) Solution: Add replace skrl/skrl/trainers/torch/base.py Lines 166 to 167 in 6b8b70f with # step the environments
with torch.no_grad():
next_states, rewards, terminated, truncated, infos = self.env.step(actions) |
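As an alternative sketch (not something shipped in skrl, and untested here), the autograd graph could also be cut at the environment boundary rather than in the trainer, for example with a thin wrapper that detaches the tensors the task returns; the `RewardDetachWrapper` name below is purely illustrative:

```python
class RewardDetachWrapper:
    """Illustrative wrapper (hypothetical, not part of skrl or IsaacGymEnvs):
    detach the tensors a task returns so that stored rollouts no longer keep
    the task's autograd graph alive."""

    def __init__(self, env):
        self._env = env

    def __getattr__(self, name):
        # forward everything else (action_space, reset, ...) to the wrapped env
        return getattr(self._env, name)

    def step(self, actions):
        obs_dict, rew_buf, reset_buf, extras = self._env.step(actions)
        # .detach() returns tensors that are no longer connected to the graph
        return obs_dict, rew_buf.detach(), reset_buf.detach(), extras


# usage sketch: envs = RewardDetachWrapper(isaacgymenvs.make(...))
```

Either way, the key point is that no stored tensor should keep a reference to the task's computation graph.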