From a62b0e32bce8bec4a7f48660a916dc8757c73c08 Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Wed, 20 Mar 2024 14:30:50 +0000 Subject: [PATCH 1/8] robomaster navigation --- vmas/scenarios/robomaster/rm_navigation.py | 285 +++++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100644 vmas/scenarios/robomaster/rm_navigation.py diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py new file mode 100644 index 00000000..61dcdefb --- /dev/null +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -0,0 +1,285 @@ +# Copyright (c) 2022-2024. +# ProrokLab (https://www.proroklab.org/) +# All rights reserved. +from typing import Dict + +import torch +from torch import Tensor + +from vmas import render_interactively +from vmas.simulator.controllers.velocity_controller import VelocityController +from vmas.simulator.core import Agent, Box, Landmark, Sphere, World +from vmas.simulator.dynamics.holonomic import Holonomic +from vmas.simulator.scenario import BaseScenario +from vmas.simulator.utils import ScenarioUtils, TorchUtils + + +class Scenario(BaseScenario): + def make_world(self, batch_dim: int, device: torch.device, **kwargs): + # Robomaster + self.v_range = kwargs.get("v_range", 1) + self.a_range = kwargs.get("a_range", 1) + self.box_agents = kwargs.get("box_agents", False) + self.linear_friction = kwargs.get("linear_friction", 0.1) + self.min_input_norm = kwargs.get("min_input_norm", 0.08) + self.agent_radius = 0.16 + self.agent_box_length = 0.32 + self.agent_box_width = 0.24 + + # Controller + controller_params = [2, 6, 0.002] + self.f_range = self.a_range + self.linear_friction + self.u_range = self.v_range + + self.plot_grid = True + self.viewer_zoom = 2 + self.n_agents = kwargs.get("n_agents", 2) + + self.pos_shaping_factor = kwargs.get("pos_shaping_factor", 1) + self.final_reward = kwargs.get("final_reward", 0.01) + self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -5) + + self.min_distance_between_entities = self.agent_radius * 2 + 0.05 + self.world_semidim = 3 + self.min_collision_distance = 0.05 + + # Make world + world = World( + batch_dim, + device, + drag=0, + dt=0.05, + linear_friction=self.linear_friction, + substeps=16 if self.box_agents else 5, + collision_force=100000 if self.box_agents else 500, + ) + + known_colors = [ + (0.22, 0.49, 0.72), + (1.00, 0.50, 0), + (0.30, 0.69, 0.29), + (0.97, 0.51, 0.75), + (0.60, 0.31, 0.64), + (0.89, 0.10, 0.11), + (0.87, 0.87, 0), + ] + colors = torch.randn( + (max(self.n_agents - len(known_colors), 0), 3), device=device + ) + + # Add agents + for i in range(self.n_agents): + color = ( + known_colors[i] + if i < len(known_colors) + else colors[i - len(known_colors)] + ) + + # Constraint: all agents have same action range and multiplier + agent = Agent( + name=f"agent_{i}", + collide=True, + color=color, + rotatable=False, + linear_friction=self.linear_friction, + shape=( + Sphere(radius=self.agent_radius) + if not self.box_agents + else Box(length=self.agent_box_length, width=self.agent_box_width) + ), + u_range=self.u_range, + f_range=self.f_range, + v_range=self.v_range, + render_action=True, + dynamics=Holonomic(), + ) + agent.controller = VelocityController( + agent, world, controller_params, "standard" + ) + + agent.pos_rew = torch.zeros(batch_dim, device=device) + agent.agent_collision_rew = agent.pos_rew.clone() + agent.final_rew = agent.pos_rew.clone() + + world.add_agent(agent) + + # Add goals + goal = Landmark( + name=f"goal {i}", collide=False, color=color, 
shape=Sphere(radius=0.1) + ) + world.add_landmark(goal) + agent.goal = goal + + return world + + def reset_world_at(self, env_index: int = None): + ScenarioUtils.spawn_entities_randomly( + self.world.agents, + self.world, + env_index, + self.min_distance_between_entities, + (-self.world_semidim, self.world_semidim), + (-self.world_semidim, self.world_semidim), + ) + + occupied_positions = torch.stack( + [agent.state.pos for agent in self.world.agents], dim=1 + ) + if env_index is not None: + occupied_positions = occupied_positions[env_index].unsqueeze(0) + + goal_poses = [] + for _ in self.world.agents: + position = ScenarioUtils.find_random_pos_for_entity( + occupied_positions=occupied_positions, + env_index=env_index, + world=self.world, + min_dist_between_entities=self.min_distance_between_entities, + x_bounds=(-self.world_semidim, self.world_semidim), + y_bounds=(-self.world_semidim, self.world_semidim), + ) + goal_poses.append(position.squeeze(1)) + occupied_positions = torch.cat([occupied_positions, position], dim=1) + + for i, agent in enumerate(self.world.agents): + agent.controller.reset(env_index) + + agent.goal.set_pos(goal_poses[i], batch_index=env_index) + + if env_index is None: + agent.pos_shaping = ( + torch.linalg.vector_norm( + agent.state.pos - agent.goal.state.pos, + dim=1, + ) + * self.pos_shaping_factor + ) + else: + agent.pos_shaping[env_index] = ( + torch.linalg.vector_norm( + agent.state.pos[env_index] - agent.goal.state.pos[env_index] + ) + * self.pos_shaping_factor + ) + + def process_action(self, agent: Agent): + # Clamp square to circle + agent.action.u = TorchUtils.clamp_with_norm( + agent.action.u, agent.action.u_range + ) + + # Zero small input + action_norm = torch.linalg.vector_norm(agent.action.u, dim=1) + agent.action.u[action_norm < self.min_input_norm] = 0 + + # Reset controller + vel_is_zero = torch.linalg.vector_norm(agent.action.u, dim=1) < 1e-3 + agent.controller.reset(vel_is_zero) + + agent.controller.process_force() + + def reward(self, agent: Agent): + is_first = agent == self.world.agents[0] + + if is_first: + for a in self.world.agents: + self.agent_reward(a) + a.agent_collision_rew[:] = 0 + + for i, a in enumerate(self.world.agents): + for j, b in enumerate(self.world.agents): + if i <= j: + continue + distance = self.world.get_distance(a, b) + a.agent_collision_rew[ + distance <= self.min_collision_distance + ] += self.agent_collision_penalty + b.agent_collision_rew[ + distance <= self.min_collision_distance + ] += self.agent_collision_penalty + + return agent.pos_rew + agent.final_rew + agent.agent_collision_rew + + def agent_reward(self, agent: Agent): + agent.distance_to_goal = torch.linalg.vector_norm( + agent.state.pos - agent.goal.state.pos, + dim=-1, + ) + agent.on_goal = agent.distance_to_goal < agent.goal.shape.radius + + agent.final_rew = torch.where( + agent.on_goal, self.final_reward, torch.zeros_like(agent.final_rew) + ) + + pos_shaping = agent.distance_to_goal * self.pos_shaping_factor + agent.pos_rew = agent.pos_shaping - pos_shaping + agent.pos_shaping = pos_shaping + + def observation(self, agent: Agent): + return torch.cat( + [ + agent.state.pos, + agent.state.vel, + agent.state.pos - agent.goal.state.pos, + # agent.controller.accum_errs, + # agent.controller.prev_err, + ], + dim=-1, + ) + + def info(self, agent: Agent) -> Dict[str, Tensor]: + return { + "pos_rew": agent.pos_rew, + "final_rew": agent.final_rew, + "collision_rew": agent.agent_collision_rew, + } + + # def extra_render(self, env_index: int = 0): + # from 
vmas.simulator import rendering + # + # geoms = [] + # for i, entity_a in enumerate(self.world.agents): + # for j, entity_b in enumerate(self.world.agents): + # if i <= j: + # continue + # if not self.box_agents: + # point_a = entity_a.state.pos + # point_b = entity_b.state.pos + # dist = self.world.get_distance_from_point( + # entity_a, entity_b.state.pos, env_index + # ) + # dist = dist - entity_b.shape.radius + # else: + # point_a, point_b = _get_closest_box_box( + # entity_a.state.pos, + # entity_a.state.rot, + # entity_a.shape.width, + # entity_a.shape.length, + # entity_b.state.pos, + # entity_b.state.rot, + # entity_b.shape.width, + # entity_b.shape.length, + # ) + # dist = torch.linalg.vector_norm(point_a - point_b, dim=-1) + # + # for point in [point_a, point_b]: + # color = ( + # Color.BLACK.value + # if dist > self.min_collision_distance + # else Color.RED.value + # ) + # line = rendering.make_circle(0.03) + # xform = rendering.Transform() + # xform.set_translation(point[env_index][0], point[env_index][1]) + # line.add_attr(xform) + # line.set_color(*color) + # geoms.append(line) + # return geoms + # + + +if __name__ == "__main__": + render_interactively( + __file__, + control_two_agents=True, + ) From 92bee0bc4866d36eeb3fc27bf49ab519a51e776c Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Tue, 26 Mar 2024 12:46:29 +0100 Subject: [PATCH 2/8] use velocity controller --- vmas/scenarios/robomaster/rm_navigation.py | 37 ++++++++++++---------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index 61dcdefb..5d11b7ad 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -27,9 +27,10 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.agent_box_width = 0.24 # Controller + self.use_velocity_controller = kwargs.get("use_velocity_controller", True) controller_params = [2, 6, 0.002] self.f_range = self.a_range + self.linear_friction - self.u_range = self.v_range + self.u_range = self.v_range if self.use_velocity_controller else self.f_range self.plot_grid = True self.viewer_zoom = 2 @@ -81,7 +82,6 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): collide=True, color=color, rotatable=False, - linear_friction=self.linear_friction, shape=( Sphere(radius=self.agent_radius) if not self.box_agents @@ -93,9 +93,10 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): render_action=True, dynamics=Holonomic(), ) - agent.controller = VelocityController( - agent, world, controller_params, "standard" - ) + if self.use_velocity_controller: + agent.controller = VelocityController( + agent, world, controller_params, "standard" + ) agent.pos_rew = torch.zeros(batch_dim, device=device) agent.agent_collision_rew = agent.pos_rew.clone() @@ -142,7 +143,8 @@ def reset_world_at(self, env_index: int = None): occupied_positions = torch.cat([occupied_positions, position], dim=1) for i, agent in enumerate(self.world.agents): - agent.controller.reset(env_index) + if self.use_velocity_controller: + agent.controller.reset(env_index) agent.goal.set_pos(goal_poses[i], batch_index=env_index) @@ -163,20 +165,21 @@ def reset_world_at(self, env_index: int = None): ) def process_action(self, agent: Agent): - # Clamp square to circle - agent.action.u = TorchUtils.clamp_with_norm( - agent.action.u, agent.action.u_range - ) + if self.use_velocity_controller: + # Clamp square to circle + agent.action.u = 
TorchUtils.clamp_with_norm( + agent.action.u, agent.action.u_range + ) - # Zero small input - action_norm = torch.linalg.vector_norm(agent.action.u, dim=1) - agent.action.u[action_norm < self.min_input_norm] = 0 + # Zero small input + action_norm = torch.linalg.vector_norm(agent.action.u, dim=1) + agent.action.u[action_norm < self.min_input_norm] = 0 - # Reset controller - vel_is_zero = torch.linalg.vector_norm(agent.action.u, dim=1) < 1e-3 - agent.controller.reset(vel_is_zero) + # Reset controller + vel_is_zero = torch.linalg.vector_norm(agent.action.u, dim=1) < 1e-3 + agent.controller.reset(vel_is_zero) - agent.controller.process_force() + agent.controller.process_force() def reward(self, agent: Agent): is_first = agent == self.world.agents[0] From da0dc7155d3e88969910296d2b37351ef6e7d27f Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Tue, 26 Mar 2024 14:38:29 +0100 Subject: [PATCH 3/8] amend --- vmas/scenarios/robomaster/rm_navigation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index 5d11b7ad..41d439f7 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -224,8 +224,6 @@ def observation(self, agent: Agent): agent.state.pos, agent.state.vel, agent.state.pos - agent.goal.state.pos, - # agent.controller.accum_errs, - # agent.controller.prev_err, ], dim=-1, ) From 154e0a39afc88f93e8e32b458a3a596ae97de3fe Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Thu, 28 Mar 2024 14:44:23 +0100 Subject: [PATCH 4/8] amend --- vmas/scenarios/robomaster/rm_navigation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index 41d439f7..ee20b2d4 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -37,8 +37,8 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.n_agents = kwargs.get("n_agents", 2) self.pos_shaping_factor = kwargs.get("pos_shaping_factor", 1) - self.final_reward = kwargs.get("final_reward", 0.01) - self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -5) + self.final_reward = kwargs.get("final_reward", 0.005) + self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -1) self.min_distance_between_entities = self.agent_radius * 2 + 0.05 self.world_semidim = 3 From fc8ef7fa96b5c294f14ac7c4461253bbfe32928b Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Tue, 30 Apr 2024 16:09:48 +0100 Subject: [PATCH 5/8] amend --- .../robomaster/rm_navigation_square.py | 266 ++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100644 vmas/scenarios/robomaster/rm_navigation_square.py diff --git a/vmas/scenarios/robomaster/rm_navigation_square.py b/vmas/scenarios/robomaster/rm_navigation_square.py new file mode 100644 index 00000000..ef0965f3 --- /dev/null +++ b/vmas/scenarios/robomaster/rm_navigation_square.py @@ -0,0 +1,266 @@ +# Copyright (c) 2022-2024. +# ProrokLab (https://www.proroklab.org/) +# All rights reserved. 
+from typing import Dict + +import torch +from torch import Tensor + +from vmas import render_interactively +from vmas.simulator.controllers.velocity_controller import VelocityController +from vmas.simulator.core import Agent, Box, Landmark, Sphere, World +from vmas.simulator.dynamics.holonomic import Holonomic +from vmas.simulator.scenario import BaseScenario +from vmas.simulator.utils import TorchUtils + + +class Scenario(BaseScenario): + def make_world(self, batch_dim: int, device: torch.device, **kwargs): + # Robomaster + self.v_range = kwargs.get("v_range", 1) + self.a_range = kwargs.get("a_range", 1) + self.box_agents = kwargs.get("box_agents", False) + self.linear_friction = kwargs.get("linear_friction", 0.1) + self.min_input_norm = kwargs.get("min_input_norm", 0.08) + self.agent_radius = 0.16 + self.agent_box_length = 0.32 + self.agent_box_width = 0.24 + + # Controller + self.use_velocity_controller = kwargs.get("use_velocity_controller", True) + controller_params = [2, 6, 0.002] + self.f_range = self.a_range + self.linear_friction + self.u_range = self.v_range if self.use_velocity_controller else self.f_range + + self.plot_grid = False + self.viewer_zoom = 1 + self.n_agents = kwargs.get("n_agents", 4) + + self.pos_shaping_factor = kwargs.get("pos_shaping_factor", 1) + self.final_reward = kwargs.get("final_reward", 0.005) + self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -1) + + self.min_distance_between_entities = self.agent_radius * 2 + 0.05 + self.world_semidim = 3 + self.min_collision_distance = 0.05 + + # Make world + world = World( + batch_dim, + device, + drag=0, + dt=0.05, + linear_friction=self.linear_friction, + substeps=16 if self.box_agents else 5, + collision_force=100000 if self.box_agents else 500, + ) + + known_colors = [ + (1.00, 0.50, 0), + (0.60, 0.31, 0.64), + (0.30, 0.69, 0.29), + (0.22, 0.49, 0.72), + (0.97, 0.51, 0.75), + (0.89, 0.10, 0.11), + (0.87, 0.87, 0), + ] + colors = torch.randn( + (max(self.n_agents - len(known_colors), 0), 3), device=device + ) + + # Add agents + for i in range(self.n_agents): + color = ( + known_colors[i] + if i < len(known_colors) + else colors[i - len(known_colors)] + ) + + # Constraint: all agents have same action range and multiplier + agent = Agent( + name=f"agent_{i}", + collide=True, + color=color, + rotatable=False, + shape=( + Sphere(radius=self.agent_radius) + if not self.box_agents + else Box(length=self.agent_box_length, width=self.agent_box_width) + ), + u_range=self.u_range, + f_range=self.f_range, + v_range=self.v_range, + render_action=True, + dynamics=Holonomic(), + ) + if self.use_velocity_controller: + agent.controller = VelocityController( + agent, world, controller_params, "standard" + ) + + agent.pos_rew = torch.zeros(batch_dim, device=device) + agent.agent_collision_rew = agent.pos_rew.clone() + agent.final_rew = agent.pos_rew.clone() + + world.add_agent(agent) + + # Add goals + goal = Landmark( + name=f"goal {i}", collide=False, color=color, shape=Sphere(radius=0.1) + ) + world.add_landmark(goal) + agent.goal = goal + + return world + + def reset_world_at(self, env_index: int = None): + poses = [(-1, 1), (1, 1), (1, -1), (-1, -1)] + for agent, pos in zip(self.world.agents, poses): + agent.set_pos( + torch.tensor(pos, device=self.world.device, dtype=torch.float), + batch_index=env_index, + ) + agent.goal.set_pos( + -torch.tensor(pos, device=self.world.device, dtype=torch.float), + batch_index=env_index, + ) + + if self.use_velocity_controller: + agent.controller.reset(env_index) + + if 
env_index is None: + agent.pos_shaping = ( + torch.linalg.vector_norm( + agent.state.pos - agent.goal.state.pos, + dim=1, + ) + * self.pos_shaping_factor + ) + else: + agent.pos_shaping[env_index] = ( + torch.linalg.vector_norm( + agent.state.pos[env_index] - agent.goal.state.pos[env_index] + ) + * self.pos_shaping_factor + ) + + def process_action(self, agent: Agent): + if self.use_velocity_controller: + # Clamp square to circle + agent.action.u = TorchUtils.clamp_with_norm( + agent.action.u, agent.action.u_range + ) + + # Zero small input + action_norm = torch.linalg.vector_norm(agent.action.u, dim=1) + agent.action.u[action_norm < self.min_input_norm] = 0 + + # Reset controller + vel_is_zero = torch.linalg.vector_norm(agent.action.u, dim=1) < 1e-3 + agent.controller.reset(vel_is_zero) + + agent.controller.process_force() + + def reward(self, agent: Agent): + is_first = agent == self.world.agents[0] + + if is_first: + for a in self.world.agents: + self.agent_reward(a) + a.agent_collision_rew[:] = 0 + + for i, a in enumerate(self.world.agents): + for j, b in enumerate(self.world.agents): + if i <= j: + continue + distance = self.world.get_distance(a, b) + a.agent_collision_rew[ + distance <= self.min_collision_distance + ] += self.agent_collision_penalty + b.agent_collision_rew[ + distance <= self.min_collision_distance + ] += self.agent_collision_penalty + + return agent.pos_rew + agent.final_rew + agent.agent_collision_rew + + def agent_reward(self, agent: Agent): + agent.distance_to_goal = torch.linalg.vector_norm( + agent.state.pos - agent.goal.state.pos, + dim=-1, + ) + agent.on_goal = agent.distance_to_goal < agent.goal.shape.radius + + agent.final_rew = torch.where( + agent.on_goal, self.final_reward, torch.zeros_like(agent.final_rew) + ) + + pos_shaping = agent.distance_to_goal * self.pos_shaping_factor + agent.pos_rew = agent.pos_shaping - pos_shaping + agent.pos_shaping = pos_shaping + + def observation(self, agent: Agent): + return torch.cat( + [ + agent.state.pos, + agent.state.vel, + agent.state.pos - agent.goal.state.pos, + ], + dim=-1, + ) + + def info(self, agent: Agent) -> Dict[str, Tensor]: + return { + "pos_rew": agent.pos_rew, + "final_rew": agent.final_rew, + "collision_rew": agent.agent_collision_rew, + } + + # def extra_render(self, env_index: int = 0): + # from vmas.simulator import rendering + # + # geoms = [] + # for i, entity_a in enumerate(self.world.agents): + # for j, entity_b in enumerate(self.world.agents): + # if i <= j: + # continue + # if not self.box_agents: + # point_a = entity_a.state.pos + # point_b = entity_b.state.pos + # dist = self.world.get_distance_from_point( + # entity_a, entity_b.state.pos, env_index + # ) + # dist = dist - entity_b.shape.radius + # else: + # point_a, point_b = _get_closest_box_box( + # entity_a.state.pos, + # entity_a.state.rot, + # entity_a.shape.width, + # entity_a.shape.length, + # entity_b.state.pos, + # entity_b.state.rot, + # entity_b.shape.width, + # entity_b.shape.length, + # ) + # dist = torch.linalg.vector_norm(point_a - point_b, dim=-1) + # + # for point in [point_a, point_b]: + # color = ( + # Color.BLACK.value + # if dist > self.min_collision_distance + # else Color.RED.value + # ) + # line = rendering.make_circle(0.03) + # xform = rendering.Transform() + # xform.set_translation(point[env_index][0], point[env_index][1]) + # line.add_attr(xform) + # line.set_color(*color) + # geoms.append(line) + # return geoms + # + + +if __name__ == "__main__": + render_interactively( + __file__, + control_two_agents=True, + ) 
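Note (illustrative, not part of the patch series): rm_navigation_square.py pins the agents to the square corners (±1, ±1) and places each goal at the antipodal corner, so all agents must cross through the centre and negotiate collisions. Below is a minimal rollout sketch of how the new scenario might be driven programmatically; it assumes the standard vmas.make_env entry point, and uses stand-in zero velocity commands where a trained policy would go.

import torch
import vmas
from vmas.scenarios.robomaster.rm_navigation_square import Scenario

num_envs = 32
env = vmas.make_env(
    scenario=Scenario(),       # the class added by this patch
    num_envs=num_envs,         # vectorised batch dimension
    device="cpu",
    continuous_actions=True,
    n_agents=4,                # the fixed corner poses in reset_world_at are written for four agents
)
obs = env.reset()
for _ in range(100):
    # One 2-D velocity command per agent (Holonomic dynamics); plug a policy here.
    actions = [torch.zeros(num_envs, 2) for _ in env.agents]
    obs, rews, dones, info = env.step(actions)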
From 23001da2de0c79c3e950891f7b6e619534c34cb7 Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Thu, 27 Jun 2024 09:31:56 +0100 Subject: [PATCH 6/8] amend --- vmas/scenarios/robomaster/rm_navigation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index ee20b2d4..f28636b0 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -34,13 +34,13 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.plot_grid = True self.viewer_zoom = 2 - self.n_agents = kwargs.get("n_agents", 2) + self.n_agents = kwargs.get("n_agents", 9) self.pos_shaping_factor = kwargs.get("pos_shaping_factor", 1) self.final_reward = kwargs.get("final_reward", 0.005) self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -1) - self.min_distance_between_entities = self.agent_radius * 2 + 0.05 + self.min_distance_between_entities = self.agent_radius * 2 + 0.1 self.world_semidim = 3 self.min_collision_distance = 0.05 From 19e0292250b2953c4aa9810a2954bd66509085bd Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Thu, 27 Jun 2024 11:16:46 +0100 Subject: [PATCH 7/8] amend --- vmas/scenarios/robomaster/rm_navigation.py | 71 +++++++++------------- 1 file changed, 28 insertions(+), 43 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index f28636b0..5999ee31 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -11,7 +11,7 @@ from vmas.simulator.core import Agent, Box, Landmark, Sphere, World from vmas.simulator.dynamics.holonomic import Holonomic from vmas.simulator.scenario import BaseScenario -from vmas.simulator.utils import ScenarioUtils, TorchUtils +from vmas.simulator.utils import ScenarioUtils, TorchUtils, Color class Scenario(BaseScenario): @@ -22,6 +22,7 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.box_agents = kwargs.get("box_agents", False) self.linear_friction = kwargs.get("linear_friction", 0.1) self.min_input_norm = kwargs.get("min_input_norm", 0.08) + self.edge_radius = kwargs.get("edge_radius", 10) self.agent_radius = 0.16 self.agent_box_length = 0.32 self.agent_box_width = 0.24 @@ -235,48 +236,32 @@ def info(self, agent: Agent) -> Dict[str, Tensor]: "collision_rew": agent.agent_collision_rew, } - # def extra_render(self, env_index: int = 0): - # from vmas.simulator import rendering - # - # geoms = [] - # for i, entity_a in enumerate(self.world.agents): - # for j, entity_b in enumerate(self.world.agents): - # if i <= j: - # continue - # if not self.box_agents: - # point_a = entity_a.state.pos - # point_b = entity_b.state.pos - # dist = self.world.get_distance_from_point( - # entity_a, entity_b.state.pos, env_index - # ) - # dist = dist - entity_b.shape.radius - # else: - # point_a, point_b = _get_closest_box_box( - # entity_a.state.pos, - # entity_a.state.rot, - # entity_a.shape.width, - # entity_a.shape.length, - # entity_b.state.pos, - # entity_b.state.rot, - # entity_b.shape.width, - # entity_b.shape.length, - # ) - # dist = torch.linalg.vector_norm(point_a - point_b, dim=-1) - # - # for point in [point_a, point_b]: - # color = ( - # Color.BLACK.value - # if dist > self.min_collision_distance - # else Color.RED.value - # ) - # line = rendering.make_circle(0.03) - # xform = rendering.Transform() - # xform.set_translation(point[env_index][0], point[env_index][1]) - # 
line.add_attr(xform) - # line.set_color(*color) - # geoms.append(line) - # return geoms - # + def extra_render(self, env_index: int = 0) -> "List[Geom]": + from vmas.simulator import rendering + + geoms: List[Geom] = [] + + # Communication lines + for i, agent1 in enumerate(self.world.agents): + for j, agent2 in enumerate(self.world.agents): + if j <= i: + continue + agent_dist = torch.linalg.vector_norm( + agent1.state.pos - agent2.state.pos, dim=-1 + ) + if agent_dist[env_index] <= self.edge_radius: + color = Color.BLACK.value + line = rendering.Line( + (agent1.state.pos[env_index]), + (agent2.state.pos[env_index]), + width=1, + ) + xform = rendering.Transform() + line.add_attr(xform) + line.set_color(*color) + geoms.append(line) + + return geoms if __name__ == "__main__": From e71404001affc215962cbaacab3527d46141ab2e Mon Sep 17 00:00:00 2001 From: Matteo Bettini Date: Thu, 27 Jun 2024 12:13:39 +0100 Subject: [PATCH 8/8] amend --- vmas/scenarios/robomaster/rm_navigation.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/vmas/scenarios/robomaster/rm_navigation.py b/vmas/scenarios/robomaster/rm_navigation.py index 5999ee31..771d3c59 100644 --- a/vmas/scenarios/robomaster/rm_navigation.py +++ b/vmas/scenarios/robomaster/rm_navigation.py @@ -1,7 +1,8 @@ # Copyright (c) 2022-2024. # ProrokLab (https://www.proroklab.org/) # All rights reserved. -from typing import Dict +import typing +from typing import Dict, List import torch from torch import Tensor @@ -11,7 +12,10 @@ from vmas.simulator.core import Agent, Box, Landmark, Sphere, World from vmas.simulator.dynamics.holonomic import Holonomic from vmas.simulator.scenario import BaseScenario -from vmas.simulator.utils import ScenarioUtils, TorchUtils, Color +from vmas.simulator.utils import Color, ScenarioUtils, TorchUtils + +if typing.TYPE_CHECKING: + from vmas.simulator.rendering import Geom class Scenario(BaseScenario): @@ -22,7 +26,7 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.box_agents = kwargs.get("box_agents", False) self.linear_friction = kwargs.get("linear_friction", 0.1) self.min_input_norm = kwargs.get("min_input_norm", 0.08) - self.edge_radius = kwargs.get("edge_radius", 10) + self.edge_radius = kwargs.get("edge_radius", 2) self.agent_radius = 0.16 self.agent_box_length = 0.32 self.agent_box_width = 0.24 @@ -41,7 +45,7 @@ def make_world(self, batch_dim: int, device: torch.device, **kwargs): self.final_reward = kwargs.get("final_reward", 0.005) self.agent_collision_penalty = kwargs.get("agent_collision_penalty", -1) - self.min_distance_between_entities = self.agent_radius * 2 + 0.1 + self.min_distance_between_entities = self.agent_radius * 2 + 0.2 self.world_semidim = 3 self.min_collision_distance = 0.05
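For reference, both scenarios reward each agent with a difference-of-potential position-shaping term (the per-step decrease in distance to its goal, scaled by pos_shaping_factor), a sparse final_reward once it is within the goal radius, and a pairwise agent_collision_penalty whenever two agents come within min_collision_distance. The sketch below restates only the shaping term from agent_reward() in isolation; it is illustrative and not part of the patches.

import torch

def pos_shaping_reward(prev_shaping, pos, goal_pos, pos_shaping_factor=1.0):
    # prev_shaping: (batch,) shaping value stored at the previous step (agent.pos_shaping)
    # pos, goal_pos: (batch, 2) current agent and goal positions
    shaping = torch.linalg.vector_norm(pos - goal_pos, dim=-1) * pos_shaping_factor
    reward = prev_shaping - shaping   # positive when the agent has moved closer to its goal
    return reward, shaping            # store the returned shaping value for the next step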