Skip to content

Commit 92f0b43

Browse files
committed
vmas 1.2.6
1 parent d7c3153 commit 92f0b43

File tree

2 files changed

+10
-7
lines changed

2 files changed

+10
-7
lines changed

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66

77
setup(
88
name="vmas",
9-
version="1.2.5",
9+
version="1.2.6",
1010
description="Vectorized Multi-Agent Simulator",
1111
url="https://github.com/proroklab/VectorizedMultiAgentSimulator",
1212
license="GPLv3",

vmas/simulator/environment/environment.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -164,16 +164,13 @@ def step(self, actions: List):
164164
rewards = []
165165
infos = []
166166
for agent in self.agents:
167-
rewards.append(self.scenario.reward(agent).clone())
168-
obs.append(self.scenario.observation(agent).clone())
167+
rewards.append(self.scenario.reward(agent))
168+
obs.append(self.scenario.observation(agent))
169169
# A dictionary per agent
170170
infos.append(self.scenario.info(agent))
171171

172-
dones = self.scenario.done().clone()
173-
174172
self.steps += 1
175-
if self.max_steps is not None:
176-
dones += self.steps >= self.max_steps
173+
dones = self.done()
177174
# print("\nStep results in unwrapped environment")
178175
# print(
179176
# f"Actions len (n_agents): {len(actions)}, "
@@ -192,6 +189,12 @@ def step(self, actions: List):
192189
# print(f"Info len (n_agents): {len(infos)}, info[0] (infos agent 0): {infos[0]}")
193190
return obs, rewards, dones, infos
194191

192+
def done(self):
193+
dones = self.scenario.done()
194+
if self.max_steps is not None:
195+
dones += self.steps >= self.max_steps
196+
return dones
197+
195198
def get_agent_action_size(self, agent: Agent):
196199
return (
197200
self.world.dim_p

0 commit comments

Comments (0)