Commit d0382fd: Fix docs and tests
eleurent committed Aug 18, 2024
1 parent 6764ee2
Showing 10 changed files with 84 additions and 61 deletions.
12 changes: 7 additions & 5 deletions docs/graphics/index.md
@@ -9,11 +9,13 @@ Environment rendering is done with [pygame](https://www.pygame.org/news), which
 A window is created at the first call of `env.render()`. Its dimensions can be configured:
 
 ```python
-env = gym.make("roundabout-v0")
-env.configure({
-    "screen_width": 640,
-    "screen_height": 480
-})
+env = gym.make(
+    "roundabout-v0",
+    config={
+        "screen_width": 640,
+        "screen_height": 480
+    }
+)
 env.reset()
 env.render()
 ```
4 changes: 2 additions & 2 deletions docs/make_your_own.md
@@ -43,8 +43,8 @@ See {ref}`vehicle behaviors <vehicle_behavior>` for reference, and existing envi
 
 To make a part of your environment configurable, overload the {py:meth}`~highway_env.envs.common.abstract.AbstractEnv.default_config`
 method to define new `{"config_key": value}` pairs with default values. These configurations can then be accessed in your
-environment implementation with `self.config["config_key"]`, and once the environment is created, it can be configured with
-`env.configure({"config_key": other_value})` followed by `env.reset()`.
+environment implementation with `self.unwrapped.config["config_key"]`, and once the environment is created, it can be configured with
+`env.unwrapped.config["config_key"] = other_value` followed by `env.reset()`.
 
 ## Register the environment
 
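Aside: a minimal sketch of the overload pattern this hunk describes, assuming a hypothetical `MyEnv` subclass and a made-up `"reward_speed"` key (neither is part of the repository):

```python
from highway_env.envs.common.abstract import AbstractEnv


class MyEnv(AbstractEnv):  # hypothetical environment, for illustration only
    @classmethod
    def default_config(cls) -> dict:
        config = super().default_config()
        # New configurable keys with their default values
        config.update({"reward_speed": 30})  # "reward_speed" is a made-up key
        return config

    def _reward(self, action) -> float:
        # The value can be read back anywhere in the implementation
        return self.vehicle.speed / self.config["reward_speed"]
```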
37 changes: 23 additions & 14 deletions docs/multi_agent.md
@@ -11,12 +11,17 @@ To that end, update the {ref}`environment configuration <Configuring an environm
 ```{eval-rst}
 .. jupyter-execute::
 
-    import gymnasium as gym
-    env = gym.make('highway-v0', render_mode='rgb_array')
-    env.configure({"controlled_vehicles": 2})  # Two controlled vehicles
-    env.configure({"vehicles_count": 1})  # A single other vehicle, for the sake of visualisation
+    import gymnasium
+    import highway_env
+    env = gymnasium.make(
+        "highway-v0",
+        render_mode="rgb_array",
+        config={
+            "controlled_vehicles": 2,  # Two controlled vehicles
+            "vehicles_count": 1,  # A single other vehicle, for the sake of visualisation
+        }
+    )
     env.reset(seed=0)
     from matplotlib import pyplot as plt
@@ -35,7 +40,7 @@ The type of actions contained in the tuple must be described by a standard {ref}
 ```{eval-rst}
 .. jupyter-execute::
 
-    env.configure({
+    env.unwrapped.config.update({
         "action": {
             "type": "MultiAgentAction",
             "action_config": {
@@ -70,14 +75,18 @@ The type of observations contained in the tuple must be described by a standard
 ```{eval-rst}
 .. jupyter-execute::
 
-    env.configure({
-        "observation": {
-            "type": "MultiAgentObservation",
-            "observation_config": {
-                "type": "Kinematics",
-            }
-        }
-    })
+    env = gymnasium.make(
+        "highway-v0",
+        render_mode="rgb_array",
+        config={
+            "observation": {
+                "type": "MultiAgentObservation",
+                "observation_config": {
+                    "type": "Kinematics",
+                }
+            }
+        }
+    )
     obs, info = env.reset()
     import pprint
@@ -92,7 +101,7 @@ Here is a pseudo-code example of how a centralized multi-agent policy could be t
 .. jupyter-execute::
 
     # Multi-agent environment configuration
-    env.configure({
+    env.unwrapped.config.update({
         "controlled_vehicles": 2,
         "observation": {
             "type": "MultiAgentObservation",
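Aside: to make the tuple-of-actions convention concrete, here is a minimal sketch of one step with two controlled vehicles, based on the configuration shown in the hunks above (the sampled action values are illustrative):

```python
import gymnasium
import highway_env

env = gymnasium.make(
    "highway-v0",
    config={
        "controlled_vehicles": 2,
        "action": {
            "type": "MultiAgentAction",
            "action_config": {"type": "DiscreteMetaAction"},
        },
    },
)
obs, info = env.reset(seed=0)

# The action space is a tuple: one action per controlled vehicle
actions = env.action_space.sample()  # e.g. (1, 3)
obs, reward, done, truncated, info = env.step(actions)
```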
43 changes: 22 additions & 21 deletions docs/observations/index.md
@@ -13,23 +13,25 @@ Each environment comes with a *default* observation, which can be changed or cus
     import gymnasium as gym
     import highway_env
 
-    env = gym.make('highway-v0')
-    env.configure({
-        "observation": {
-            "type": "OccupancyGrid",
-            "vehicles_count": 15,
-            "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
-            "features_range": {
-                "x": [-100, 100],
-                "y": [-100, 100],
-                "vx": [-20, 20],
-                "vy": [-20, 20]
-            },
-            "grid_size": [[-27.5, 27.5], [-27.5, 27.5]],
-            "grid_step": [5, 5],
-            "absolute": False
-        }
-    })
+    env = gym.make(
+        'highway-v0',
+        config={
+            "observation": {
+                "type": "OccupancyGrid",
+                "vehicles_count": 15,
+                "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
+                "features_range": {
+                    "x": [-100, 100],
+                    "y": [-100, 100],
+                    "vx": [-20, 20],
+                    "vy": [-20, 20]
+                },
+                "grid_size": [[-27.5, 27.5], [-27.5, 27.5]],
+                "grid_step": [5, 5],
+                "absolute": False
+            }
+        }
+    )
     env.reset()
 ```

@@ -124,8 +126,7 @@ vehicle and 0 for placeholders.
             "order": "sorted"
         }
     }
-    env = gym.make('highway-v0')
-    env.configure(config)
+    env = gym.make('highway-v0', config=config)
     obs, info = env.reset()
     print(obs)
@@ -146,7 +147,7 @@ The RGB to grayscale conversion is a weighted sum, configured by the `weights` p
     from matplotlib import pyplot as plt
     %matplotlib inline
 
-    config = {
+    config = {
         "observation": {
             "type": "GrayscaleObservation",
             "observation_shape": (128, 64),
@@ -156,7 +157,7 @@ The RGB to grayscale conversion is a weighted sum, configured by the `weights` p
         },
         "policy_frequency": 2
     }
-    env.configure(config)
+    env = gym.make('highway-v0', config=config)
     obs, info = env.reset()
 
     fig, axes = plt.subplots(ncols=4, figsize=(12, 5))
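Aside on the `weights` parameter mentioned in the hunk header above: the grayscale value of each pixel is simply the dot product of the weights with the RGB channels. A small self-contained check (the weights shown are the common ITU-R 601 luma coefficients, used here only as an example value):

```python
import numpy as np

rgb_image = np.random.randint(0, 256, size=(64, 128, 3))  # H x W x RGB
weights = np.array([0.2989, 0.5870, 0.1140])  # example luma weights

# Weighted sum over the channel axis: gray = w_r*R + w_g*G + w_b*B
gray = rgb_image @ weights
print(gray.shape)  # (64, 128)
```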
@@ -173,7 +174,7 @@ We illustrate the stack update by performing three steps in the environment.
 .. jupyter-execute::
 
     for _ in range(3):
-        obs, reward, done, truncated, info = env.step(env.action_type.actions_indexes["IDLE"])
+        obs, reward, done, truncated, info = env.step(env.unwrapped.action_type.actions_indexes["IDLE"])
 
     fig, axes = plt.subplots(ncols=4, figsize=(12, 5))
     for i, ax in enumerate(axes.flat):
29 changes: 21 additions & 8 deletions docs/quickstart.md
@@ -8,14 +8,15 @@ Here is a quick example of how to create an environment:
 ```{eval-rst}
 .. jupyter-execute::
 
-    import gymnasium as gym
+    import gymnasium
     import highway_env
     from matplotlib import pyplot as plt
     %matplotlib inline
 
-    env = gym.make('highway-v0', render_mode='rgb_array')
+    env = gymnasium.make('highway-v0', render_mode='rgb_array')
     env.reset()
     for _ in range(3):
-        action = env.action_type.actions_indexes["IDLE"]
+        action = env.unwrapped.action_type.actions_indexes["IDLE"]
         obs, reward, done, truncated, info = env.step(action)
     env.render()
@@ -53,21 +54,33 @@ After environment creation, the configuration can be accessed using the
     import pprint
 
-    env = gym.make("highway-v0", render_mode='rgb_array')
-    pprint.pprint(env.config)
+    env = gymnasium.make("highway-v0", render_mode='rgb_array')
+    pprint.pprint(env.unwrapped.config)
 ```
 
 For example, the number of lanes can be changed with:
 
 ```{eval-rst}
 .. jupyter-execute::
 
-    env.config["lanes_count"] = 2
+    env.unwrapped.config["lanes_count"] = 2
     env.reset()
     plt.imshow(env.render())
     plt.show()
 ```
 
+or directly at creation time with:
+
+```{eval-rst}
+.. jupyter-execute::
+
+    env = gymnasium.make(
+        "highway-v0",
+        render_mode='rgb_array',
+        config={"lanes_count": 2}
+    )
+```
+
 ```{note}
 The environment must be {py:meth}`~highway_env.envs.common.abstract.AbstractEnv.reset` for the change of configuration
 to be effective.
@@ -87,11 +100,11 @@ Here is an example of SB3's DQN implementation trained on `highway-fast-v0` with
 [![Colab][colab-badge]][highway_dqn]
 
 ```python
-import gymnasium as gym
+import gymnasium
 import highway_env
 from stable_baselines3 import DQN
 
-env = gym.make("highway-fast-v0")
+env = gymnasium.make("highway-fast-v0")
 model = DQN('MlpPolicy', env,
             policy_kwargs=dict(net_arch=[256, 256]),
             learning_rate=5e-4,
2 changes: 1 addition & 1 deletion scripts/sb3_highway_dqn.py
@@ -40,7 +40,7 @@
         env, video_folder="highway_dqn/videos", episode_trigger=lambda e: True
     )
     env.unwrapped.set_record_video_wrapper(env)
-    env.configure({"simulation_frequency": 15})  # Higher FPS for rendering
+    env.unwrapped.config["simulation_frequency"] = 15  # Higher FPS for rendering
 
     for videos in range(10):
         done = truncated = False
8 changes: 4 additions & 4 deletions scripts/sb3_highway_dqn_cnn.py
@@ -6,9 +6,9 @@
 
 
 def train_env():
-    env = gym.make("highway-fast-v0")
-    env.configure(
-        {
+    env = gym.make(
+        "highway-fast-v0",
+        config={
             "observation": {
                 "type": "GrayscaleObservation",
                 "observation_shape": (128, 64),
@@ -24,7 +24,7 @@ def train_env():
 
 def test_env():
     env = train_env()
-    env.configure({"policy_frequency": 15, "duration": 20})
+    env.unwrapped.config.update({"policy_frequency": 15, "duration": 20})
     env.reset()
     return env
 
3 changes: 1 addition & 2 deletions scripts/sb3_highway_ppo_transformer.py
@@ -272,8 +272,7 @@ def forward(self, observations: th.Tensor) -> th.Tensor:
 
 
 def make_configure_env(**kwargs):
-    env = gym.make(kwargs["id"])
-    env.configure(kwargs["config"])
+    env = gym.make(kwargs["id"], config=kwargs["config"])
     env.reset()
     return env
 
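A possible invocation of the helper above, with illustrative arguments:

```python
# Values are examples only; any registered env id and config keys work
env = make_configure_env(id="highway-v0", config={"lanes_count": 2})
```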
3 changes: 1 addition & 2 deletions tests/envs/test_actions.py
@@ -15,8 +15,7 @@
     ],
 )
 def test_action_type(action_config):
-    env = gym.make("highway-v0").unwrapped
-    env.configure({"action": action_config})
+    env = gym.make("highway-v0", config={"action": action_config})
     env.reset()
     for _ in range(3):
         action = env.action_space.sample()
4 changes: 2 additions & 2 deletions tests/graphics/test_render.py
@@ -10,7 +10,7 @@
 @pytest.mark.parametrize("env_spec", ["highway-v0", "merge-v0"])
 def test_render(env_spec):
     env = gym.make(env_spec, render_mode="rgb_array").unwrapped
-    env.configure({"offscreen_rendering": True})
+    env.config.update({"offscreen_rendering": True})
     env.reset()
     img = env.render()
     env.close()
@@ -25,7 +25,7 @@ def test_render(env_spec):
 @pytest.mark.parametrize("env_spec", ["highway-v0", "merge-v0"])
 def test_obs_grayscale(env_spec, stack_size=4):
     env = gym.make(env_spec).unwrapped
-    env.configure(
+    env.config.update(
         {
             "offscreen_rendering": True,
             "observation": {
