PEP8 changes
Norbert Kozlowski committed Oct 14, 2018
1 parent 5332e56 commit a46a005
Showing 29 changed files with 256 additions and 184 deletions.
28 changes: 28 additions & 0 deletions .editorconfig
@@ -0,0 +1,28 @@
# EditorConfig is awesome: http://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
max_line_length = 79

[*.maze]
insert_final_newline = false

# 2 space indentation for YAML files
[*.yml]
indent_style = space
indent_size = 2

[Makefile]
indent_style = tab
indent_size = 8
2 changes: 1 addition & 1 deletion .travis.yml
@@ -5,7 +5,7 @@ python:

script:
  - pip install -r requirements.txt
-  - pytest
+  - make test

deploy:
skip_cleanup: true
8 changes: 6 additions & 2 deletions Makefile
@@ -1,2 +1,6 @@
-test:
-	pytest
+lint:
+	mypy .
+
+test: lint
+	py.test --pep8 -m pep8
+	py.test
2 changes: 1 addition & 1 deletion examples/checkerboard.py
@@ -19,4 +19,4 @@
        state, reward, done, info = env.step(action)
        logging.info(f"\tObtained reward: {reward}, state: {state}")

-    logging.info("Finished")
\ No newline at end of file
+    logging.info("Finished")
15 changes: 6 additions & 9 deletions examples/handeye.py
@@ -1,5 +1,4 @@
import logging
-from random import choice

import gym

@@ -11,22 +10,20 @@
if __name__ == '__main__':
    hand_eye = gym.make('HandEye3-v0')

-    possible_actions = list(range(6))
-
    for i_episode in range(1):
        observation = hand_eye.reset()

        for t in range(100):
-            logging.info("Time: [{}], observation: [{}]".format(t, observation))
+            logging.info(f"Time: [{t}], observation: [{observation}]")

-            action = choice(possible_actions)
+            action = hand_eye.action_space.sample()

-            logging.info("\t\tExecuted action: [{}]".format(action))
+            logging.info(f"\t\tExecuted action: [{action}]")
            observation, reward, done, info = hand_eye.step(action)

            if done:
-                logging.info("Episode finished after {} timesteps.".format(t + 1))
-                logging.info("Last reward: {}".format(reward))
+                logging.info(f"Episode finished after {t+1} timesteps.")
+                logging.info(f"Last reward: {reward}")
                break

-    logging.info("Finished")
\ No newline at end of file
+    logging.info("Finished")
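Worth noting about the change above: `action_space.sample()` draws from the environment's own `Discrete` action space, so the example no longer hardcodes the action count (`list(range(6))`) and cannot drift out of sync with the environment definition.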
10 changes: 4 additions & 6 deletions examples/maze.py
@@ -1,5 +1,4 @@
import logging
-from random import choice

import gym

@@ -11,23 +10,22 @@
if __name__ == '__main__':
    maze = gym.make('MazeF1-v0')

-    possible_actions = list(range(8))
    transitions = maze.env.get_all_possible_transitions()

    for i_episode in range(1):
        observation = maze.reset()

        for t in range(100):
-            logging.info("Time: [{}], observation: [{}]".format(t, observation))
+            logging.info(f"Time: [{t}], observation: [{observation}]")

-            action = choice(possible_actions)
+            action = maze.action_space.sample()

            logging.info("\t\tExecuted action: [{}]".format(action))
            observation, reward, done, info = maze.step(action)

            if done:
-                logging.info("Episode finished after {} timesteps.".format(t + 1))
-                logging.info("Last reward: {}".format(reward))
+                logging.info(f"Episode finished after {t+1} timesteps.")
+                logging.info(f"Last reward: {reward}")
                break

    logging.info("Finished")
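The example above fetches `transitions` but never uses it. As a quick sketch of what that data can do, the snippet below groups transitions by start state, assuming `maze.env.get_all_possible_transitions()` returns the same (start_state, action, end_state) tuples documented for HandEye later in this diff; the grouping logic and the `gym_maze` registration import are illustrative assumptions, not code from the repository.

from collections import defaultdict

import gym
import gym_maze  # noqa: F401  -- assumed registration import, hidden in the truncated example

maze = gym.make('MazeF1-v0')
transitions = maze.env.get_all_possible_transitions()

# Group (start_state, action, end_state) tuples by their start state.
moves = defaultdict(list)
for start, action, end in transitions:
    moves[start].append((action, end))

for state, options in moves.items():
    print(f"{state}: {len(options)} possible moves")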
2 changes: 1 addition & 1 deletion gym_checkerboard/__init__.py
@@ -31,4 +31,4 @@
    entry_point='gym_checkerboard:Checkerboard',
    max_episode_steps=max_episode_steps,
    kwargs={'dim': 2, 'ndiv': 5}
-)
\ No newline at end of file
+)
8 changes: 5 additions & 3 deletions gym_checkerboard/checkerboard.py
@@ -1,4 +1,5 @@
import logging
+from typing import List

import gym
import numpy as np
@@ -8,16 +9,17 @@

logger = logging.getLogger(__name__)

+
class Checkerboard(gym.Env):
    metadata = {'render.modes': ['human', 'ansi']}

    REWARD = 1

-    def __init__(self, dim: int, ndiv: int):
+    def __init__(self, dim: int, ndiv: int) -> None:
        logger.debug("Initializing environment")
        self._dim = dim
        self._board = CheckerboardSimulator(dim, ndiv)
-        self._state = None
+        self._state: List = []
        self._validation_bit = 0

        self.action_space = Discrete(2)
@@ -60,4 +62,4 @@ def _observation(self) -> list:

    @property
    def _true_color(self) -> int:
-        return self._board.get_color(*self._state)
\ No newline at end of file
+        return self._board.get_color(*self._state)
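To see the pieces above in motion, here is a minimal, hypothetical usage sketch: it constructs the environment directly with the dim/ndiv kwargs registered in gym_checkerboard/__init__.py and assumes the standard gym.Env reset/step contract documented elsewhere in this commit.

from gym_checkerboard.checkerboard import Checkerboard

# Same kwargs as the registration in gym_checkerboard/__init__.py.
env = Checkerboard(dim=2, ndiv=5)
obs = env.reset()

for _ in range(5):
    # action_space is Discrete(2): predict WHITE (0) or BLACK (1).
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    print(obs, reward, done)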
7 changes: 2 additions & 5 deletions gym_checkerboard/checkerboard_simulator.py
@@ -3,9 +3,10 @@
WHITE = 0
BLACK = 1

+
class CheckerboardSimulator:

-    def __init__(self, dim: int, ndiv: int):
+    def __init__(self, dim: int, ndiv: int) -> None:
        """
        :param dim: dimensionality of solution space
        :param ndiv: division of each dimension,
@@ -36,7 +37,3 @@ def get_color(self, *cords) -> int:
    def _get_index(self, val: float) -> int:
        y = np.linspace(0, 1, self.nd + 1)
        return np.where(y <= val)[0][-1]
-
-
-
-
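A quick worked example of the bucketing `_get_index` performs (a standalone re-implementation for illustration, not code from the repository): with ndiv = 5, np.linspace(0, 1, 6) gives the bin edges [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], and a coordinate maps to the last edge that does not exceed it.

import numpy as np

nd = 5  # plays the role of self.nd above
y = np.linspace(0, 1, nd + 1)  # array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])

for val in (0.0, 0.19, 0.42, 0.99):
    idx = np.where(y <= val)[0][-1]
    print(val, '->', idx)  # 0.0 -> 0, 0.19 -> 0, 0.42 -> 2, 0.99 -> 4

# Edge case: val == 1.0 would return 5, one past the last of the
# nd cells, so callers presumably pass coordinates in [0, 1).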
2 changes: 1 addition & 1 deletion gym_handeye/__init__.py
@@ -58,4 +58,4 @@
    entry_point='gym_handeye:HandEye',
    max_episode_steps=500,
    kwargs={'grid_size': 9, 'note_in_hand': True, 'test_only_changes': 0}
-)
\ No newline at end of file
+)
48 changes: 31 additions & 17 deletions gym_handeye/handeye.py
@@ -5,7 +5,8 @@

from gym.spaces import Discrete
import gym_handeye.utils.utils as utils
-from gym_handeye.handeye_simulator import HandEyeSimulator, SURFACE, BLOCK, GRIPPER, ACTION_LOOKUP
+from gym_handeye.handeye_simulator import HandEyeSimulator, SURFACE, BLOCK, \
+    GRIPPER, ACTION_LOOKUP


class HandEye(gym.Env):
@@ -15,10 +16,12 @@ def __init__(self, grid_size, note_in_hand, test_only_changes=0):
        """
        :param grid_size: specifies the size of the monitored plane
-        :param note_in_hand: specifies if the tactile sensor should switch to '2' if the block is held by the gripper
+        :param note_in_hand: specifies if the tactile sensor should
+            switch to '2' if the block is held by the gripper
            (if False, then goes back to '0')
-        :param test_only_changes: specifies if only condition-action combinations should be tested that invoke
-            a change (1), non changes (-1) or all possibilities (0)
+        :param test_only_changes: specifies whether to test only
+            condition-action combinations that invoke a change (1),
+            only non-changes (-1), or all possibilities (0)
        """
        logging.debug('Starting environment HandEye')
        self.grid_size = grid_size
@@ -35,12 +38,16 @@ def __init__(self, grid_size, note_in_hand, test_only_changes=0):
    def step(self, action):
        """
        Run one timestep of the environment's dynamics.
-        Accepts an action and returns a tuple (observation, reward, done, info).
+        Accepts an action and returns a tuple
+        (observation, reward, done, info).
        :param action: an action provided by the environment
-        :return: observation (tuple): agent's observation of the current environment
+        :return: observation (tuple): agent's observation of
+            the current environment
            reward (float): amount of reward returned after previous action
-            done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
-            info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
+            done (boolean): whether the episode has ended, in which case
+                further step() calls will return undefined results
+            info (dict): contains auxiliary diagnostic information
+                (helpful for debugging, and sometimes learning)
        """

        logging.debug('Executing a step, action = {}'.format(action))
@@ -71,6 +78,7 @@ def reset(self):
    def render(self, mode='human', close=False):
        """
        Renders the environment.
-        :param mode:
+        :param mode (str): the mode to render with
+        :param close (bool): close all open renderings
        :return:
@@ -124,32 +132,37 @@ def get_all_possible_transitions(self):
        Returns all possible transitions of the environment.
        This information is used to calculate the agent's knowledge.
        :param self
-        :return: all transitions as list of tuples: (start_state, action, end_state)
+        :return: all transitions as list of tuples:
+            (start_state, action, end_state)
        """

        return utils.get_all_possible_transitions(self.grid_size)

    def get_goal_state(self):
        """
-        Returns goal_state - an observation that is the environment's next goal.
-        Non deterministic.
+        Returns goal_state - an observation that is the environment's
+        next goal. Non-deterministic.
        :return:
        """
        return self.handeye.get_goal_state()

    def _should_end_testing(self, previous, obs):
        """
-        Returns whether the test should end based on the self.test_only_changes parameter.
+        Returns whether the test should end based on the
+        self.test_only_changes parameter.
        :param previous: previous observation
        :param obs: current observation
        :return:
        """
-        return (self.test_only_changes == 1 and not self._change_detected(previous, obs)) or (
-            self.test_only_changes == -1 and self._change_detected(previous, obs))
+        return (self.test_only_changes == 1 and not self._change_detected(
+            previous, obs)) or (self.test_only_changes == -1 and
+            self._change_detected(previous, obs))

-    def _change_detected(self, previous, current):
+    @staticmethod
+    def _change_detected(previous, current):
        """
-        Returns true if a change was detected between observations (previous and current).
+        Returns true if a change was detected between
+        observations (previous and current).
        :param previous: previous observation
        :param current: current observation
        :return:
Expand All @@ -165,7 +178,8 @@ def _observe(self):

def _take_action(self, action):
"""
Executes an action with all consequences. Returns true if executing an action was successful.
Executes an action with all consequences.
Returns true if executing an action was successful.
:param action:
:return:
"""
Expand Down
Loading

0 comments on commit a46a005

Please sign in to comment.
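Finally, a hypothetical end-to-end sketch of the HandEye API touched by this commit. The rollout loop mirrors examples/handeye.py; the goal-matching check and the registration import are illustrative assumptions (get_goal_state() is documented above as non-deterministic), not code from the repository.

import gym
import gym_handeye  # noqa: F401  -- assumed registration import

env = gym.make('HandEye3-v0')
obs = env.reset()
goal = env.env.get_goal_state()  # the environment's next goal observation

for t in range(100):
    obs, reward, done, info = env.step(env.action_space.sample())
    if tuple(obs) == tuple(goal):
        print(f"Reached the goal observation after {t + 1} steps")
        break
    if done:
        break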