Skip to content
Snippets Groups Projects
Commit 2d054dc2 authored by Armin Co's avatar Armin Co
Browse files

Environment works with keras

parent c6693614
Branches
No related tags found
No related merge requests found
......@@ -8,8 +8,9 @@ import weakref
import numpy as np
import pygame
import time
from steering_wheel import Controller
from steering_wheel import ACTION_SPACE, Controller
# find carla module
try:
......@@ -68,6 +69,7 @@ class Camera:
class CollisionSensor:
sensor = None
collision = None
def __init__(self, world, parent):
bp = world.get_blueprint_library().find('sensor.other.collision')
......@@ -80,6 +82,7 @@ class CollisionSensor:
if not self:
return
print(event.other_actor)
self.collision = event
impulse = event.normal_impulse
intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
print(intensity)
......@@ -94,6 +97,7 @@ class World:
blueprint_library = None
spawn_points = None
actors = []
was_big_reward = False
def __init__(self, world):
self.world = world
......@@ -104,9 +108,17 @@ class World:
def reset(self):
    """ Remove and create new player/vehicle and restart the episode state. """
    self.destroy()
    # Give the simulator a moment to actually remove the old actors
    # before spawning replacements at the same transforms.
    time.sleep(0.5)
    # Bug fix: the one-time goal bonus flag was never cleared, so after the
    # first successful episode no later episode could earn the +100 reward.
    self.was_big_reward = False
    self.spawn_player()
    self.spawn_actors()
def observation(self):
    """ Current vehicle state as [x, y, yaw]; zeros when no vehicle exists. """
    vehicle = self.player
    if vehicle is None:
        return [0, 0, 0]
    transform = vehicle.get_transform()
    location, rotation = transform.location, transform.rotation
    return [location.x, location.y, rotation.yaw]
def spawn_player(self):
""" Add a vehicle to the world. """
while self.player is None:
......@@ -125,10 +137,11 @@ class World:
transforms = []
transforms.append(carla.Transform(carla.Location(x=-19.2, y=54.5, z=0.2), carla.Rotation(pitch=0.0, yaw=-180, roll=0.0)))
transforms.append(carla.Transform(carla.Location(x=-14.3, y=54.7, z=0.2), carla.Rotation(pitch=0.0, yaw=-180, roll=0.0)))
transforms.append(carla.Transform(carla.Location(x=-41.3, y=53.7, z=0.2), carla.Rotation(pitch=0.0, yaw=-180, roll=0.0)))
transforms.append(carla.Transform(carla.Location(x=-37.3, y=53.7, z=0.2), carla.Rotation(pitch=0.0, yaw=-180, roll=0.0)))
mass = carla.VehiclePhysicsControl(mass=900000.0)
for transform in transforms:
actor = self.world.try_spawn_actor(blueprint, transform)
actor.apply_physics_control(mass)
if actor is not None:
self.actors.append(actor)
......@@ -136,48 +149,102 @@ class World:
""" Remove vehicle from the world. """
if self.player is not None:
self.player.destroy()
self.player = None
if self.actors is not None:
for actor in self.actors:
actor.destroy()
self.actors = []
def step(self, action):
    """ Apply one discrete action to the vehicle and advance the episode.

    Parameters:
        action: discrete action id (see the steering_wheel constants),
            translated into throttle/steer/brake/reverse controls.

    Returns:
        The (observation, reward, done, info) tuple produced by reward().
    """
    # Bug fix: removed stale diff leftover `self.player.apply_control(action)`
    # -- the raw action is an int index, not a carla.VehicleControl.
    controls = Controller.action_to_controls(action)
    c = carla.VehicleControl(throttle=controls[0], steer=controls[1], brake=controls[2], reverse=controls[3])
    self.player.apply_control(c)
    reward = self.reward()
    # A collision is consumed once it has been penalised, so the next
    # step does not count it again.
    self.collision_sensor.collision = None
    return reward
def reward(self):
    """ Compute the shaping reward for the current vehicle state.

    Returns:
        (observation, reward, done, info) -- gym-style tuple; info is None.
    """
    # Goal pose the vehicle should park at (hand-measured in Town07).
    target = carla.Transform(carla.Location(x=-25.534311, y=54.460903, z=0.112781), carla.Rotation(pitch=0.000000, yaw=-175.922913, roll=-6.221135))
    pos = self.player.get_transform()
    # The +1 keeps both denominators strictly positive.
    pos_diff = (target.location.x - pos.location.x)**2 + (target.location.y - pos.location.y)**2 + 1
    rot_diff = (target.rotation.yaw - pos.rotation.yaw)**2 + 1
    # Shaping term: approaches +0.5 near the goal, tends to -0.5 far away.
    r = ((1/pos_diff) - 0.5)
    at_final_position = False
    if pos_diff < 1.2:
        # One-time bonus the first time the goal area is entered.
        if not self.was_big_reward:
            r += 100
            self.was_big_reward = True
        if rot_diff < 5:
            at_final_position = True
    elif self.was_big_reward:
        # Leaving the goal area again is penalised and re-arms the bonus.
        self.was_big_reward = False
        r -= 50
    velocity = self.player.get_velocity()
    # Squared planar speed; cheap stand-in for "nearly stationary".
    v = (velocity.x)**2 + (velocity.y)**2
    done = False
    if at_final_position and v < 0.01:
        done = True
    # Any collision ends the episode with a large penalty.
    if self.collision_sensor.collision is not None:
        r -= 100
        done = True
    return self.observation(), r, done, None
class ActionSpace:
    # Minimal stand-in for gym.spaces.Discrete: exposes only the number
    # of discrete actions, taken from the steering-wheel action mapping.
    n = ACTION_SPACE
class ObservationSpace:
    # Minimal stand-in for gym.spaces.Box: observations are [x, y, yaw].
    shape = [3]
class CarlaEnvironment:
    """ Gym-style wrapper around a running CARLA server.

    Bug fix: merge/diff residue had left duplicate (dead, shadowed)
    definitions of __init__ and step in this class; only the current
    versions are kept.
    """
    action_space = ActionSpace
    observation_space = ObservationSpace
    world = None
    client = None
    camera = None
    allow_render = False

    def __init__(self, host="127.0.0.1", port=2000, render=False):
        """ Connect to the CARLA server and load the training town.

        Parameters:
            host: IP of the CARLA server.
            port: RPC port of the CARLA server.
            render: when True, attach a camera so render() shows frames.
        """
        pygame.init()
        self.client = carla.Client(host, port)
        self.client.set_timeout(2.0)
        self.client.load_world('Town07')
        self.world = World(self.client.get_world())
        if render:
            self.allow_render = True
            self.camera = Camera(self.world, camera_type='semantic_segmentation')
        # Debug output: size of the observation vector.
        print(self.observation_space.shape[0])

    def reset(self):
        """ Reset the wrapped world and return the initial observation. """
        self.world.reset()
        return self.world.observation()

    def render(self):
        """ Update the camera view, if rendering was enabled at construction. """
        if self.allow_render:
            self.camera.on_update()

    def step(self, action, render=False):
        """ Apply one discrete action; returns (observation, reward, done, info). """
        if render:
            self.render()
        return self.world.step(action)
if __name__ == "__main__":
    # Manual-driving loop: steer the vehicle with the wheel/controller and
    # print the reward stream. Diff residue (duplicate env construction,
    # stale Camera/clock/step lines) has been removed.
    LOCALHOST = "127.0.0.1"
    SERVER = "192.168.188.20"  # alternative remote CARLA host
    pygame.init()
    env = CarlaEnvironment(host=LOCALHOST, render=True)
    clock = pygame.time.Clock()
    ctrl = Controller()
    cumulated_reward = 0
    while ctrl.is_running():
        clock.tick(20)
        ctrl.on_update()
        # Bug fix: World.step returns a 4-tuple (obs, reward, done, info);
        # unpacking into three names raised ValueError.
        obs, reward, done, _ = env.step(ctrl.get_action(), render=True)
        if done:
            break
        print(str(reward) + ' ' + str(done))
        cumulated_reward += reward
    env.world.destroy()
    pygame.quit()
    print(cumulated_reward)
......@@ -4,9 +4,10 @@ Run your desired environment and agent configuration.
import os
import atexit
import gym
from carla_environment import CarlaEnvironment
# import gym
from agents import DQAgent as QAgent
from agents import QAgent as QAgent
import environment_wrapper as ew
# Allow GPU usage or force tensorflow to use the CPU.
......@@ -15,13 +16,15 @@ if FORCE_CPU:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
RENDER = False
if __name__ == '__main__':
# 1. Create an environment
env = gym.make('LunarLander-v2')
env = CarlaEnvironment(render=RENDER)
# env = gym.make('LunarLander-v2')
print(env.observation_space.shape[0])
# 2. Create a learning agent
marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'FromScratchDouble')
marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'CarlaTest')
# (2.5) *optional* Load agent memory and/or net from disk.
agnt = 'agent'
......@@ -31,11 +34,10 @@ if __name__ == '__main__':
marvin.load('saved_agents/' + agnt + '/' + agnt, net=LOAD_ANN, memory=LOAD_MEMORIES)
# 3. Set your configurations for the run.
RENDER = False
LEARNING = True
LEARN_ONLINE = True
LEARN_OFFLINE = False
RUN_EPISODES = 500
RUN_EPISODES = 10
LEARN_OFFLINE_EPOCHS = 500
SAVE_PATH = "./saved_agents"
......
......@@ -9,6 +9,18 @@ JOYSTICK_REVERSE=3
INPUT_OFFSET = 2.0
# Discrete action ids shared between the controller and the environment.
IDLE=0
FORWARD=1
REVERSE=2
FORWARD_LEFT=3
FORWARD_RIGHT=4
LEFT=5
RIGHT=6
BRAKE=7
REVERSE_LEFT=8
REVERSE_RIGHT=9
# Total number of discrete actions (size of the agent's action space).
ACTION_SPACE=10
class ManualSteeringWheel:
""" Steering wheel """
axis_count = 0.0
......@@ -52,9 +64,49 @@ class ManualSteeringWheel:
self.get_throttle(update=False)
self.get_brakes(update=False)
@staticmethod
def action_to_controls(action):
    """ Translate a discrete action id into vehicle controls.

    Parameters:
        action: one of the discrete action constants (IDLE .. REVERSE_RIGHT).

    Returns:
        [throttle, steer, brake, reverse] -- three floats and a bool.

    Raises:
        ValueError: for an unknown action id. (Bug fix: diff residue had
        left a stale fall-through `return [self.throttle, ...]` from the
        old get_action, which raised NameError because this function has
        no `self`.)
    """
    controls = {
        IDLE:          [0.0,  0.0, 0.0, False],
        FORWARD:       [0.8,  0.0, 0.0, False],
        REVERSE:       [0.8,  0.0, 0.0, True],
        FORWARD_LEFT:  [0.8, -0.4, 0.0, False],
        FORWARD_RIGHT: [0.8,  0.4, 0.0, False],
        LEFT:          [0.0, -0.4, 0.0, False],
        RIGHT:         [0.0,  0.5, 0.0, False],
        BRAKE:         [0.0,  0.0, 0.5, False],
        REVERSE_LEFT:  [0.5, -0.4, 0.0, True],
        REVERSE_RIGHT: [0.5,  0.4, 0.0, True],
    }
    try:
        return list(controls[action])
    except KeyError:
        raise ValueError('unknown action id: ' + str(action))
def get_action(self):
    """ Classify the current pedal/wheel state as one discrete action id. """
    accelerating = self.throttle > 0.5 and self.brakes < 0.5
    coasting = self.throttle < 0.5 and self.brakes < 0.5
    steer = self.direction
    reversing = self.reverse
    # Checked in the same order as the original elif chain; first match wins.
    if accelerating and abs(steer) < 0.1 and not reversing:
        return FORWARD
    if accelerating and steer < -0.2 and not reversing:
        return FORWARD_LEFT
    if accelerating and steer > 0.2 and not reversing:
        return FORWARD_RIGHT
    if accelerating and abs(steer) < 0.2 and reversing:
        return REVERSE
    if coasting and steer < -0.2:
        return LEFT
    if coasting and steer > 0.2:
        return RIGHT
    if self.brakes > 0.5:
        return BRAKE
    if accelerating and steer < -0.2 and reversing:
        return REVERSE_LEFT
    if accelerating and steer > 0.2 and reversing:
        return REVERSE_RIGHT
    return IDLE
class Controller(ManualSteeringWheel):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment