Commit 747e4d42 authored by Armin Co

Minor Refactoring

larger net
parent 2d054dc2
@@ -81,11 +81,11 @@ class CollisionSensor:
         self = weak_self()
         if not self:
             return
-        print(event.other_actor)
+        # print(event.other_actor)
         self.collision = event
         impulse = event.normal_impulse
         intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
-        print(intensity)
+        # print(intensity)
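For context, this hunk sits inside the sensor's collision callback, and the commit only silences the debug prints. A minimal sketch of the surrounding class, following the CollisionSensor pattern from CARLA's manual_control.py example; the constructor, the _on_collision name, and the use of a raw carla.World (rather than the repo's World wrapper) are assumptions, not part of the hunk:

import math
import weakref
import carla

class CollisionSensor:
    def __init__(self, parent_actor, world):
        self.collision = None
        blueprint = world.get_blueprint_library().find('sensor.other.collision')
        self.sensor = world.spawn_actor(blueprint, carla.Transform(), attach_to=parent_actor)
        # Pass a weak reference into the callback to avoid a reference cycle
        # between the sensor and this wrapper object.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))

    @staticmethod
    def _on_collision(weak_self, event):
        self = weak_self()
        if not self:
            return
        self.collision = event
        # Collision strength as the magnitude of the impulse vector.
        impulse = event.normal_impulse
        return math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)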
@@ -209,13 +209,14 @@ class CarlaEnvironment:
     def __init__(self, host="127.0.0.1", port=2000, render=False):
         pygame.init()
         self.client = carla.Client(host, port)
-        self.client.set_timeout(2.0)
+        self.client.set_timeout(5.0)
         time.sleep(1.0)
         self.client.load_world('Town07')
         time.sleep(1.0)
         self.world = World(self.client.get_world())
         if render:
             self.allow_render = True
             self.camera = Camera(self.world, camera_type='semantic_segmentation')
         print(self.observation_space.shape[0])
     def reset(self):
         self.world.reset()
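The timeout increase matters because client.load_world('Town07') can easily take longer than two seconds, and the client raises a timeout error when it does. A minimal standalone sketch of the same connection sequence, assuming a local CARLA server on the default RPC port:

import time
import carla

client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5.0)          # was 2.0; map loading often exceeds that
time.sleep(1.0)
client.load_world('Town07')      # blocks until the new map is ready
time.sleep(1.0)
world = client.get_world()
print(world.get_map().name)      # sanity check that Town07 is active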
@@ -19,7 +19,7 @@ def reset(environment):
     return state
-def one_episode(environment, agent, render, learn, max_steps=3000):
+def one_episode(environment, agent, render, learn, max_steps=1000):
     """ Perform one episode of the agent in the environment. """
     score = 0
     state = reset(environment)
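The hunk only shows the signature change (max_steps cut from 3000 to 1000) and the first lines of the function; the rest of the loop presumably looks roughly like the sketch below. The agent.get_action / agent.remember / agent.learn names and the step() return shape are assumptions, not taken from the diff:

def one_episode(environment, agent, render, learn, max_steps=1000):
    """ Perform one episode of the agent in the environment. """
    score = 0
    state = reset(environment)
    for _ in range(max_steps):
        action = agent.get_action(state)                     # assumed agent API
        state_next, reward, done = environment.step(action)  # assumed env API
        score += reward
        if learn:
            agent.remember(state, action, reward, state_next, done)
            agent.learn()
        state = state_next
        if done:
            break
    return score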
@@ -79,6 +79,6 @@ def process_logs(avg_score_history, loss, title="Title", render=False):
     plt.plot([i+1 for i in range(0, len(loss), 2)], loss[::2])
     plt.plot([i+1 for i in range(0, len(avg_score_history), 2)], avg_score_history[::2], '--')
     plt.title(title)
-    plt.savefig(title + '.png', format="png")
+    plt.savefig('saved_agents/' + title + '.png', format="png")
     if render:
         plt.show()
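One caveat with the new save path: matplotlib's savefig does not create missing directories, so writing into saved_agents/ assumes that folder already exists (the agent save path in the main script suggests it usually does). A defensive variant would be:

import os
import matplotlib.pyplot as plt

os.makedirs('saved_agents', exist_ok=True)   # no-op if the folder is already there
plt.savefig(os.path.join('saved_agents', title + '.png'), format="png")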
@@ -5,6 +5,7 @@ Run your desired environment and agent configuration.
 import os
 import atexit
 from carla_environment import CarlaEnvironment
+CARLA=True
 # import gym
 from agents import QAgent as QAgent
@@ -20,25 +21,23 @@ RENDER = False
 if __name__ == '__main__':
     # 1. Create an environment
     env = CarlaEnvironment(render=RENDER)
     # env = gym.make('LunarLander-v2')
     print(env.observation_space.shape[0])
     # 2. Create a learning agent
-    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'CarlaTest')
+    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'Carla')
     # (2.5) *optional* Load agent memory and/or net from disk.
-    agnt = 'agent'
+    agnt = 'Carla'
     LOAD_ANN = False
-    LOAD_MEMORIES = False
+    LOAD_MEMORIES = True
     if LOAD_ANN or LOAD_MEMORIES:
         marvin.load('saved_agents/' + agnt + '/' + agnt, net=LOAD_ANN, memory=LOAD_MEMORIES)
     # 3. Set your configurations for the run.
     LEARNING = True
     LEARN_ONLINE = True
-    LEARN_OFFLINE = False
-    RUN_EPISODES = 10
-    LEARN_OFFLINE_EPOCHS = 500
+    LEARN_OFFLINE = True
+    RUN_EPISODES = 100
+    LEARN_OFFLINE_EPOCHS = 1000
     SAVE_PATH = "./saved_agents"
     # Register an *atexit* callback,
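These flags switch the run from a short smoke test (10 online episodes, no offline learning) to a longer training configuration: stored memories are loaded, replayed for 1000 offline epochs, and then 100 episodes are driven in the simulator. A hedged sketch of how such flags typically drive the run; the learn_offline helper, the loss accessor, and the ew alias for the one_episode module are assumptions, since those calls sit outside this hunk:

# Sketch only -- helper names are assumptions, not taken from the diff.
if LEARNING and LEARN_OFFLINE:
    marvin.learn_offline(LEARN_OFFLINE_EPOCHS)   # replay stored memories before driving

avg_score, loss = [], []
for _ in range(RUN_EPISODES):
    score = ew.one_episode(env, marvin, RENDER, LEARNING and LEARN_ONLINE)
    avg_score.append(score)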
@@ -63,5 +62,7 @@ if __name__ == '__main__':
     if LEARNING:
         marvin.save(SAVE_PATH)
+    if CARLA:
+        env.world.destroy()
     # Show the result of the run.
     ew.process_logs(avg_score, loss, title=marvin.name, render=RENDER)
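The new CARLA flag guards world.destroy() so the same script can still be pointed at a gym environment; destroying the spawned vehicle and sensors matters because otherwise they stay alive in the simulator after the client exits. The atexit callback mentioned in the truncated comment above presumably looks something like this sketch (the cleanup name is an assumption):

import atexit

def cleanup():
    # Remove the spawned vehicle and sensors so the CARLA server stays clean
    # even if the run crashes or is interrupted.
    if CARLA:
        env.world.destroy()

atexit.register(cleanup)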
@@ -14,7 +14,7 @@ class QNet:
         self.net.summary()
     def compile_net(self, action_space, state_space):
-        self.net.add(Dense(160, input_dim=state_space, activation=relu))
+        self.net.add(Dense(256, input_dim=state_space, activation=relu))
         self.net.add(Dense(128, activation=relu))
         self.net.add(Dense(action_space, activation=linear))
         self.net.compile(loss='mse', optimizer=Adam(lr=self.learn_rate))
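Taken together with the unchanged lines, the "larger net" from the commit message is a three-layer fully connected Q-network whose first hidden layer is widened from 160 to 256 units. A standalone sketch, assuming the pre-TF2 Keras API that the lr= argument in the diff implies; the literal learning rate is a placeholder, not the repo's value:

from keras.models import Sequential
from keras.layers import Dense
from keras.activations import relu, linear
from keras.optimizers import Adam

def build_q_net(state_space, action_space, learn_rate=0.001):
    net = Sequential()
    net.add(Dense(256, input_dim=state_space, activation=relu))   # widened from 160
    net.add(Dense(128, activation=relu))
    net.add(Dense(action_space, activation=linear))                # one linear Q-value per action
    net.compile(loss='mse', optimizer=Adam(lr=learn_rate))
    return net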