diff --git a/carla_environment.py b/carla_environment.py
index d1c660d4c5a129a655271cdd51fd7888fe325eea..10e2d83369ac5cca7917410af7141eeb36d319de 100644
--- a/carla_environment.py
+++ b/carla_environment.py
@@ -81,11 +81,11 @@ class CollisionSensor:
         self = weak_self()
         if not self:
             return
-        print(event.other_actor)
+        # print(event.other_actor)
         self.collision = event
         impulse = event.normal_impulse
         intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
-        print(intensity)
+        # print(intensity)
 
 
 
@@ -209,13 +209,14 @@ class CarlaEnvironment:
     def __init__(self, host="127.0.0.1", port=2000, render=False):
         pygame.init()
         self.client = carla.Client(host, port)
-        self.client.set_timeout(2.0)
+        self.client.set_timeout(5.0)  # more generous timeout for slow server responses (e.g. world loading)
+        time.sleep(1.0)  # brief pause to let the server settle before loading the world
         self.client.load_world('Town07')
+        time.sleep(1.0)  # brief pause to let the newly loaded world initialize
         self.world = World(self.client.get_world())
         if render:
             self.allow_render = True
             self.camera = Camera(self.world, camera_type='semantic_segmentation')
-        print(self.observation_space.shape[0])
     
     def reset(self):
         self.world.reset()
diff --git a/environment_wrapper.py b/environment_wrapper.py
index 19e6cb0166e73c5ddbc4cb893258845720a77c21..fe29d01340d5e13e34913839af85d922f3592296 100644
--- a/environment_wrapper.py
+++ b/environment_wrapper.py
@@ -19,7 +19,7 @@ def reset(environment):
     return state
 
 
-def one_episode(environment, agent, render, learn, max_steps=3000):
+def one_episode(environment, agent, render, learn, max_steps=1000):
     """ Perform one episode of the agent in the environment. """
     score = 0
     state = reset(environment)
@@ -79,6 +79,6 @@ def process_logs(avg_score_history, loss, title="Title", render=False):
     plt.plot([i+1 for i in range(0, len(loss), 2)], loss[::2])
     plt.plot([i+1 for i in range(0, len(avg_score_history), 2)], avg_score_history[::2], '--')
     plt.title(title)
-    plt.savefig(title + '.png', format="png")
+    plt.savefig('saved_agents/' + title + '.png', format="png")
     if render:
         plt.show()
diff --git a/main.py b/main.py
index f09752bb10849ef59ead2b90c7d8bd9e82f21eb6..85397861041bfa6949b57c29e8690d4739bd8d0e 100644
--- a/main.py
+++ b/main.py
@@ -5,6 +5,7 @@ Run your desired environment and agent configuration.
 import os
 import atexit
 from carla_environment import CarlaEnvironment
+CARLA = True  # flag used to trigger CARLA-specific cleanup on exit
 # import gym
 
 from agents import QAgent as QAgent
@@ -20,25 +21,23 @@ RENDER = False
 if __name__ == '__main__':
     # 1. Create an environment
     env = CarlaEnvironment(render=RENDER)
-    # env = gym.make('LunarLander-v2')
-    print(env.observation_space.shape[0])
 
     # 2. Create a learning agent
-    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'CarlaTest')
+    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'Carla')
 
     # (2.5) *optional* Load agent memory and/or net from disk.
-    agnt = 'agent'
+    agnt = 'Carla'
     LOAD_ANN = False
-    LOAD_MEMORIES = False
+    LOAD_MEMORIES = True
     if LOAD_ANN or LOAD_MEMORIES:
         marvin.load('saved_agents/' + agnt + '/' + agnt, net=LOAD_ANN, memory=LOAD_MEMORIES)
 
     # 3. Set your configurations for the run.
     LEARNING = True
     LEARN_ONLINE = True
-    LEARN_OFFLINE = False
-    RUN_EPISODES = 10
-    LEARN_OFFLINE_EPOCHS = 500
+    LEARN_OFFLINE = True
+    RUN_EPISODES = 100
+    LEARN_OFFLINE_EPOCHS = 1000
     SAVE_PATH = "./saved_agents"
 
     # Register an *atexit* callback,
@@ -63,5 +62,7 @@ if __name__ == '__main__':
     if LEARNING:
         marvin.save(SAVE_PATH)
 
+    if CARLA:
+        env.world.destroy()  # clean up the CARLA world before exiting
     # Show the result of the run.
     ew.process_logs(avg_score, loss, title=marvin.name, render=RENDER)
diff --git a/networks.py b/networks.py
index 1e87bfb0dc2c8492bf99cb90ffcd685f4bb7ee5e..846e6fa8d670f08ab84c656369bc0fcca2b67a6a 100644
--- a/networks.py
+++ b/networks.py
@@ -14,7 +14,7 @@ class QNet:
         self.net.summary()
 
     def compile_net(self, action_space, state_space):
-        self.net.add(Dense(160, input_dim=state_space, activation=relu))
+        self.net.add(Dense(256, input_dim=state_space, activation=relu))
         self.net.add(Dense(128, activation=relu))
         self.net.add(Dense(action_space, activation=linear))
         self.net.compile(loss='mse', optimizer=Adam(lr=self.learn_rate))