diff --git a/EvolutionStrategies/main.py b/EvolutionStrategies/main.py
index d8c8a50521a4b13ec9edfb04cab71985bf6d436b..d8edb31c9be91589f691c530ad26b18c08429a30 100644
--- a/EvolutionStrategies/main.py
+++ b/EvolutionStrategies/main.py
@@ -13,8 +13,8 @@ GENS = 7000
 MAX_STEPS = 300  # the environment returns done after 1600 steps anyway.
 DECAY_ALPHA = True
 
-VERSION = 1
-TEST_WALKER = True
+VERSION = 100
+TEST_WALKER = False
 LOAD_BRAIN = False
 RENDER_BEST = False
 if TEST_WALKER:
@@ -31,6 +31,7 @@ def plot_reward(rewards):
 
 if __name__ == '__main__':
     avg_rewards = []
+    best_avg_reward = float("-inf")  # sentinel: any real average reward beats it
 
     try:
         population = Population(POP_SIZE, HIDDEN_LAYER, BIAS, MUTATION_FACTOR, MAX_STEPS, LOAD_BRAIN, VERSION, LEARNING_RATE, RENDER_BEST)
@@ -53,8 +54,12 @@ if __name__ == '__main__':
             population.evolve()
             print("Time for Gen: ", time.time() - start_time)
             if gen % 10 == 0:
-                population.walker.save()
-                avg_rewards.append(population.get_walker_stats())
+                avg_reward = population.get_walker_stats()
+                if avg_reward > best_avg_reward:  # checkpoint only on strict improvement
+                    population.walker.save()
+                    best_avg_reward = avg_reward
+                    print("New best walker found")
+                avg_rewards.append(avg_reward)
                 population.walker.save_mlp_weights(gen)
                 with open(f'./models/{HIDDEN_LAYER}_{VERSION}_{POP_SIZE}_{LEARNING_RATE}_AvgRewards', 'wb') as fp:
                     pickle.dump(avg_rewards, fp)
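
Note: this hunk replaces the unconditional every-10-generations save with checkpoint-on-improvement, so a lucky early checkpoint is never overwritten by a worse walker. A minimal sketch of the resulting loop, using the names from the diff (get_walker_stats() is assumed to return a scalar average reward, walker.save() to persist the current parameters):

    best_avg_reward = float("-inf")  # sentinel: any real average beats it
    for gen in range(GENS):
        population.evolve()
        if gen % 10 == 0:
            avg_reward = population.get_walker_stats()  # evaluate current walker
            if avg_reward > best_avg_reward:            # strictly better -> checkpoint
                population.walker.save()
                best_avg_reward = avg_reward
            avg_rewards.append(avg_reward)              # log every evaluation, not only bests
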
diff --git a/EvolutionStrategies/mlp_visualizer.py b/EvolutionStrategies/mlp_visualizer.py
index ff933fc3407cdecf841a1d569b95920b550395db..cd10d30f0fec73a25067f71d2923288398dba0b9 100644
--- a/EvolutionStrategies/mlp_visualizer.py
+++ b/EvolutionStrategies/mlp_visualizer.py
@@ -88,7 +88,7 @@ class NeuralNetwork():
         for layer in self.layers:
             layer.draw()
         pyplot.axis('scaled')
-        pyplot.savefig(f'./models/mlp_{gen}.png', dpi=300)
+        pyplot.savefig(f'./models/NN_Images/mlp_{gen}.png', dpi=300)
         pyplot.cla()
         #pyplot.show()
 
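
Note: matplotlib's savefig does not create missing directories, so writing into ./models/NN_Images/ raises FileNotFoundError unless that folder already exists. A one-line guard, e.g. at the top of draw() (the placement is an assumption; the directory name comes from the hunk):

    import os
    os.makedirs('./models/NN_Images', exist_ok=True)  # no-op if the folder already exists
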
diff --git a/MutateActions/main.py b/MutateActions/main.py
index a8d4fe7545d19b013a9bc01b16fa04b411f9bdee..794b6703d6703775adb3096dcedafccdab5048ed 100644
--- a/MutateActions/main.py
+++ b/MutateActions/main.py
@@ -10,7 +10,7 @@ MUTATION_FACTOR = 0.2  # 0 <= x <= 1
 GAME_CANCELLED = False
 LOAD_BRAIN = False
 RENDER_BEST = False
-TEST_WALKER = True
+TEST_WALKER = False
 
 if TEST_WALKER:
     LOAD_BRAIN = True
diff --git a/MutateActions/population.py b/MutateActions/population.py
index 4f62c2c6118f925b8a849ca6194ee8e5c9b13f35..282244222d086bbd37de91a5ac1ccbd08ad48441 100644
--- a/MutateActions/population.py
+++ b/MutateActions/population.py
@@ -21,6 +21,7 @@ class Population:
         self.walkers = []
         self.envs = []
         self.fitnesses = None
+        self.best_fitness = float("-inf")  # sentinel: any real fitness beats it
         for i in range(self.size):
             self.walkers.append(Walker(self.brain_size, load_brain, render_best))
         self.reset_environments()
@@ -56,7 +57,10 @@ class Population:
     def natural_selection(self):  # gets the next generation of players
         self.calculate_fitness_sum()
         self.set_best_walker()
-        self.walkers[self.best_walker_index].brain.save()
+        if self.best_walker_fitness > self.best_fitness:  # persist the brain only on strict improvement
+            self.walkers[self.best_walker_index].brain.save()
+            self.best_fitness = self.best_walker_fitness
+            print("New best walker found")
         # the champion lives on
         new_walkers = [self.walkers[self.best_walker_index].get_baby()]
         new_walkers[0].is_best = True
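
Note: this is the same keep-best guard as in EvolutionStrategies/main.py, keyed on the champion's fitness instead of the 10-generation average reward. If the pattern spreads further, it could be factored into a small shared helper; a hypothetical sketch (BestTracker does not exist in the repo):

    class BestTracker:
        """Track the best score seen so far and report strict improvements."""
        def __init__(self):
            self.best = float("-inf")

        def improved(self, score):
            # strict '>' so ties do not trigger a redundant save
            if score > self.best:
                self.best = score
                return True
            return False

Each call site would then reduce to a single guard, e.g. if tracker.improved(self.best_walker_fitness): self.walkers[self.best_walker_index].brain.save().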