Armin Co / LearningEnvironment · Commits

Commit 747e4d42
authored Feb 12, 2021 by Armin Co
Minor Refactoring
larger net
parent 2d054dc2
Showing 4 changed files with 17 additions and 15 deletions:

  carla_environment.py    +5 −4
  environment_wrapper.py  +2 −2
  main.py                 +9 −8
  networks.py             +1 −1
carla_environment.py  +5 −4

@@ -81,11 +81,11 @@ class CollisionSensor:
         self = weak_self()
         if not self:
             return
-        print(event.other_actor)
+        # print(event.other_actor)
         self.collision = event
         impulse = event.normal_impulse
         intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
-        print(intensity)
+        # print(intensity)

@@ -209,13 +209,14 @@ class CarlaEnvironment:
     def __init__(self, host="127.0.0.1", port=2000, render=False):
         pygame.init()
         self.client = carla.Client(host, port)
-        self.client.set_timeout(2.0)
+        self.client.set_timeout(5.0)
+        time.sleep(1.0)
         self.client.load_world('Town07')
+        time.sleep(1.0)
         self.world = World(self.client.get_world())
         if render:
             self.allow_render = True
             self.camera = Camera(self.world, camera_type='semantic_segmentation')
-        print(self.observation_space.shape[0])

     def reset(self):
         self.world.reset()
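The longer RPC timeout and the two added sleeps are presumably there because load_world('Town07') can take longer than the old 2-second limit while the server swaps maps. A minimal sketch of the same connect-and-load pattern, assuming a CARLA server is already listening on 127.0.0.1:2000:

import time
import carla

# Connect and allow slow RPCs such as load_world to finish.
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5.0)
time.sleep(1.0)  # settle before the first heavy RPC

client.load_world('Town07')
time.sleep(1.0)  # give the server a moment after the map swap

world = client.get_world()
print(world.get_map().name)  # e.g. 'Town07'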
environment_wrapper.py  +2 −2
...
@@ -19,7 +19,7 @@ def reset(environment):
...
@@ -19,7 +19,7 @@ def reset(environment):
return
state
return
state
def
one_episode
(
environment
,
agent
,
render
,
learn
,
max_steps
=
3
000
):
def
one_episode
(
environment
,
agent
,
render
,
learn
,
max_steps
=
1
000
):
"""
Perform one episode of the agent in the environment.
"""
"""
Perform one episode of the agent in the environment.
"""
score
=
0
score
=
0
state
=
reset
(
environment
)
state
=
reset
(
environment
)
...
@@ -79,6 +79,6 @@ def process_logs(avg_score_history, loss, title="Title", render=False):
...
@@ -79,6 +79,6 @@ def process_logs(avg_score_history, loss, title="Title", render=False):
plt
.
plot
([
i
+
1
for
i
in
range
(
0
,
len
(
loss
),
2
)],
loss
[::
2
])
plt
.
plot
([
i
+
1
for
i
in
range
(
0
,
len
(
loss
),
2
)],
loss
[::
2
])
plt
.
plot
([
i
+
1
for
i
in
range
(
0
,
len
(
avg_score_history
),
2
)],
avg_score_history
[::
2
],
'
--
'
)
plt
.
plot
([
i
+
1
for
i
in
range
(
0
,
len
(
avg_score_history
),
2
)],
avg_score_history
[::
2
],
'
--
'
)
plt
.
title
(
title
)
plt
.
title
(
title
)
plt
.
savefig
(
title
+
'
.png
'
,
format
=
"
png
"
)
plt
.
savefig
(
'
saved_agents/
'
+
title
+
'
.png
'
,
format
=
"
png
"
)
if
render
:
if
render
:
plt
.
show
()
plt
.
show
()
This diff is collapsed.
Click to expand it.
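The plot now lands under saved_agents/, alongside the stored agents. plt.savefig raises FileNotFoundError when that directory is missing, so a guard along these lines may be useful (a sketch only; the ensure-directory step is not part of this commit, and save_plot is a hypothetical helper):

import os
import matplotlib.pyplot as plt

def save_plot(title):
    # Create saved_agents/ on demand before writing the figure.
    os.makedirs('saved_agents', exist_ok=True)
    plt.savefig('saved_agents/' + title + '.png', format="png")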
main.py  +9 −8
@@ -5,6 +5,7 @@ Run your desired environment and agent configuration.
 import os
 import atexit

 from carla_environment import CarlaEnvironment
+CARLA = True
 # import gym

 from agents import QAgent as QAgent

@@ -20,25 +21,23 @@ RENDER = False
 if __name__ == '__main__':

     # 1. Create an environment
     env = CarlaEnvironment(render=RENDER)
-    # env = gym.make('LunarLander-v2')
-    print(env.observation_space.shape[0])

     # 2. Create a learning agent
-    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'Carla Test')
+    marvin = QAgent(env.action_space.n, env.observation_space.shape[0], 'Carla')

     # (2.5) *optional* Load agent memory and/or net from disk.
-    agnt = 'agent'
+    agnt = 'Carla'
     LOAD_ANN = False
-    LOAD_MEMORIES = False
+    LOAD_MEMORIES = True
     if LOAD_ANN or LOAD_MEMORIES:
         marvin.load('saved_agents/' + agnt + '/' + agnt, net=LOAD_ANN, memory=LOAD_MEMORIES)

     # 3. Set your configurations for the run.
     LEARNING = True
     LEARN_ONLINE = True
-    LEARN_OFFLINE = False
-    RUN_EPISODES = 10
-    LEARN_OFFLINE_EPOCHS = 500
+    LEARN_OFFLINE = True
+    RUN_EPISODES = 100
+    LEARN_OFFLINE_EPOCHS = 1000
     SAVE_PATH = "./saved_agents"

     # Register an *atexit* callback,

@@ -63,5 +62,7 @@ if __name__ == '__main__':
     if LEARNING:
         marvin.save(SAVE_PATH)

+    if CARLA:
+        env.world.destroy()
+
     # Show the result of the runl.
     ew.process_logs(avg_score, loss, title=marvin.name, render=RENDER)
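The new CARLA flag lets the CARLA-specific teardown (env.world.destroy()) be skipped when a gym environment is used instead. The "Register an *atexit* callback" comment refers to Python's standard atexit hook; a minimal sketch of that pattern (save_on_exit is a hypothetical name, marvin and SAVE_PATH as in main.py):

import atexit

def save_on_exit(agent, path):
    # Invoked when the interpreter exits, so an interrupted run
    # still persists the agent's state.
    agent.save(path)

atexit.register(save_on_exit, marvin, SAVE_PATH)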
networks.py  +1 −1
@@ -14,7 +14,7 @@ class QNet:
         self.net.summary()

     def compile_net(self, action_space, state_space):
-        self.net.add(Dense(160, input_dim=state_space, activation=relu))
+        self.net.add(Dense(256, input_dim=state_space, activation=relu))
         self.net.add(Dense(128, activation=relu))
         self.net.add(Dense(action_space, activation=linear))
         self.net.compile(loss='mse', optimizer=Adam(lr=self.learn_rate))
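This one-line change is the "larger net" of the commit message: the first hidden layer of the Q-network grows from 160 to 256 units. A self-contained sketch of the resulting architecture in Keras (state and action sizes are placeholders; modern Keras spells the optimizer argument learning_rate rather than the lr used in the diff):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

state_space, action_space = 10, 4  # placeholder dimensions

net = Sequential()
net.add(Dense(256, input_dim=state_space, activation='relu'))  # was 160
net.add(Dense(128, activation='relu'))
net.add(Dense(action_space, activation='linear'))  # one Q-value per action
net.compile(loss='mse', optimizer=Adam(learning_rate=1e-3))
net.summary()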