# DQN.py (forked from maurock/snake-ga)
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense, Dropout
import random
import numpy as np
import pandas as pd
from operator import add


class DQNAgent(object):

    def __init__(self):
        self.reward = 0
        self.gamma = 0.9                # discount factor for future rewards
        self.dataframe = pd.DataFrame()
        self.short_memory = np.array([])
        self.agent_target = 1
        self.agent_predict = 0
        self.learning_rate = 0.0005
        self.model = self.network()
        # self.model = self.network("weights.hdf5")  # load pretrained weights instead
        self.epsilon = 0                # exploration rate, set by the training loop
        self.actual = []
        self.memory = []                # replay buffer of (state, action, reward, next_state, done)
    def get_state(self, game, player, food):
        state = [
            (player.x_change == 20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [20, 0])) in player.position) or
              player.position[-1][0] + 20 >= (game.game_width - 20))) or
            (player.x_change == -20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [-20, 0])) in player.position) or
              player.position[-1][0] - 20 < 20)) or
            (player.x_change == 0 and player.y_change == -20 and
             ((list(map(add, player.position[-1], [0, -20])) in player.position) or
              player.position[-1][-1] - 20 < 20)) or
            (player.x_change == 0 and player.y_change == 20 and
             ((list(map(add, player.position[-1], [0, 20])) in player.position) or
              player.position[-1][-1] + 20 >= (game.game_height - 20))),  # danger straight

            (player.x_change == 0 and player.y_change == -20 and
             ((list(map(add, player.position[-1], [20, 0])) in player.position) or
              player.position[-1][0] + 20 > (game.game_width - 20))) or
            (player.x_change == 0 and player.y_change == 20 and
             ((list(map(add, player.position[-1], [-20, 0])) in player.position) or
              player.position[-1][0] - 20 < 20)) or
            (player.x_change == -20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [0, -20])) in player.position) or
              player.position[-1][-1] - 20 < 20)) or
            (player.x_change == 20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [0, 20])) in player.position) or
              player.position[-1][-1] + 20 >= (game.game_height - 20))),  # danger right

            (player.x_change == 0 and player.y_change == 20 and
             ((list(map(add, player.position[-1], [20, 0])) in player.position) or
              player.position[-1][0] + 20 > (game.game_width - 20))) or
            (player.x_change == 0 and player.y_change == -20 and
             ((list(map(add, player.position[-1], [-20, 0])) in player.position) or
              player.position[-1][0] - 20 < 20)) or
            (player.x_change == 20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [0, -20])) in player.position) or
              player.position[-1][-1] - 20 < 20)) or
            (player.x_change == -20 and player.y_change == 0 and
             ((list(map(add, player.position[-1], [0, 20])) in player.position) or
              player.position[-1][-1] + 20 >= (game.game_height - 20))),  # danger left

            player.x_change == -20,  # move left
            player.x_change == 20,   # move right
            player.y_change == -20,  # move up
            player.y_change == 20,   # move down
            food.x_food < player.x,  # food left
            food.x_food > player.x,  # food right
            food.y_food < player.y,  # food up
            food.y_food > player.y   # food down
        ]

        for i in range(len(state)):
            if state[i]:
                state[i] = 1
            else:
                state[i] = 0

        return np.asarray(state)
    def set_reward(self, player, crash):
        self.reward = 0
        if crash:
            self.reward = -10
            return self.reward
        if player.eaten:
            self.reward = 10
        return self.reward
    def network(self, weights=None):
        model = Sequential()
        model.add(Dense(120, activation='relu', input_dim=11))
        model.add(Dropout(0.15))
        model.add(Dense(120, activation='relu'))
        model.add(Dropout(0.15))
        model.add(Dense(120, activation='relu'))
        model.add(Dropout(0.15))
        model.add(Dense(3, activation='softmax'))
        opt = Adam(self.learning_rate)
        model.compile(loss='mse', optimizer=opt)

        if weights:
            model.load_weights(weights)
        return model
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def replay_new(self, memory):
        # Fit on a minibatch of at most 1000 stored transitions.
        if len(memory) > 1000:
            minibatch = random.sample(memory, 1000)
        else:
            minibatch = memory
        for state, action, reward, next_state, done in minibatch:
            # Q-learning target: r for terminal steps, r + gamma * max_a' Q(s', a') otherwise.
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(np.array([next_state]))[0])
            target_f = self.model.predict(np.array([state]))
            target_f[0][np.argmax(action)] = target
            self.model.fit(np.array([state]), target_f, epochs=1, verbose=0)

    def train_short_memory(self, state, action, reward, next_state, done):
        # Same update as replay_new, applied to a single freshly observed transition.
        target = reward
        if not done:
            target = reward + self.gamma * np.amax(self.model.predict(next_state.reshape((1, 11)))[0])
        target_f = self.model.predict(state.reshape((1, 11)))
        target_f[0][np.argmax(action)] = target
        self.model.fit(state.reshape((1, 11)), target_f, epochs=1, verbose=0)
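

# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original file).
# It drives the agent with synthetic 11-bit states and random one-hot actions
# so it runs standalone. In the actual project the game loop supplies states
# via get_state() and rewards via set_reward(); the objects and flow used
# there are not reproduced here.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    agent = DQNAgent()

    for _ in range(10):
        # Fake transition: binary state vectors, a random one-hot action,
        # and a reward drawn from the values set_reward() can produce.
        state_old = np.random.randint(0, 2, size=11)
        state_new = np.random.randint(0, 2, size=11)
        action = np.eye(3)[random.randint(0, 2)]
        reward = random.choice([-10, 0, 10])
        done = reward == -10

        # Single-step update, then store the transition for batch replay.
        agent.train_short_memory(state_old, action, reward, state_new, done)
        agent.remember(state_old, action, reward, state_new, done)

    # Replay over everything collected so far (capped at 1000 samples inside).
    agent.replay_new(agent.memory)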