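"""Logistic matrix factorization for implicit feedback data.

Learns latent user/item vectors and per-user/per-item biases by
maximizing a regularized log likelihood of observed interaction
counts, using alternating AdaGrad gradient ascent.
"""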
import time
import numpy as np


def load_matrix(filename, num_users, num_items):
    """Load a tab-separated (user, item, count) file into a dense matrix,
    then rescale the counts by alpha = (#zero entries) / (total count) so
    that positive and zero observations carry comparable total weight."""
    t0 = time.time()
    counts = np.zeros((num_users, num_items))
    total = 0.0
    num_zeros = num_users * num_items
    with open(filename) as infile:
        for line in infile:
            user, item, count = line.strip().split('\t')
            user = int(user)
            item = int(item)
            count = float(count)
            counts[user, item] = count
            total += count
            num_zeros -= 1
    alpha = num_zeros / total
    print('alpha %.2f' % alpha)
    counts *= alpha
    t1 = time.time()
    print('Finished loading matrix in %f seconds' % (t1 - t0))
    return counts
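
# load_matrix expects one tab-separated interaction per line, e.g. the
# (hypothetical) line "0\t13\t2.0" means user 0 interacted with item 13
# twice. User and item ids are assumed to be dense, 0-based indices.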


class LogisticMF:

    def __init__(self, counts, num_factors, reg_param=0.6, gamma=1.0,
                 iterations=30):
        self.counts = counts
        self.num_users = counts.shape[0]
        self.num_items = counts.shape[1]
        self.num_factors = num_factors
        self.iterations = iterations
        self.reg_param = reg_param
        self.gamma = gamma

    def train_model(self):
        """Alternately update user and item parameters with AdaGrad
        gradient ascent on the regularized log likelihood."""
        self.ones = np.ones((self.num_users, self.num_items))
        self.user_vectors = np.random.normal(size=(self.num_users,
                                                   self.num_factors))
        self.item_vectors = np.random.normal(size=(self.num_items,
                                                   self.num_factors))
        self.user_biases = np.random.normal(size=(self.num_users, 1))
        self.item_biases = np.random.normal(size=(self.num_items, 1))

        # Running sums of squared gradients for the AdaGrad step sizes.
        user_vec_deriv_sum = np.zeros((self.num_users, self.num_factors))
        item_vec_deriv_sum = np.zeros((self.num_items, self.num_factors))
        user_bias_deriv_sum = np.zeros((self.num_users, 1))
        item_bias_deriv_sum = np.zeros((self.num_items, 1))
        for i in range(self.iterations):
            t0 = time.time()
            # Fix items and solve for users: step in the positive direction
            # of the gradient because we are maximizing the log likelihood.
            user_vec_deriv, user_bias_deriv = self.deriv(True)
            user_vec_deriv_sum += np.square(user_vec_deriv)
            user_bias_deriv_sum += np.square(user_bias_deriv)
            vec_step_size = self.gamma / np.sqrt(user_vec_deriv_sum)
            bias_step_size = self.gamma / np.sqrt(user_bias_deriv_sum)
            self.user_vectors += vec_step_size * user_vec_deriv
            self.user_biases += bias_step_size * user_bias_deriv

            # Fix users and solve for items, with the same update rule.
            item_vec_deriv, item_bias_deriv = self.deriv(False)
            item_vec_deriv_sum += np.square(item_vec_deriv)
            item_bias_deriv_sum += np.square(item_bias_deriv)
            vec_step_size = self.gamma / np.sqrt(item_vec_deriv_sum)
            bias_step_size = self.gamma / np.sqrt(item_bias_deriv_sum)
            self.item_vectors += vec_step_size * item_vec_deriv
            self.item_biases += bias_step_size * item_bias_deriv
            t1 = time.time()
            print('iteration %i finished in %f seconds' % (i + 1, t1 - t0))

    def deriv(self, user):
        """Gradient of the log likelihood with respect to the user
        (user=True) or item (user=False) vectors and biases."""
        if user:
            vec_deriv = np.dot(self.counts, self.item_vectors)
            bias_deriv = np.expand_dims(np.sum(self.counts, axis=1), 1)
        else:
            vec_deriv = np.dot(self.counts.T, self.user_vectors)
            bias_deriv = np.expand_dims(np.sum(self.counts, axis=0), 1)
        # A = sigmoid(x_u . y_i + b_u + c_i), weighted by (counts + 1).
        A = np.dot(self.user_vectors, self.item_vectors.T)
        A += self.user_biases
        A += self.item_biases.T
        A = np.exp(A)
        A /= (A + self.ones)
        A = (self.counts + self.ones) * A
        if user:
            vec_deriv -= np.dot(A, self.item_vectors)
            bias_deriv -= np.expand_dims(np.sum(A, axis=1), 1)
            # L2 regularization
            vec_deriv -= self.reg_param * self.user_vectors
        else:
            vec_deriv -= np.dot(A.T, self.user_vectors)
            bias_deriv -= np.expand_dims(np.sum(A, axis=0), 1)
            # L2 regularization
            vec_deriv -= self.reg_param * self.item_vectors
        return (vec_deriv, bias_deriv)

    def log_likelihood(self):
        """Regularized log likelihood of the current parameters:
        sum(counts * A) - sum((counts + 1) * log(1 + exp(A))) minus the
        L2 penalties, where A = UV^T + user_biases + item_biases^T."""
        loglik = 0
        A = np.dot(self.user_vectors, self.item_vectors.T)
        A += self.user_biases
        A += self.item_biases.T
        B = A * self.counts
        loglik += np.sum(B)
        A = np.exp(A)
        A += self.ones
        A = np.log(A)
        A = (self.counts + self.ones) * A
        loglik -= np.sum(A)
        # L2 regularization
        loglik -= 0.5 * self.reg_param * np.sum(np.square(self.user_vectors))
        loglik -= 0.5 * self.reg_param * np.sum(np.square(self.item_vectors))
        return loglik

    def print_vectors(self):
        """Write the learned user and item vectors to tab-separated files."""
        with open('logmf-user-vecs-%i' % self.num_factors, 'w') as outfile:
            for i in range(self.num_users):
                vec = ' '.join(map(str, self.user_vectors[i]))
                outfile.write('%i\t%s\n' % (i, vec))
        with open('logmf-item-vecs-%i' % self.num_factors, 'w') as outfile:
            for i in range(self.num_items):
                vec = ' '.join(map(str, self.item_vectors[i]))
                outfile.write('%i\t%s\n' % (i, vec))
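

if __name__ == '__main__':
    # Minimal usage sketch. The file name, matrix dimensions, and
    # hyperparameter values below are illustrative assumptions, not
    # part of the original module.
    num_users, num_items = 1000, 500
    counts = load_matrix('counts.tsv', num_users, num_items)  # hypothetical path
    mf = LogisticMF(counts, num_factors=30, reg_param=0.6, gamma=1.0,
                    iterations=30)
    mf.train_model()
    print('final log likelihood %f' % mf.log_likelihood())
    mf.print_vectors()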