Skip to content

Commit

Permalink
Add resume_training method. Fixes train-after-load bug.
Browse the repository at this point in the history
  • Loading branch information
honnibal committed Jan 9, 2017
1 parent 11dbefd commit 09b030b
Showing 1 changed file with 18 additions and 1 deletion.
19 changes: 18 additions & 1 deletion thinc/linear/avgtron.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,22 @@ cdef class AveragedPerceptron:
W.val = avg.val / (self.time+1)
W += 1
avg += 1


def resume_training(self):
    """Rebuild the per-feature training state (current weights, averages,
    update times) for any feature that has weights but no entry in
    self.averages. This makes it possible to continue training a model
    that was loaded from disk, where only the final weights were saved
    (fixes the train-after-load bug).
    """
    cdef feat_t feat_id
    cdef size_t feat_addr
    for i, (feat_id, feat_addr) in enumerate(self.weights.items()):
        train_feat = <SparseAverageC*>self.averages.get(feat_id)
        if train_feat == NULL:
            # No training state yet for this feature: allocate it.
            # NOTE(review): the scraped diff dropped this allocation line
            # between the two NULL checks — reconstructed here; confirm
            # against the upstream commit.
            train_feat = <SparseAverageC*>PyMem_Malloc(sizeof(SparseAverageC))
            if train_feat is NULL:
                msg = (feat_id)
                raise MemoryError(msg)
            # Seed curr/avgs/times from the loaded weights so averaging
            # resumes from the deserialized state.
            weights = <const SparseArrayC*>feat_addr
            train_feat.curr = SparseArray.clone(weights)
            train_feat.avgs = SparseArray.clone(weights)
            train_feat.times = SparseArray.clone(weights)
            self.averages.set(feat_id, train_feat)

@property
def L1(self):
cdef long double l1 = 0.0
Expand Down Expand Up @@ -208,6 +223,8 @@ cdef class AveragedPerceptron:
weight_t grad) except -1:
if grad == 0:
return 0
if len(self.averages) < len(self.weights):
self.resume_training()
feat = <SparseAverageC*>self.averages.get(feat_id)
if feat == NULL:
feat = <SparseAverageC*>PyMem_Malloc(sizeof(SparseAverageC))
Expand Down

0 comments on commit 09b030b

Please sign in to comment.