Skip to content

Commit

Permalink
Python3 unit tests (apache#39)
Browse files Browse the repository at this point in the history
* change xrange to range for python3 compatibility


* remove more xrange from tests
  • Loading branch information
eric-haibin-lin committed May 19, 2017
1 parent 06f7023 commit b80f95a
Show file tree
Hide file tree
Showing 6 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion python/mxnet/_ctypes/ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def %s(%s):
if original_output is not None:
return original_output
ret_list = []
for i in xrange(num_output.value):
for i in range(num_output.value):
storage_type = ctypes.c_int(0)
check_call(_LIB.MXNDArrayGetStorageType(ctypes.cast(output_vars[i], NDArrayHandle),
ctypes.byref(storage_type)))
Expand Down
2 changes: 1 addition & 1 deletion python/mxnet/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def _get_outputs(self):
ctypes.byref(out_size), ctypes.byref(handles)))
num_output = out_size.value
outputs = []
for i in xrange(num_output):
for i in range(num_output):
storage_type = ctypes.c_int(0)
check_call(_LIB.MXNDArrayGetStorageType(ctypes.cast(handles[i], NDArrayHandle),
ctypes.byref(storage_type)))
Expand Down
2 changes: 1 addition & 1 deletion python/mxnet/sparse_ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ def aux_types(self):
'''
aux_types = []
num_aux = self._num_aux
for i in xrange(num_aux):
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types

Expand Down
4 changes: 2 additions & 2 deletions tests/python/unittest/test_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def update(self, index, weight, grad, state):
num_rows = weight.shape[0]
if self.momentum == 0.0:
# Update on a per row basis, skip all-zero rows
for row in xrange(num_rows):
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
Expand All @@ -197,7 +197,7 @@ def update(self, index, weight, grad, state):
weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]
else:
mom = state
for row in xrange(num_rows):
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
Expand Down
4 changes: 2 additions & 2 deletions tests/python/unittest/test_sparse_ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def test_sparse_nd_elemwise_add():
num_repeats = 10
g = lambda x,y: x + y
op = mx.nd.elemwise_add
for i in xrange(num_repeats):
for i in range(num_repeats):
shape = [(rnd.randint(1, 10),rnd.randint(1, 10))] * 2
check_sparse_nd_elemwise_binary(shape, ['default_storage'] * 2, op, g)
check_sparse_nd_elemwise_binary(shape, ['default_storage', 'row_sparse'], op, g)
Expand All @@ -36,7 +36,7 @@ def test_sparse_nd_elementwise_fallback():
num_repeats = 10
g = lambda x,y: x + y
op = mx.nd.add_n
for i in xrange(num_repeats):
for i in range(num_repeats):
shape = [(rnd.randint(1, 10), rnd.randint(1, 10))] * 2
check_sparse_nd_elemwise_binary(shape, ['default_storage'] * 2, op, g)
check_sparse_nd_elemwise_binary(shape, ['default_storage', 'row_sparse'], op, g)
Expand Down
2 changes: 1 addition & 1 deletion tests/python/unittest/test_sparse_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def test_elemwise_add_ex_multiple_stages():
check_symbolic_forward(test, {'sp_data1':sp_nd1, 'sp_data2':sp_nd2,
'ds_data':ds_nd}, [sp_np1 + sp_np2 + ds_np])

arr_grads = [mx.nd.zeros(shape) for i in xrange(3)]
arr_grads = [mx.nd.zeros(shape) for i in range(3)]
exec_test = test.bind(default_context(), args={'sp_data1':sp_nd1, 'sp_data2':sp_nd2,
'ds_data':ds_nd}, args_grad=arr_grads)
exec_test.forward(is_train=True)
Expand Down

0 comments on commit b80f95a

Please sign in to comment.