This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

Commit

Fix Lazy Logging (#3108)
HarshCasper authored Nov 23, 2020
1 parent 07dd4c5 commit 52e40cb
Showing 11 changed files with 21 additions and 30 deletions.
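Every change in this commit applies the same pattern: stop building log messages eagerly with '%' formatting or string concatenation, and instead pass the format string and its arguments to the logging call, so interpolation is deferred until a handler actually emits the record. A minimal sketch of the two styles (logger name and values are illustrative, not taken from the diff):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('lazy-logging-demo')

epoch, loss = 3, 0.25  # hypothetical values

# Eager: the message is formatted even though DEBUG records are discarded.
logger.debug('epoch %d loss: %g' % (epoch, loss))

# Lazy: logging stores the format string and arguments, and interpolates
# them only if the record passes the level and handler checks.
logger.debug('epoch %d loss: %g', epoch, loss)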
11 changes: 4 additions & 7 deletions examples/trials/ga_squad/trial.py
@@ -218,8 +218,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0:
-logger.debug('%d %g except:%g, loss:%g' %
-             (count, used, used / count * len(batches), loss))
+logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
loss_sum += loss
else:
feed_dict = {answer_net.query_word: query,
@@ -239,8 +238,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context
ids = np.concatenate((ids, sample_id))
if count % 100 == 0:
-logger.debug('%d %g except:%g' %
-             (count, used, used / count * len(batches)))
+logger.debug('%d %g except:%g', count, used, used / count * len(batches))
loss = loss_sum / len(batches)
if is_training:
return loss
@@ -327,7 +325,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
-             ' loss: ' + str(train_loss))
+             ' loss: %s', train_loss)
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
@@ -361,8 +359,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
-logger.debug('epoch %d acc %g bestacc %g' %
-             (epoch, acc, bestacc))
+logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
if patience <= iter:
break
logger.debug('save done.')
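A caveat on the hunks above: lazy logging defers only the string interpolation. Argument expressions such as used / count * len(batches) are still evaluated on every call, whatever the log level. That is fine for cheap arithmetic; if an argument were genuinely expensive to compute, a level guard would be needed. A sketch, with a hypothetical expensive helper:

import logging

logger = logging.getLogger(__name__)

def expensive_summary(batches):
    # Stand-in for a computation worth skipping when DEBUG is off.
    return sum(len(batch) for batch in batches)

def log_progress(count, used, batches):
    # Cheap arguments: plain lazy logging is enough.
    logger.debug('%d %g except:%g', count, used, used / count * len(batches))
    # Expensive arguments: guard so the work is skipped entirely.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('summary: %s', expensive_summary(batches))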
2 changes: 1 addition & 1 deletion examples/trials/mnist-pbt-tuner-pytorch/mnist.py
@@ -112,7 +112,7 @@ def main(args):

if os.path.isfile(load_checkpoint_path):
model_state_dict = load_checkpoint(load_checkpoint_path)
logger.info("test : " + load_checkpoint_path)
logger.info("test : ", load_checkpoint_path)
logger.info(type(model_state_dict))
model.load_state_dict(model_state_dict)

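One pitfall when converting calls like the one above: logger.info("test : ", load_checkpoint_path), a trailing argument with no %s placeholder, does not raise at the call site. The mismatch surfaces only when the record is formatted, at which point the standard logging machinery catches the TypeError ("not all arguments converted during string formatting") and writes a "--- Logging error ---" traceback to stderr instead of the message. A small reproduction, using only the standard library and a made-up path:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('checkpoint-demo')

path = '/tmp/epoch3.pt'  # hypothetical checkpoint path

logger.info('test : ', path)    # broken: no placeholder, emits an error report
logger.info('test : %s', path)  # correct lazy form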
2 changes: 1 addition & 1 deletion examples/trials/sklearn/classification/main.py
@@ -63,7 +63,7 @@ def run(X_train, X_test, y_train, y_test, model):
'''Train model and predict result'''
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
-LOG.debug('score: %s' % score)
+LOG.debug('score: %s', score)
nni.report_final_result(score)

if __name__ == '__main__':
2 changes: 1 addition & 1 deletion examples/trials/sklearn/regression/main.py
@@ -74,7 +74,7 @@ def run(X_train, X_test, y_train, y_test, model):
model.fit(X_train, y_train)
predict_y = model.predict(X_test)
score = r2_score(y_test, predict_y)
-LOG.debug('r2 score: %s' % score)
+LOG.debug('r2 score: %s', score)
nni.report_final_result(score)

if __name__ == '__main__':
@@ -387,8 +387,7 @@ def update_search_space(self, search_space):
self.population = Population(search_space,
self.mutate_rate,
self.optimize_mode)
-self.logger.debug('Total search space volume: '
-                  + str(self.population.volume))
+self.logger.debug('Total search space volume: %s', self.population.volume)

if not self.serve_list:
self.serve_list = self.population.get_offspring(
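A side note on the removed lines above: once a %s placeholder is used, the explicit str() wrapper is unnecessary, because %s applies str() to the argument during interpolation, which also keeps the conversion itself lazy. For example:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('volume-demo')

volume = 12345  # hypothetical search-space volume

# '%s' stringifies the argument itself; wrapping it in str() adds nothing.
logger.debug('Total search space volume: %s', volume)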
11 changes: 4 additions & 7 deletions examples/trials/weight_sharing/ga_squad/trial.py
@@ -219,8 +219,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0:
-logger.debug('%d %g except:%g, loss:%g' %
-             (count, used, used / count * len(batches), loss))
+logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
loss_sum += loss
else:
feed_dict = {answer_net.query_word: query,
@@ -240,8 +239,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context
ids = np.concatenate((ids, sample_id))
if count % 100 == 0:
-logger.debug('%d %g except:%g' %
-             (count, used, used / count * len(batches)))
+logger.debug('%d %g except:%g', count, used, used / count * len(batches))
loss = loss_sum / len(batches)
if is_training:
return loss
@@ -333,7 +331,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
-             ' loss: ' + str(train_loss))
+             ' loss: %s', train_loss)
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
@@ -369,8 +367,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
-logger.debug('epoch %d acc %g bestacc %g' %
-             (epoch, acc, bestacc))
+logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
if patience <= iter:
break
logger.debug('save done.')
3 changes: 1 addition & 2 deletions test/async_sharing_test/simple_tuner.py
@@ -35,8 +35,7 @@ def generate_parameters(self, parameter_id, **kwargs):
'checksum': None,
'path': '',
}
-_logger.info('generate parameter for father trial %s' %
-             parameter_id)
+_logger.info('generate parameter for father trial %s', parameter_id)
self.thread_lock.release()
return {
'prev_id': 0,
2 changes: 1 addition & 1 deletion test/config/naive_test/naive_assessor.py
@@ -18,7 +18,7 @@ def __init__(self, optimize_mode):
_logger.info('init')

def assess_trial(self, trial_job_id, trial_history):
-_logger.info('assess trial %s %s' % (trial_job_id, trial_history))
+_logger.info('assess trial %s %s', trial_job_id, trial_history)

id_ = trial_history[0]
if id_ in self._killed:
6 changes: 3 additions & 3 deletions test/config/naive_test/naive_tuner.py
@@ -21,17 +21,17 @@ def __init__(self, optimize_mode):

def generate_parameters(self, parameter_id, **kwargs):
self.cur += 1
-_logger.info('generate parameters: %s' % self.cur)
+_logger.info('generate parameters: %s', self.cur)
return { 'x': self.cur }

def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
reward = extract_scalar_reward(value)
-_logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward))
+_logger.info('receive trial result: %s, %s, %s', parameter_id, parameters, reward)
_result.write('%d %d\n' % (parameters['x'], reward))
_result.flush()

def update_search_space(self, search_space):
-_logger.info('update_search_space: %s' % search_space)
+_logger.info('update_search_space: %s', search_space)
with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_:
json.dump(search_space, file_)

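Calls like the three fixed above are what pylint's logging-not-lazy check (W1201) reports; assuming pylint is installed, a run such as pylint --disable=all --enable=logging-not-lazy test/config/naive_test/naive_tuner.py is a quick way to confirm that no eager '%' formatting remains in logging calls.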
5 changes: 2 additions & 3 deletions test/ut/tools/annotation/testcase/annotated/mnist.py
@@ -38,8 +38,7 @@ def build_network(self):
input_dim = int(math.sqrt(self.x_dim))
except:
logger.debug(
-    'input dim cannot be sqrt and reshape. input dim: ' +
-    str(self.x_dim))
+    'input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
with tf.name_scope('conv1'):
@@ -132,7 +131,7 @@ def main():
mnist_network.build_network()
logger.debug('Mnist build network done.')
graph_location = tempfile.mkdtemp()
-logger.debug('Saving graph to: %s' % graph_location)
+logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
test_acc = 0.0
4 changes: 2 additions & 2 deletions test/ut/tools/annotation/testcase/usercode/mnist.py
@@ -53,7 +53,7 @@ def build_network(self):
input_dim = int(math.sqrt(self.x_dim))
except:
#print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
-logger.debug('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
+logger.debug('input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])

@@ -147,7 +147,7 @@ def main():

# Write log
graph_location = tempfile.mkdtemp()
-logger.debug('Saving graph to: %s' % graph_location)
+logger.debug('Saving graph to: %s', graph_location)
# print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
