[Cherry-pick] logger level #7920

Merged: 1 commit, merged on Jan 29, 2024.

paddlenlp/peft/lora/lora_model.py (1 addition, 1 deletion)

@@ -489,7 +489,7 @@ def print_trainable_parameters(self) -> None:
                freeze_numel += np.prod(weight.shape)
            else:
                trainable_numel += np.prod(weight.shape)
-        logger.info(
+        logger.debug(
            f"Frozen parameters: {freeze_numel:.2e} || Trainable parameters:{trainable_numel:.2e} || Total parameters:{freeze_numel+trainable_numel:.2e}|| Trainable:{trainable_numel / (freeze_numel+trainable_numel):.2%}"
        )

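Since `print_trainable_parameters` now logs at DEBUG level, its summary is hidden at the default INFO verbosity. A minimal sketch of how the message could be made visible again, assuming `logger` here is PaddleNLP's shared logger wrapping a standard `logging.Logger` as `logger.logger` (the later `logger.logger.level` checks in this PR suggest that layout); the import path below is an assumption, it is not shown in this diff:

```python
import logging

# Assumption: the `logger` used in lora_model.py is PaddleNLP's shared logger,
# which exposes the underlying logging.Logger as `logger.logger`.
from paddlenlp.utils.log import logger  # hypothetical import path

logger.logger.setLevel(logging.DEBUG)  # lower the threshold so debug()-level summaries show up
```
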
paddlenlp/peft/prefix/prefix_model.py (1 addition, 1 deletion)

@@ -282,7 +282,7 @@ def print_trainable_parameters(self) -> None:
                freeze_numel += np.prod(weight.shape)
            else:
                trainable_numel += np.prod(weight.shape)
-        logger.info(
+        logger.debug(
            f"Frozen parameters: {freeze_numel:.2e} || Trainable parameters:{trainable_numel:.2e} || Total parameters:{freeze_numel+trainable_numel:.2e}|| Trainable:{trainable_numel / (freeze_numel+trainable_numel):.2%}"
        )

paddlenlp/trainer/integrations.py (1 addition, 1 deletion)

@@ -96,7 +96,7 @@ def on_train_begin(self, args, state, control, **kwargs):

        if self.vdl_writer is not None:
            self.vdl_writer.add_text("args", args.to_json_string())
-           if "model" in kwargs:
+           if "model" in kwargs and logger.logger.level < 20:
                model = kwargs["model"]
                if isinstance(model, LoRAModel) or isinstance(model, PrefixModelForCausalLM):
                    model = kwargs["model"].model

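A brief note on the numeric threshold in the new guard: 20 is the value of Python's standard `logging.INFO` constant (DEBUG is 10, WARNING is 30), so the model graph is only written to VisualDL when the wrapped logger is configured below INFO. A minimal standalone sketch of that comparison, using only the standard library:

```python
import logging

# Standard logging levels: NOTSET=0, DEBUG=10, INFO=20, WARNING=30.
# `logger.logger.level < 20` is therefore true for DEBUG, and also for a
# logger whose level was never set (NOTSET == 0).
print(logging.DEBUG, logging.INFO, logging.WARNING)  # 10 20 30
assert logging.DEBUG < logging.INFO
assert logging.NOTSET < logging.INFO
```
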
paddlenlp/trainer/trainer.py (9 additions, 9 deletions)

@@ -729,7 +729,7 @@ def train(
        # per_device_trainable_numel = sum(p.numel().item() for p in model.parameters() if not p.stop_gradient)
        # TODO: Temporary fix since Tensor.numel() not supported in distributed mode
        per_device_trainable_numel = sum(np.prod(p.shape) for p in model.parameters() if not p.stop_gradient)
-        logger.info(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)")
+        logger.debug(f" Number of trainable parameters = {per_device_trainable_numel:,} (per device)")
        if self.args.use_hybrid_parallel:
            # todo fix for pipeline_parallel_degree
            parts_num = max(self.args.tensor_parallel_degree, 1) * max(self.args.pipeline_parallel_degree, 1)

@@ -745,7 +745,7 @@
                    trainable_numel = trainable_numel // self.args.sep_parallel_degree
                # the numel is roughly, because the tensor parallel still hold own bias or layer_norm weight without splited
                # so, the trainable numel is a little bigger than real.
-               logger.info(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)")
+               logger.debug(f" Number of trainable parameters = {trainable_numel:,} (all devices, roughly)")

        start_time = time.time()
        self._globalstep_last_start_time = time.time()

@@ -2392,7 +2392,7 @@ def log(self, logs: Dict[str, float], **kwargs) -> None:
            kwargs.update(timer=self.timers, paddle_pipeline_timers=paddle_pipeline_timers)

        if self.state.epoch is not None:
-           logs["epoch"] = round(self.state.epoch, 4)
+           logs["progress_or_epoch"] = round(self.state.epoch, 4)
        output = {**logs, **{"step": self.state.global_step}}
        self.state.log_history.append(output)
        self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs, **kwargs)

@@ -2953,23 +2953,23 @@ def print_config(self, args=None, key=""):
        """
        print config values
        """
-       logger.info("=" * 60)
+       logger.debug("=" * 60)
        if args is None:
            args = self.args
            key = "Training"
        import paddlenlp

-       logger.info("{:^40}".format("{} Configuration Arguments".format(key)))
-       logger.info("{:30}: {}".format("paddle commit id", paddle.version.commit))
-       logger.info("{:30}: {}".format("paddlenlp commit id", paddlenlp.version.commit))
+       logger.debug("{:^40}".format("{} Configuration Arguments".format(key)))
+       logger.debug("{:30}: {}".format("paddle commit id", paddle.version.commit))
+       logger.debug("{:30}: {}".format("paddlenlp commit id", paddlenlp.version.commit))

        for a in dir(args):
            if a[:2] != "__":  # don't print double underscore methods
                v = getattr(args, a)
                if not isinstance(v, types.MethodType):
-                   logger.info("{:30}: {}".format(a, v))
+                   logger.debug("{:30}: {}".format(a, v))

-       logger.info("")
+       logger.debug("")

    def is_unified_checkpoint(self, resume_from_checkpoint, safe_serialization=True):
        is_unified_checkpoint_type = False

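For reference, the `print_config` output (now emitted at DEBUG level here, and likewise in training_args.py below) relies on fixed-width format specifiers. A small self-contained sketch of that formatting, with illustrative values only:

```python
# Illustrative values only; the real call sites print paddle/paddlenlp commit ids
# and every non-dunder attribute of the arguments object.
key = "Training"
print("=" * 60)
print("{:^40}".format("{} Configuration Arguments".format(key)))  # centered in a 40-char field
print("{:30}: {}".format("paddle commit id", "0123abcd"))         # name left-aligned, padded to 30 chars
print("{:30}: {}".format("logging_steps", 10))                    # hypothetical attribute/value pair
```
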
paddlenlp/trainer/trainer_callback.py (1 addition, 1 deletion)

@@ -515,7 +515,7 @@ def on_log(self, args, state, control, logs=None, **kwargs):
            logs_str = ", ".join(f"{k}: {v}" for k, v in logs.items())
        else:
            logs_str = str(logs)
-       self.training_bar.write(logs_str)
+       logger.info(logs_str)

    def on_train_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:

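To make the effect of the two changes above concrete (the `epoch` key renamed to `progress_or_epoch` in `Trainer.log`, and the progress callback now routing the line through `logger.info` instead of the tqdm bar's `write`), here is a tiny sketch with hypothetical values of the string that ends up being logged:

```python
# Hypothetical log dict as passed to on_log after the key rename; real runs
# include more entries (loss scale, global step, timing, ...).
logs = {"loss": 2.31, "learning_rate": 3e-05, "progress_or_epoch": 0.1234}
logs_str = ", ".join(f"{k}: {v}" for k, v in logs.items())
print(logs_str)  # loss: 2.31, learning_rate: 3e-05, progress_or_epoch: 0.1234
```
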
paddlenlp/trainer/training_args.py (6 additions, 6 deletions)

@@ -1703,21 +1703,21 @@ def print_config(self, args=None, key=""):
        """
        print all config values.
        """
-       logger.info("=" * 60)
+       logger.debug("=" * 60)
        if args is None:
            args = self
            key = "Training"

        import paddlenlp

-       logger.info("{:^40}".format("{} Configuration Arguments".format(key)))
-       logger.info("{:30}: {}".format("paddle commit id", paddle.version.commit))
-       logger.info("{:30}: {}".format("paddlenlp commit id", paddlenlp.version.commit))
+       logger.debug("{:^40}".format("{} Configuration Arguments".format(key)))
+       logger.debug("{:30}: {}".format("paddle commit id", paddle.version.commit))
+       logger.debug("{:30}: {}".format("paddlenlp commit id", paddlenlp.version.commit))

        for a in dir(args):
            if a[:2] != "__":  # don't print double underscore methods
                v = getattr(args, a)
                if not isinstance(v, types.MethodType):
-                   logger.info("{:30}: {}".format(a, v))
+                   logger.debug("{:30}: {}".format(a, v))

-       logger.info("")
+       logger.debug("")

paddlenlp/transformers/model_utils.py (17 additions, 9 deletions)

@@ -1929,15 +1929,23 @@ def _find_mismatched_keys(
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

        if len(unexpected_keys) > 0:
-           logger.warning(
-               f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
-               f" initializing {model.__class__.__name__}: {sorted(unexpected_keys)}\n- This IS expected if you are"
-               f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
-               " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
-               " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
-               f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
-               " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
-           )
+           if logger.logger.level < 20:
+               logger.warning(
+                   f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
+                   f" initializing {model.__class__.__name__}: {sorted(unexpected_keys)}\n- This IS expected if you are"
+                   f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
+                   " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
+                   " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
+                   f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
+                   " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
+               )
+           else:
+               logger.warning(
+                   f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
+                   f" initializing the model, - This IS expected if you are"
+                   f" initializing the model from a checkpoint of a model trained on another task or"
+                   " with another architecture."
+               )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")

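One caveat worth noting about the `logger.logger.level < 20` checks introduced in this PR (here and in integrations.py): `Logger.level` is the level set directly on that logger object, not the effective level resolved through parent loggers. A standalone sketch using Python's plain `logging` module (no PaddleNLP imports, names are illustrative):

```python
import logging

# Logger.level reports only what was explicitly configured on this logger;
# a logger that was never given a level reports NOTSET (0), which also
# satisfies "< 20" (INFO), whereas getEffectiveLevel() walks up the
# hierarchy to find the threshold actually used for filtering.
log = logging.getLogger("example.child")
print(log.level)                # 0 (NOTSET) until log.setLevel(...) is called
print(log.getEffectiveLevel())  # inherited from ancestors; 30 (WARNING) with an unconfigured root

log.setLevel(logging.DEBUG)
print(log.level < logging.INFO)  # True: the detailed warning branch above would be taken
```
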