
Commit 3b6f117: Fix (#11448)
co63oc authored Jan 2, 2024
1 parent 49ef54e commit 3b6f117
Showing 3 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion deploy/slim/prune/sensitivity_anal.py
@@ -127,7 +127,7 @@ def eval_fn():
 run_sensitive_analysis=True:
     Automatically compute the sensitivities of convolutions in a model.
     The sensitivity of a convolution is the losses of accuracy on test dataset in
-    differenct pruned ratios. The sensitivities can be used to get a group of best
+    different pruned ratios. The sensitivities can be used to get a group of best
     ratios with some condition.
 run_sensitive_analysis=False:
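For context, the docstring above describes PaddleSlim's filter sensitivity analysis. Below is a minimal sketch of how such an analysis can be driven, assuming PaddleSlim's dygraph FPGMFilterPruner API (method names and signatures vary between PaddleSlim releases); the network, input shape, and eval function are illustrative stand-ins, not code from this repository.

    # Minimal sensitivity-analysis sketch (assumed PaddleSlim dygraph API).
    import paddle
    from paddleslim.dygraph import FPGMFilterPruner

    net = paddle.vision.models.mobilenet_v1()  # small stand-in network
    net.eval()

    def eval_fn():
        # Stand-in metric; a real eval_fn returns accuracy on the test set.
        return 0.9

    pruner = FPGMFilterPruner(net, [1, 3, 224, 224])  # assumed input shape

    # Prune each convolution at a series of ratios and record the accuracy
    # loss per (layer, ratio) pair; results are cached in sen.pickle.
    pruner.sensitive(eval_func=eval_fn, sen_file="./sen.pickle")

    # Choose per-layer ratios from the recorded sensitivities to meet a
    # target FLOPs reduction.
    pruner.sensitive_prune(pruned_flops=0.5)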
2 changes: 1 addition & 1 deletion deploy/slim/quantization/quant_kl.py
@@ -139,7 +139,7 @@ def main(config, device, logger, vdl_writer):
 if not (os.path.exists(os.path.join(inference_model_dir, "inference.pdmodel")) and \
         os.path.exists(os.path.join(inference_model_dir, "inference.pdiparams")) ):
     raise ValueError(
-        "Please set inference model dir in Global.inference_model or Global.pretrained_model for post-quantazition"
+        "Please set inference model dir in Global.inference_model or Global.pretrained_model for post-quantization"
     )

 if is_layoutxlm_ser:
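The guard above requires both inference.pdmodel and inference.pdiparams to exist before post-training quantization runs. A minimal sketch of that flow follows, assuming PaddleSlim's quant_post_static entry point (argument names differ between PaddleSlim releases); the model directory and calibration reader are hypothetical.

    # Guarded post-training (KL) quantization sketch; quant_post_static's
    # exact signature is an assumption and version-dependent.
    import os
    import numpy as np
    import paddle
    from paddleslim.quant import quant_post_static

    inference_model_dir = "./inference_model"  # hypothetical export dir
    if not (os.path.exists(os.path.join(inference_model_dir, "inference.pdmodel"))
            and os.path.exists(os.path.join(inference_model_dir, "inference.pdiparams"))):
        raise ValueError(
            "Please set inference model dir in Global.inference_model or "
            "Global.pretrained_model for post-quantization")

    def sample_generator():
        # Hypothetical calibration reader; the sample layout must match the
        # model's input spec.
        for _ in range(8):
            yield [np.random.rand(3, 224, 224).astype("float32")]

    paddle.enable_static()
    exe = paddle.static.Executor(paddle.CPUPlace())
    quant_post_static(
        executor=exe,
        model_dir=inference_model_dir,
        quantize_model_path="./quant_model",
        sample_generator=sample_generator,
        model_filename="inference.pdmodel",
        params_filename="inference.pdiparams",
        batch_nums=8,
        algo="KL")  # KL-divergence calibration, as in quant_kl.py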
2 changes: 1 addition & 1 deletion tools/program.py
@@ -127,7 +127,7 @@ def check_device(use_gpu, use_xpu=False, use_npu=False, use_mlu=False):

 try:
     if use_gpu and use_xpu:
-        print("use_xpu and use_gpu can not both be ture.")
+        print("use_xpu and use_gpu can not both be true.")
     if use_gpu and not paddle.is_compiled_with_cuda():
         print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
         sys.exit(1)
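For reference, a standalone sketch of the device-flag validation that check_device performs; only the use_gpu/use_xpu interaction from the hunk above is reproduced, and the err template merely approximates the one defined earlier in tools/program.py.

    import sys
    import paddle

    def check_device(use_gpu, use_xpu=False):
        # Approximation of the err template from tools/program.py.
        err = ("Config {} cannot be set as true while your paddle "
               "is not compiled with {}! Please try:\n"
               "\t1. Install paddlepaddle to run model on {}\n"
               "\t2. Set {} as false in config file to run model on CPU")
        try:
            if use_gpu and use_xpu:
                print("use_xpu and use_gpu can not both be true.")
            if use_gpu and not paddle.is_compiled_with_cuda():
                print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
                sys.exit(1)
            if use_xpu and not paddle.is_compiled_with_xpu():
                print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))
                sys.exit(1)
        except Exception:
            pass

    check_device(use_gpu=True)  # exits if paddle lacks CUDA support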
