From 36a8907ead30975a2c27869483ba5351644703f6 Mon Sep 17 00:00:00 2001
From: Jing Xu
Date: Tue, 7 Nov 2023 12:41:33 +0900
Subject: [PATCH] update out-of-date URL for Intel optimization guide

---
 recipes_source/recipes/tuning_guide.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/recipes_source/recipes/tuning_guide.py b/recipes_source/recipes/tuning_guide.py
index 39fa667bc1..d23f3fe666 100644
--- a/recipes_source/recipes/tuning_guide.py
+++ b/recipes_source/recipes/tuning_guide.py
@@ -193,12 +193,15 @@ def fused_gelu(x):
 #
 # numactl --cpunodebind=N --membind=N python <script>
 
+###############################################################################
+# More detailed descriptions can be found `here `_.
+
 ###############################################################################
 # Utilize OpenMP
 # ~~~~~~~~~~~~~~
 # OpenMP is utilized to bring better performance for parallel computation tasks.
 # ``OMP_NUM_THREADS`` is the easiest switch that can be used to accelerate computations. It determines the number of threads used for OpenMP computations.
-# CPU affinity setting controls how workloads are distributed over multiple cores. It affects communication overhead, cache line invalidation overhead, and page thrashing, so a proper CPU affinity setting brings performance benefits. ``GOMP_CPU_AFFINITY`` or ``KMP_AFFINITY`` determines how to bind OpenMP* threads to physical processing units.
+# CPU affinity setting controls how workloads are distributed over multiple cores. It affects communication overhead, cache line invalidation overhead, and page thrashing, so a proper CPU affinity setting brings performance benefits. ``GOMP_CPU_AFFINITY`` or ``KMP_AFFINITY`` determines how to bind OpenMP* threads to physical processing units. Detailed information can be found `here `_.
 
 ###############################################################################
 # With the following command, PyTorch runs the task on N OpenMP threads.
@@ -283,7 +286,7 @@ def fused_gelu(x):
 traced_model(*sample_input)
 
 ###############################################################################
-# While the JIT fuser for oneDNN Graph also supports inference with ``BFloat16`` datatype, 
+# While the JIT fuser for oneDNN Graph also supports inference with ``BFloat16`` datatype,
 # performance benefit with oneDNN Graph is only exhibited by machines with AVX512_BF16
 # instruction set architecture (ISA).
 # The following code snippet serves as an example of using ``BFloat16`` datatype for inference with oneDNN Graph:
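
As a supplement to the ``numactl --cpunodebind`` command quoted in the first hunk, here is a minimal Python sketch of binding the current process to the cores of one NUMA node. It assumes Linux (the ``/sys`` path and ``os.sched_setaffinity`` are Linux-specific); note that the memory binding done by ``--membind`` has no plain ``os``-level equivalent, so that part still requires ``numactl`` or libnuma::

    import os

    # Read the core IDs belonging to NUMA node 0 (Linux-specific path).
    with open("/sys/devices/system/node/node0/cpulist") as f:
        cpulist = f.read().strip()  # e.g. "0-15" or "0-7,16-23"

    def parse_cpulist(spec):
        # Expand a cpulist string such as "0-7,16-23" into a set of core IDs.
        cores = set()
        for part in spec.split(","):
            if "-" in part:
                lo, hi = part.split("-")
                cores.update(range(int(lo), int(hi) + 1))
            else:
                cores.add(int(part))
        return cores

    # Pin the current process (pid 0 means "self") to node-0 cores only.
    os.sched_setaffinity(0, parse_cpulist(cpulist))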
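
The ``OMP_NUM_THREADS``/affinity paragraph touched by the first hunk can also be illustrated from Python. The thread count and core range below are assumed values for illustration; the variables must be set before the OpenMP runtime initializes, i.e. before ``import torch``::

    import os

    # Assumed values: 16 OpenMP threads pinned to cores 0-15.
    os.environ["OMP_NUM_THREADS"] = "16"
    os.environ["GOMP_CPU_AFFINITY"] = "0-15"  # GNU OpenMP builds
    # For Intel OpenMP builds, KMP_AFFINITY is used instead, for example:
    # os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"

    import torch  # reads OMP_NUM_THREADS during initialization

    print(torch.get_num_threads())  # intra-op parallelism thread count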
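
For the ``BFloat16`` inference path mentioned at the end of the second hunk, a minimal sketch of oneDNN Graph fusion under CPU autocast follows. The ResNet-50 workload and input shape are placeholders; ``torch.jit.enable_onednn_fusion``, tracing, freezing, and a couple of warm-up iterations follow the pattern of the surrounding tutorial::

    import torch
    import torchvision.models as models  # placeholder workload

    torch.jit.enable_onednn_fusion(True)  # turn on the oneDNN Graph JIT fuser

    model = models.resnet50(weights=None).eval()
    sample_input = torch.rand(32, 3, 224, 224)

    # Trace and run inference under CPU autocast with the BFloat16 dtype.
    with torch.no_grad(), torch.cpu.amp.autocast(dtype=torch.bfloat16):
        traced_model = torch.jit.trace(model, sample_input)
        traced_model = torch.jit.freeze(traced_model)
        # A few warm-up runs let the fuser rewrite and compile the graph.
        traced_model(sample_input)
        traced_model(sample_input)
        output = traced_model(sample_input)

As the hunk's context notes, the speedup only materializes on CPUs with the AVX512_BF16 ISA; on other machines the code still runs, just without the performance benefit.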