more patches
branfosj committed Feb 11, 2023
1 parent fc45081 commit 6124d4c
Showing 4 changed files with 90 additions and 0 deletions.
9 changes: 9 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1-foss-2022a.eb
@@ -27,6 +27,9 @@ patches = [
'PyTorch-1.13.1_no-cuda-stubs-rpath.patch',
'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch',
'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch',
'PyTorch-1.13.1_increase-tolerance-test_ops.patch',
'PyTorch-1.13.1_install-vsx-vec-headers.patch',
'PyTorch-1.13.1_skip-failing-grad-test.patch',
]
checksums = [
{'pytorch-v1.13.1.tar.gz': 'dbc229ee9750b02b514937d017744443a269ea0241ed3f32b9af0703589d25d4'},
@@ -55,6 +58,12 @@ checksums = [
'be83ff61fe2dedab6d49c232936d5622df81ab49154264490021c6c828e53315'},
{'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch':
'92cd48ef6d01aa7e07ccce1dcaf40bc3fb0f220c4aa4fea15f3e05fb42e37909'},
{'PyTorch-1.13.1_increase-tolerance-test_ops.patch':
'd53e98bf0da7788b68042dcc31bc5708dae962fde3f110cc827eb807a5d08e49'},
{'PyTorch-1.13.1_install-vsx-vec-headers.patch':
'7b678f54bb947afd4767f5877ac424b4b94ce5db609ea20f5a869ccf4027035f'},
{'PyTorch-1.13.1_skip-failing-grad-test.patch':
'6681200f9509893cb9231b5c93ac9bc5e6d9d9ae4febefca52e7cbc843ba8f51'}
]

osdependencies = [OS_PKG_IBVERBS_DEV]
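For reference, the sha256 values in the checksums list above can be generated for a new patch file with plain Python; this is a minimal sketch, not EasyBuild's own checksum tooling:

import hashlib
import sys

# print the sha256 digest of the file given on the command line, in the same
# hex form used in the easyconfig's checksums list
with open(sys.argv[1], "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())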
21 changes: 21 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_increase-tolerance-test_ops.patch
@@ -0,0 +1,21 @@
`test_out` may fail due to slightly different values caused by a different order of matrices in SGEMM:

> Mismatched elements: 1 / 50 (2.0%)
> Greatest absolute difference: 1.430511474609375e-05 at index (4, 5) (up to 1e-05 allowed)
> Greatest relative difference: 4.65393206065873e-06 at index (4, 5) (up to 1.3e-06 allowed)

Author: Alexander Grund (TU Dresden)
Updated for PyTorch 1.13.1: Simon Branford (University of Birmingham)

--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -545,6 +545,9 @@
else list(supported_dtypes)[0]
)

+ if dtype is torch.float32:
+ self.precision, self.rel_tol = (1.5e-05, 1e-05)
+
samples = op.sample_inputs(device, dtype)
for sample in samples:
# calls it normally to get the expected result
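To make the numbers above concrete, here is a small standalone sketch (the tensors are invented for illustration, not taken from the failing test) showing that the reported mismatch falls inside the relaxed tolerances the patch sets:

import torch

expected = torch.zeros(1)
actual = expected + 1.430511474609375e-05  # the greatest absolute difference reported above

# fails with the tolerances quoted in the error (atol=1e-05, rtol=1.3e-06):
# torch.testing.assert_close(actual, expected)

# passes with the values the patch sets, where self.precision acts as atol
# and self.rel_tol as rtol
torch.testing.assert_close(actual, expected, atol=1.5e-05, rtol=1e-05)
print("within the relaxed tolerances")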
35 changes: 35 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_install-vsx-vec-headers.patch
@@ -0,0 +1,35 @@
Add missing headers to the installation, which fixes e.g. test_cpp_extensions_aot_ninja
See https://github.com/pytorch/pytorch/pull/85547

Author: Alexander Grund (TU Dresden)
Updated for PyTorch 1.13.1: Simon Branford (University of Birmingham)

--- a/aten/src/ATen/CMakeLists.txt
+++ b/aten/src/ATen/CMakeLists.txt
@@ -56,7 +56,7 @@
EXCLUDE(ATen_CORE_TEST_SRCS "${ATen_CORE_TEST_SRCS}" ${ATen_CORE_EXCLUDED_TEST_SRCS})
endif()

-file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
+file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/vec256/vsx/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp" "functorch/*.cpp")
file(GLOB cuda_h "cuda/*.h" "cuda/detail/*.h" "cuda/*.cuh" "cuda/detail/*.cuh")
file(GLOB cuda_cpp "cuda/*.cpp" "cuda/detail/*.cpp")
--- a/setup.py
+++ b/setup.py
@@ -1031,6 +1031,7 @@
'include/ATen/*.h',
'include/ATen/cpu/*.h',
'include/ATen/cpu/vec/vec256/*.h',
+ 'include/ATen/cpu/vec/vec256/vsx/*.h',
'include/ATen/cpu/vec/vec512/*.h',
'include/ATen/cpu/vec/*.h',
'include/ATen/core/*.h',
@@ -1138,6 +1139,7 @@
'include/THH/*.cuh',
'include/THH/*.h*',
'include/THH/generic/*.h',
+ 'include/sleef.h',
'share/cmake/ATen/*.cmake',
'share/cmake/Caffe2/*.cmake',
'share/cmake/Caffe2/public/*.cmake',
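As a quick sanity check, a sketch (assuming a PyTorch installation built with this patch applied; the path layout is taken from the globs above) to confirm that the VSX vec256 headers actually land in the installed include tree:

import os
import torch

# the patch adds this directory to both the CMake header glob and setup.py's
# package_data, so it should now exist in the installed package
vsx_dir = os.path.join(os.path.dirname(torch.__file__),
                       "include", "ATen", "cpu", "vec", "vec256", "vsx")
print(sorted(os.listdir(vsx_dir)))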
25 changes: 25 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_skip-failing-grad-test.patch
@@ -0,0 +1,25 @@
Skip test_forward_mode_AD_nn_functional_max_unpool2d_cpu_float64 and test_forward_mode_AD_nn_functional_max_unpool3d_cpu_float64,
which may unexpectedly succeed.

Author: Simon Branford (University of Birmingham)

--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -11574,7 +11574,7 @@
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
- DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'),
@@ -11611,7 +11611,7 @@
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
- DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
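For context, a minimal standalone illustration (not part of the patch) of why expectedFailure is swapped for skip here: a test decorated with expectedFailure that happens to pass is reported as an unexpected success and makes the run unsuccessful, whereas a skipped test is never executed at all:

import unittest

class Demo(unittest.TestCase):
    @unittest.expectedFailure
    def test_marked_expected_failure(self):
        self.assertEqual(1 + 1, 2)  # passes, so it is reported as an unexpected success

    @unittest.skip("Skipped!")
    def test_marked_skip(self):
        self.assertEqual(1 + 1, 2)  # never runs

if __name__ == "__main__":
    unittest.main()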
