diff --git a/L/LibUnwind/LibUnwind@1.8.1/build_tarballs.jl b/L/LibUnwind/LibUnwind@1.8.1/build_tarballs.jl
new file mode 100644
index 00000000000..27e328a9633
--- /dev/null
+++ b/L/LibUnwind/LibUnwind@1.8.1/build_tarballs.jl
@@ -0,0 +1,88 @@
+using BinaryBuilder, Pkg
+using BinaryBuilderBase: sanitize
+
+name = "LibUnwind"
+version = v"1.8.1"
+
+# Collection of sources required to build libunwind
+sources = [
+    ArchiveSource("https://github.com/libunwind/libunwind/releases/download/v$(version)/libunwind-$(version).tar.gz",
+                  "ddf0e32dd5fafe5283198d37e4bf9decf7ba1770b6e7e006c33e6df79e6a6157"),
+    DirectorySource("./bundled"),
+]
+
+# Bash recipe for building across all platforms
+script = raw"""
+cd $WORKSPACE/srcdir/libunwind*/
+
+atomic_patch -p0 ${WORKSPACE}/srcdir/patches/libunwind-configure-static-lzma.patch
+if [[ ${target} == aarch64-linux-musl ]]; then
+    # https://github.com/checkpoint-restore/criu/issues/934, fixed by
+    # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id=9966a05c7b80f075f2bc7e48dbb108d3f2927234
+    pushd /opt/aarch64-linux-musl/aarch64-linux-musl/sys-root/usr/include
+    atomic_patch -p5 ${WORKSPACE}/srcdir/patches/linux-disentangle_sigcontext.patch
+    popd
+fi
+# https://github.com/JuliaLang/julia/issues/51467, and
+# https://github.com/JuliaLang/julia/issues/51465, caused by
+# https://github.com/libunwind/libunwind/pull/203
+atomic_patch -p1 ${WORKSPACE}/srcdir/patches/libunwind-revert_prelink_unwind.patch
+
+# https://github.com/libunwind/libunwind/pull/748
+atomic_patch -p1 ${WORKSPACE}/srcdir/patches/libunwind-aarch64-inline-asm.patch
+
+if [[ ${bb_full_target} == *-sanitize+memory* ]]; then
+    # Install msan runtime (for clang)
+    cp -rL ${libdir}/linux/* /opt/x86_64-linux-musl/lib/clang/*/lib/linux/
+fi
+
+export CFLAGS="-DPI -fPIC"
+./configure \
+    --prefix=${prefix} \
+    --build=${MACHTYPE} \
+    --host=${target} \
+    --libdir=${libdir} \
+    --enable-minidebuginfo \
+    --enable-zlibdebuginfo \
+    --disable-tests \
+    --disable-conservative-checks
+make -j${nproc}
+make install
+
+# Shoe-horn liblzma.a into libunwind.a
+mkdir -p unpacked/{liblzma,libunwind}
+(cd unpacked/liblzma; ar -x ${prefix}/lib/liblzma.a)
+(cd unpacked/libunwind; ar -x ${prefix}/lib/libunwind.a)
+rm -f ${prefix}/lib/libunwind.a
+ar -qc ${prefix}/lib/libunwind.a unpacked/**/*
+"""
+
+# These are the platforms we will build for by default, unless further
+# platforms are passed in on the command line. libunwind is only used
+# on Linux or FreeBSD (i.e., ELF systems).
+platforms = filter(p -> Sys.islinux(p) || Sys.isfreebsd(p), supported_platforms())
+push!(platforms, Platform("x86_64", "linux"; sanitize="memory"))
+
+# The products that we will ensure are always built
+products = [
+    LibraryProduct("libunwind", :libunwind),
+]
+
+llvm_version = v"13.0.1"
+
+# Dependencies that must be installed before this package can be built
+dependencies = [
+    BuildDependency("XZ_jll"),
+    Dependency("Zlib_jll"),
+    BuildDependency(PackageSpec(name="LLVMCompilerRT_jll",
+                                uuid="4e17d02c-6bf5-513e-be62-445f41c75a11",
+                                version=llvm_version);
+                    platforms=filter(p -> sanitize(p) == "memory", platforms)),
+]
+
+# Build the tarballs. Note that libunwind started using `stdatomic.h`, which is only
+# available with GCC version 4.9 or later, so we need to set a higher preferred version
+# than the default.
+build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies; + julia_compat="1.10", preferred_gcc_version=v"5", + preferred_llvm_version=llvm_version) diff --git a/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-aarch64-inline-asm.patch b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-aarch64-inline-asm.patch new file mode 100644 index 00000000000..123643e30cd --- /dev/null +++ b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-aarch64-inline-asm.patch @@ -0,0 +1,157 @@ +From 6ae71b3ea71bff0f38c7a6a05beda30b7dce1ef6 Mon Sep 17 00:00:00 2001 +From: Stephen Webb +Date: Mon, 22 Apr 2024 15:56:54 -0400 +Subject: [PATCH] Rework inline aarch64 as for setcontext + +Modern GC and clang were barfing on the inline asm constraints for the +aarch64-linux setcontext() replacement. Reformulated the asm code to +reduce the required constraints. +--- + src/aarch64/Gos-linux.c | 115 +++++++++++++++++++++------------------- + 1 file changed, 61 insertions(+), 54 deletions(-) + +diff --git a/src/aarch64/Gos-linux.c b/src/aarch64/Gos-linux.c +index 7cd8c879f..1e4949623 100644 +--- a/src/aarch64/Gos-linux.c ++++ b/src/aarch64/Gos-linux.c +@@ -2,6 +2,7 @@ + Copyright (C) 2008 CodeSourcery + Copyright (C) 2011-2013 Linaro Limited + Copyright (C) 2012 Tommi Rantala ++ Copyright 2024 Stephen M. Webb + + This file is part of libunwind. + +@@ -28,6 +29,28 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + + #ifndef UNW_REMOTE_ONLY + ++/* Magic constants generated from gen-offsets.c */ ++#define SC_R0_OFF "8" ++#define SC_R2_OFF "24" ++#define SC_R18_OFF "152" ++#define SC_R20_OFF "168" ++#define SC_R22_OFF "184" ++#define SC_R24_OFF "200" ++#define SC_R26_OFF "216" ++#define SC_R28_OFF "232" ++#define SC_R30_OFF "248" ++ ++#define FP_R08_OFF "80" ++#define FP_R09_OFF "88" ++#define FP_R10_OFF "96" ++#define FP_R11_OFF "104" ++#define FP_R12_OFF "112" ++#define FP_R13_OFF "120" ++#define FP_R14_OFF "128" ++#define FP_R15_OFF "136" ++ ++#define SC_SP_OFF "0x100" ++ + HIDDEN int + aarch64_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg) + { +@@ -36,65 +59,49 @@ aarch64_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg) + + if (c->sigcontext_format == AARCH64_SCF_NONE) + { ++ ++ /* ++ * This is effectively the old POSIX setcontext(). ++ * ++ * This inline asm is broken up to use local scratch registers for the ++ * uc_mcontext.regs and FPCTX base addresses because newer versions of GCC ++ * and clang barf on too many constraints (gh-702) when the C array ++ * elements are used directly. ++ * ++ * Clobbers aren't required for the inline asm because they just convince ++ * the compiler to save those registers and they never get restored ++ * becauise the asm ends with a plain ol' ret. ++ */ ++ register void* uc_mcontext __asm__ ("x5") = (void*) &uc->uc_mcontext; ++ register void* fpctx __asm__ ("x4") = (void*) GET_FPCTX(uc); ++ + /* Since there are no signals involved here we restore EH and non scratch + registers only. 
*/ + __asm__ __volatile__ ( +- "ldr x0, %[x0]\n\t" +- "ldr x1, %[x1]\n\t" +- "ldr x2, %[x2]\n\t" +- "ldr x3, %[x3]\n\t" +- "ldr x19, %[x19]\n\t" +- "ldr x20, %[x20]\n\t" +- "ldr x21, %[x21]\n\t" +- "ldr x22, %[x22]\n\t" +- "ldr x23, %[x23]\n\t" +- "ldr x24, %[x24]\n\t" +- "ldr x25, %[x25]\n\t" +- "ldr x26, %[x26]\n\t" +- "ldr x27, %[x27]\n\t" +- "ldr x28, %[x28]\n\t" +- "ldr x29, %[x29]\n\t" +- "ldr x30, %[x30]\n\t" +- "ldr d8, %[d8]\n\t" +- "ldr d9, %[d9]\n\t" +- "ldr d10, %[d10]\n\t" +- "ldr d11, %[d11]\n\t" +- "ldr d12, %[d12]\n\t" +- "ldr d13, %[d13]\n\t" +- "ldr d14, %[d14]\n\t" +- "ldr d15, %[d15]\n\t" +- "ldr x5, %[sp]\n\t" ++ "ldp x0, x1, [x5, " SC_R0_OFF "]\n\t" ++ "ldp x2, x3, [x5, " SC_R2_OFF "]\n\t" ++ "ldp x18, x19, [x5, " SC_R18_OFF "]\n\t" ++ "ldp x20, x21, [x5, " SC_R20_OFF "]\n\t" ++ "ldp x22, x23, [x5, " SC_R22_OFF "]\n\t" ++ "ldp x24, x25, [x5, " SC_R24_OFF "]\n\t" ++ "ldp x26, x27, [x5, " SC_R26_OFF "]\n\t" ++ "ldp x28, x29, [x5, " SC_R28_OFF "]\n\t" ++ "ldr x30, [x5, " SC_R30_OFF "]\n\t" ++ "ldr d8, [x4, " FP_R08_OFF "]\n\t" ++ "ldr d9, [x4, " FP_R09_OFF "]\n\t" ++ "ldr d10, [x4, " FP_R10_OFF "]\n\t" ++ "ldr d11, [x4, " FP_R11_OFF "]\n\t" ++ "ldr d12, [x4, " FP_R12_OFF "]\n\t" ++ "ldr d13, [x4, " FP_R13_OFF "]\n\t" ++ "ldr d14, [x4, " FP_R14_OFF "]\n\t" ++ "ldr d15, [x4, " FP_R15_OFF "]\n\t" ++ "ldr x5, [x5, " SC_SP_OFF "]\n\t" + "mov sp, x5\n\t" + "ret\n" +- : +- : [x0] "m"(uc->uc_mcontext.regs[0]), +- [x1] "m"(uc->uc_mcontext.regs[1]), +- [x2] "m"(uc->uc_mcontext.regs[2]), +- [x3] "m"(uc->uc_mcontext.regs[3]), +- [x19] "m"(uc->uc_mcontext.regs[19]), +- [x20] "m"(uc->uc_mcontext.regs[20]), +- [x21] "m"(uc->uc_mcontext.regs[21]), +- [x22] "m"(uc->uc_mcontext.regs[22]), +- [x23] "m"(uc->uc_mcontext.regs[23]), +- [x24] "m"(uc->uc_mcontext.regs[24]), +- [x25] "m"(uc->uc_mcontext.regs[25]), +- [x26] "m"(uc->uc_mcontext.regs[26]), +- [x27] "m"(uc->uc_mcontext.regs[27]), +- [x28] "m"(uc->uc_mcontext.regs[28]), +- [x29] "m"(uc->uc_mcontext.regs[29]), /* FP */ +- [x30] "m"(uc->uc_mcontext.regs[30]), /* LR */ +- [d8] "m"(GET_FPCTX(uc)->vregs[8]), +- [d9] "m"(GET_FPCTX(uc)->vregs[9]), +- [d10] "m"(GET_FPCTX(uc)->vregs[10]), +- [d11] "m"(GET_FPCTX(uc)->vregs[11]), +- [d12] "m"(GET_FPCTX(uc)->vregs[12]), +- [d13] "m"(GET_FPCTX(uc)->vregs[13]), +- [d14] "m"(GET_FPCTX(uc)->vregs[14]), +- [d15] "m"(GET_FPCTX(uc)->vregs[15]), +- [sp] "m"(uc->uc_mcontext.sp) +- : "x0", "x1", "x2", "x3", "x19", "x20", "x21", "x22", "x23", "x24", +- "x25", "x26", "x27", "x28", "x29", "x30" +- ); ++ : ++ : [uc_mcontext] "r"(uc_mcontext), ++ [fpctx] "r"(fpctx) ++ ); + } + else + { diff --git a/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-configure-static-lzma.patch b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-configure-static-lzma.patch new file mode 100644 index 00000000000..f8b428f6055 --- /dev/null +++ b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-configure-static-lzma.patch @@ -0,0 +1,20 @@ +--- configure.orig 2023-06-04 05:19:04 ++++ configure 2023-06-07 08:35:11 +@@ -18117,7 +18117,7 @@ + $as_echo_n "(cached) " >&6 + else + ac_check_lib_save_LIBS=$LIBS +-LIBS="-llzma $LIBS" ++LIBS="-L${libdir} -l:liblzma.a $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + +@@ -18148,7 +18148,7 @@ + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lzma_lzma_mf_is_supported" >&5 + $as_echo "$ac_cv_lib_lzma_lzma_mf_is_supported" >&6; } + if test "x$ac_cv_lib_lzma_lzma_mf_is_supported" = xyes; then : +- LIBLZMA=-llzma ++ LIBLZMA="-L${libdir} -l:liblzma.a" + + $as_echo "#define HAVE_LZMA 1" >>confdefs.h + diff --git a/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-revert_prelink_unwind.patch b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-revert_prelink_unwind.patch new file mode 100644 index 00000000000..80de3c9ce45 --- /dev/null +++ b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/libunwind-revert_prelink_unwind.patch @@ -0,0 +1,187 @@ +From 3af39d34f576890e7f1f3e97cc1cb45b4b76aa47 Mon Sep 17 00:00:00 2001 +From: Tim Besard +Date: Tue, 16 Jan 2024 09:49:21 -0800 +Subject: [PATCH] Revert "Fix unwinding of pre-linked libraries" + +This reverts commit a4014f33775321b4106a1134b89020a7774902dd, +which regresses unwinding on FreeBSD (JuliaLang/julia#51467). +--- + include/dwarf.h | 2 -- + include/libunwind-dynamic.h | 1 - + src/dwarf/Gfind_proc_info-lsb.c | 42 +++++++-------------------------- + src/dwarf/Gfind_unwind_table.c | 1 - + 4 files changed, 8 insertions(+), 38 deletions(-) + +diff --git a/include/dwarf.h b/include/dwarf.h +index 4fd1dba0..3fc6bce2 100644 +--- a/include/dwarf.h ++++ b/include/dwarf.h +@@ -371,8 +371,6 @@ struct unw_debug_frame_list + /* The start (inclusive) and end (exclusive) of the described region. */ + unw_word_t start; + unw_word_t end; +- /* ELF load offset */ +- unw_word_t load_offset; + /* The debug frame itself. */ + char *debug_frame; + size_t debug_frame_size; +diff --git a/include/libunwind-dynamic.h b/include/libunwind-dynamic.h +index a26f2c99..c902ccd9 100644 +--- a/include/libunwind-dynamic.h ++++ b/include/libunwind-dynamic.h +@@ -141,7 +141,6 @@ typedef struct unw_dyn_info + unw_word_t gp; /* global-pointer in effect for this entry */ + int32_t format; /* real type: unw_dyn_info_format_t */ + int32_t pad; +- unw_word_t load_offset; /* ELF load offset */ + union + { + unw_dyn_proc_info_t pi; +diff --git a/src/dwarf/Gfind_proc_info-lsb.c b/src/dwarf/Gfind_proc_info-lsb.c +index c11345e8..c701ccfb 100644 +--- a/src/dwarf/Gfind_proc_info-lsb.c ++++ b/src/dwarf/Gfind_proc_info-lsb.c +@@ -108,17 +108,13 @@ linear_search (unw_addr_space_t as, unw_word_t ip, + + static int + load_debug_frame (const char *file, char **buf, size_t *bufsize, int is_local, +- unw_word_t segbase, unw_word_t *load_offset) ++ unw_word_t segbase) + { + struct elf_image ei; +- Elf_W (Ehdr) *ehdr; +- Elf_W (Phdr) *phdr; + Elf_W (Shdr) *shdr; +- int i; + int ret; + + ei.image = NULL; +- *load_offset = 0; + + ret = elf_w (load_debuginfo) (file, &ei, is_local); + if (ret != 0) +@@ -193,20 +189,6 @@ load_debug_frame (const char *file, char **buf, size_t *bufsize, int is_local, + #if defined(SHF_COMPRESSED) + } + #endif +- +- ehdr = ei.image; +- phdr = (Elf_W (Phdr) *) ((char *) ei.image + ehdr->e_phoff); +- +- for (i = 0; i < ehdr->e_phnum; ++i) +- if (phdr[i].p_type == PT_LOAD) +- { +- *load_offset = segbase - phdr[i].p_vaddr; +- +- Debug (4, "%s load offset is 0x%zx\n", file, *load_offset); +- +- break; +- } +- + mi_munmap(ei.image, ei.size); + return 0; + } +@@ -259,7 +241,6 @@ locate_debug_info (unw_addr_space_t as, unw_word_t addr, unw_word_t segbase, + int err; + char *buf; + size_t bufsize; +- unw_word_t load_offset; + + /* First, see if we loaded this frame already. 
*/ + +@@ -287,7 +268,7 @@ locate_debug_info (unw_addr_space_t as, unw_word_t addr, unw_word_t segbase, + name = (char*) dlname; + + err = load_debug_frame (name, &buf, &bufsize, as == unw_local_addr_space, +- segbase, &load_offset); ++ segbase); + + if (!err) + { +@@ -300,7 +281,6 @@ locate_debug_info (unw_addr_space_t as, unw_word_t addr, unw_word_t segbase, + + fdesc->start = start; + fdesc->end = end; +- fdesc->load_offset = load_offset; + fdesc->debug_frame = buf; + fdesc->debug_frame_size = bufsize; + fdesc->index = NULL; +@@ -497,7 +477,6 @@ dwarf_find_debug_frame (int found, unw_dyn_info_t *di_debug, unw_word_t ip, + di->format = UNW_INFO_FORMAT_TABLE; + di->start_ip = fdesc->start; + di->end_ip = fdesc->end; +- di->load_offset = fdesc->load_offset; + di->u.ti.name_ptr = (unw_word_t) (uintptr_t) obj_name; + di->u.ti.table_data = (unw_word_t *) fdesc; + di->u.ti.table_len = sizeof (*fdesc) / sizeof (unw_word_t); +@@ -960,14 +939,12 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, + ip_base = segbase; + } + +- Debug (6, "lookup IP 0x%lx\n", (long) (ip - ip_base - di->load_offset)); +- + #ifndef UNW_REMOTE_ONLY + if (as == unw_local_addr_space) + { +- e = lookup (table, table_len, ip - ip_base - di->load_offset); ++ e = lookup (table, table_len, ip - ip_base); + if (e && &e[1] < &table[table_len / sizeof (struct table_entry)]) +- last_ip = e[1].start_ip_offset + ip_base + di->load_offset; ++ last_ip = e[1].start_ip_offset + ip_base; + else + last_ip = di->end_ip; + } +@@ -975,7 +952,7 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, + #endif + { + #ifndef UNW_LOCAL_ONLY +- int32_t last_ip_offset = di->end_ip - ip_base - di->load_offset; ++ int32_t last_ip_offset = di->end_ip - ip_base; + segbase = di->u.rti.segbase; + if ((ret = remote_lookup (as, (uintptr_t) table, table_len, + ip - ip_base, &ent, &last_ip_offset, arg)) < 0) +@@ -983,7 +960,7 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, + if (ret) + { + e = &ent; +- last_ip = last_ip_offset + ip_base + di->load_offset; ++ last_ip = last_ip_offset + ip_base; + } + else + e = NULL; /* no info found */ +@@ -997,8 +974,8 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, + unwind info. 
*/ + return -UNW_ENOINFO; + } +- Debug (15, "ip=0x%lx, load_offset=0x%lx, start_ip=0x%lx\n", +- (long) ip, (long) di->load_offset, (long) (e->start_ip_offset)); ++ Debug (15, "ip=0x%lx, start_ip=0x%lx\n", ++ (long) ip, (long) (e->start_ip_offset)); + if (debug_frame_base) + fde_addr = e->fde_offset + debug_frame_base; + else +@@ -1022,9 +999,6 @@ dwarf_search_unwind_table (unw_addr_space_t as, unw_word_t ip, + pi->flags = UNW_PI_FLAG_DEBUG_FRAME; + } + +- pi->start_ip += di->load_offset; +- pi->end_ip += di->load_offset; +- + #if defined(NEED_LAST_IP) + pi->last_ip = last_ip; + #else +diff --git a/src/dwarf/Gfind_unwind_table.c b/src/dwarf/Gfind_unwind_table.c +index a7c4dfd3..2b503ea9 100644 +--- a/src/dwarf/Gfind_unwind_table.c ++++ b/src/dwarf/Gfind_unwind_table.c +@@ -197,7 +197,6 @@ dwarf_find_unwind_table (struct elf_dyn_info *edi, + + edi->di_cache.start_ip = start_ip; + edi->di_cache.end_ip = end_ip; +- edi->di_cache.load_offset = 0; + edi->di_cache.format = UNW_INFO_FORMAT_REMOTE_TABLE; + edi->di_cache.u.rti.name_ptr = 0; + /* two 32-bit values (ip_offset/fde_offset) per table-entry: */ +-- +2.43.0 + diff --git a/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/linux-disentangle_sigcontext.patch b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/linux-disentangle_sigcontext.patch new file mode 100644 index 00000000000..6150c134010 --- /dev/null +++ b/L/LibUnwind/LibUnwind@1.8.1/bundled/patches/linux-disentangle_sigcontext.patch @@ -0,0 +1,290 @@ +From 9966a05c7b80f075f2bc7e48dbb108d3f2927234 Mon Sep 17 00:00:00 2001 +From: Dave Martin +Date: Fri, 4 Jan 2019 13:09:51 +0000 +Subject: arm64/sve: Disentangle from + + +Currently, provides common definitions for +describing SVE context structures that are also used by the ptrace +definitions in . + +For this reason, a #include of was added in +ptrace.h, but it this turns out that this can interact badly with +userspace code that tries to include ptrace.h on top of the libc +headers (which may provide their own shadow definitions for +sigcontext.h). + +To make the headers easier for userspace to consume, this patch +bounces the common definitions into an __SVE_* namespace and moves +them to a backend header that can be +included by the other headers as appropriate. This should allow +ptrace.h to be used alongside libc's sigcontext.h (if any) without +ill effects. + +This should make the situation unambiguous: is +the header to include for the sigframe-specific definitions, while + is the header to include for ptrace-specific +definitions. + +To avoid conflicting with existing usage, +remains the canonical way to get the common definitions for +SVE_VQ_MIN, sve_vq_from_vl() etc., both in userspace and in the +kernel: relying on these being defined as a side effect of +including just was never intended to be safe. 
+ +Signed-off-by: Dave Martin +Signed-off-by: Will Deacon +--- + arch/arm64/include/uapi/asm/ptrace.h | 39 ++++++++++----------- + arch/arm64/include/uapi/asm/sigcontext.h | 56 +++++++++++++++---------------- + arch/arm64/include/uapi/asm/sve_context.h | 53 +++++++++++++++++++++++++++++ + 3 files changed, 99 insertions(+), 49 deletions(-) + create mode 100644 arch/arm64/include/uapi/asm/sve_context.h + +diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h +index 6c40b3db3285d..28d77c9ed5311 100644 +--- a/arch/arm64/include/uapi/asm/ptrace.h ++++ b/arch/arm64/include/uapi/asm/ptrace.h +@@ -23,7 +23,7 @@ + #include + + #include +-#include ++#include + + + /* +@@ -130,9 +130,9 @@ struct user_sve_header { + */ + + /* Offset from the start of struct user_sve_header to the register data */ +-#define SVE_PT_REGS_OFFSET \ +- ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \ +- / SVE_VQ_BYTES * SVE_VQ_BYTES) ++#define SVE_PT_REGS_OFFSET \ ++ ((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1)) \ ++ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + + /* + * The register data content and layout depends on the value of the +@@ -178,39 +178,36 @@ struct user_sve_header { + * Additional data might be appended in the future. + */ + +-#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) +-#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) +-#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) ++#define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) ++#define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) ++#define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) + #define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) + #define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) + +-#define __SVE_SIG_TO_PT(offset) \ +- ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) +- + #define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET + + #define SVE_PT_SVE_ZREGS_OFFSET \ +- __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) ++ (SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET) + #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ +- __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) ++ (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) + #define SVE_PT_SVE_ZREGS_SIZE(vq) \ +- (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) ++ (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) + + #define SVE_PT_SVE_PREGS_OFFSET(vq) \ +- __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) ++ (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) + #define SVE_PT_SVE_PREG_OFFSET(vq, n) \ +- __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) ++ (SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) + #define SVE_PT_SVE_PREGS_SIZE(vq) \ +- (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \ ++ (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \ + SVE_PT_SVE_PREGS_OFFSET(vq)) + + #define SVE_PT_SVE_FFR_OFFSET(vq) \ +- __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) ++ (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) + + #define SVE_PT_SVE_FPSR_OFFSET(vq) \ + ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ +- (SVE_VQ_BYTES - 1)) \ +- / SVE_VQ_BYTES * SVE_VQ_BYTES) ++ (__SVE_VQ_BYTES - 1)) \ ++ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + #define SVE_PT_SVE_FPCR_OFFSET(vq) \ + (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) + +@@ -221,8 +218,8 @@ struct user_sve_header { + + #define SVE_PT_SVE_SIZE(vq, flags) \ + ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \ +- - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \ +- / SVE_VQ_BYTES * SVE_VQ_BYTES) ++ - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \ ++ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + + #define SVE_PT_SIZE(vq, flags) \ + (((flags) & 
SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \ +diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h +index dca8f8b5168b4..5f3c0cec5af92 100644 +--- a/arch/arm64/include/uapi/asm/sigcontext.h ++++ b/arch/arm64/include/uapi/asm/sigcontext.h +@@ -130,6 +130,8 @@ struct sve_context { + + #endif /* !__ASSEMBLY__ */ + ++#include ++ + /* + * The SVE architecture leaves space for future expansion of the + * vector length beyond its initial architectural limit of 2048 bits +@@ -138,21 +140,20 @@ struct sve_context { + * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ + * terminology. + */ +-#define SVE_VQ_BYTES 16 /* number of bytes per quadword */ ++#define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */ + +-#define SVE_VQ_MIN 1 +-#define SVE_VQ_MAX 512 ++#define SVE_VQ_MIN __SVE_VQ_MIN ++#define SVE_VQ_MAX __SVE_VQ_MAX + +-#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES) +-#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES) ++#define SVE_VL_MIN __SVE_VL_MIN ++#define SVE_VL_MAX __SVE_VL_MAX + +-#define SVE_NUM_ZREGS 32 +-#define SVE_NUM_PREGS 16 ++#define SVE_NUM_ZREGS __SVE_NUM_ZREGS ++#define SVE_NUM_PREGS __SVE_NUM_PREGS + +-#define sve_vl_valid(vl) \ +- ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX) +-#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES) +-#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES) ++#define sve_vl_valid(vl) __sve_vl_valid(vl) ++#define sve_vq_from_vl(vl) __sve_vq_from_vl(vl) ++#define sve_vl_from_vq(vq) __sve_vl_from_vq(vq) + + /* + * If the SVE registers are currently live for the thread at signal delivery, +@@ -205,34 +206,33 @@ struct sve_context { + * Additional data might be appended in the future. + */ + +-#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES) +-#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8)) +-#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq) ++#define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) ++#define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) ++#define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) + + #define SVE_SIG_REGS_OFFSET \ +- ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ +- / SVE_VQ_BYTES * SVE_VQ_BYTES) ++ ((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1)) \ ++ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) + +-#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET ++#define SVE_SIG_ZREGS_OFFSET \ ++ (SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET) + #define SVE_SIG_ZREG_OFFSET(vq, n) \ +- (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n)) +-#define SVE_SIG_ZREGS_SIZE(vq) \ +- (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET) ++ (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) ++#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq) + + #define SVE_SIG_PREGS_OFFSET(vq) \ +- (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq)) ++ (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) + #define SVE_SIG_PREG_OFFSET(vq, n) \ +- (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n)) +-#define SVE_SIG_PREGS_SIZE(vq) \ +- (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq)) ++ (SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) ++#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq) + + #define SVE_SIG_FFR_OFFSET(vq) \ +- (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq)) ++ (SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) + + #define SVE_SIG_REGS_SIZE(vq) \ +- (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET) +- +-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) ++ (__SVE_FFR_OFFSET(vq) + 
__SVE_FFR_SIZE(vq)) + ++#define SVE_SIG_CONTEXT_SIZE(vq) \ ++ (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) + + #endif /* _UAPI__ASM_SIGCONTEXT_H */ +diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h +new file mode 100644 +index 0000000000000..754ab751b523f +--- /dev/null ++++ b/arch/arm64/include/uapi/asm/sve_context.h +@@ -0,0 +1,53 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* Copyright (C) 2017-2018 ARM Limited */ ++ ++/* ++ * For use by other UAPI headers only. ++ * Do not make direct use of header or its definitions. ++ */ ++ ++#ifndef _UAPI__ASM_SVE_CONTEXT_H ++#define _UAPI__ASM_SVE_CONTEXT_H ++ ++#include ++ ++#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */ ++ ++#define __SVE_VQ_MIN 1 ++#define __SVE_VQ_MAX 512 ++ ++#define __SVE_VL_MIN (__SVE_VQ_MIN * __SVE_VQ_BYTES) ++#define __SVE_VL_MAX (__SVE_VQ_MAX * __SVE_VQ_BYTES) ++ ++#define __SVE_NUM_ZREGS 32 ++#define __SVE_NUM_PREGS 16 ++ ++#define __sve_vl_valid(vl) \ ++ ((vl) % __SVE_VQ_BYTES == 0 && \ ++ (vl) >= __SVE_VL_MIN && \ ++ (vl) <= __SVE_VL_MAX) ++ ++#define __sve_vq_from_vl(vl) ((vl) / __SVE_VQ_BYTES) ++#define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) ++ ++#define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) ++#define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) ++#define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) ++ ++#define __SVE_ZREGS_OFFSET 0 ++#define __SVE_ZREG_OFFSET(vq, n) \ ++ (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n)) ++#define __SVE_ZREGS_SIZE(vq) \ ++ (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET) ++ ++#define __SVE_PREGS_OFFSET(vq) \ ++ (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq)) ++#define __SVE_PREG_OFFSET(vq, n) \ ++ (__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n)) ++#define __SVE_PREGS_SIZE(vq) \ ++ (__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq)) ++ ++#define __SVE_FFR_OFFSET(vq) \ ++ (__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq)) ++ ++#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */ +-- +cgit +
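
Not part of the diff above — a minimal downstream sketch. The recipe declares a single product, LibraryProduct("libunwind", :libunwind); by the usual BinaryBuilder/JLLWrappers conventions this gets published as a wrapper package (assumed here to be named LibUnwind_jll) exporting a `libunwind` path variable and an `is_available()` query. Nothing in the snippet is defined by this PR; it only illustrates those conventions.

using Libdl
using LibUnwind_jll   # assumed wrapper name, generated from this recipe by JLL convention

if LibUnwind_jll.is_available()
    # `libunwind` is the variable generated for LibraryProduct("libunwind", :libunwind);
    # it points at the shared library shipped in the platform's artifact.
    handle = Libdl.dlopen(LibUnwind_jll.libunwind)
    @info "Loaded libunwind from $(LibUnwind_jll.libunwind)"
    Libdl.dlclose(handle)
else
    # The recipe only builds Linux and FreeBSD (ELF) targets, so other
    # platforms get no artifact and is_available() returns false.
    @warn "LibUnwind_jll has no artifact for this platform"
end

Because liblzma is linked statically (the bundled configure patch switches -llzma to -l:liblzma.a, and the recipe folds liblzma.a into libunwind.a), XZ_jll is only a BuildDependency: downstream users load nothing beyond the single libunwind shared library, plus Zlib_jll, which remains a regular runtime Dependency.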