From 9631286a66afb9146b5e55be50e5d7dda754999d Mon Sep 17 00:00:00 2001
From: Thomas A
Date: Sun, 3 Apr 2022 13:20:06 -0700
Subject: [PATCH 1/4] Remove libmalloc from darling repo

I want to relocate this to darling-libmalloc instead
---
 src/libmalloc/.clang-format | 126 -
 src/libmalloc/.gitignore | 3 -
 src/libmalloc/CMakeLists.txt | 56 -
 src/libmalloc/include/malloc/_malloc.h | 56 -
 src/libmalloc/include/malloc/malloc.h | 314 --
 .../libmalloc.xcodeproj/project.pbxproj | 1635 ---------
 src/libmalloc/man/malloc.3 | 334 --
 src/libmalloc/man/malloc_size.3 | 55 -
 src/libmalloc/man/malloc_zone_malloc.3 | 162 -
 src/libmalloc/man/manpages.lst | 7 -
 src/libmalloc/private/make_tapi_happy.h | 87 -
 src/libmalloc/private/malloc_implementation.h | 85 -
 src/libmalloc/private/malloc_private.h | 79 -
 src/libmalloc/private/stack_logging.h | 204 --
 src/libmalloc/resolver/resolver.c | 21 -
 src/libmalloc/resolver/resolver.h | 27 -
 src/libmalloc/resolver/resolver_internal.h | 26 -
 src/libmalloc/src/base.h | 133 -
 src/libmalloc/src/bitarray.c | 683 ----
 src/libmalloc/src/bitarray.h | 103 -
 src/libmalloc/src/debug.h | 62 -
 src/libmalloc/src/dtrace.h | 50 -
 src/libmalloc/src/empty.s | 36 -
 src/libmalloc/src/frozen_malloc.c | 178 -
 src/libmalloc/src/frozen_malloc.h | 40 -
 src/libmalloc/src/internal.h | 140 -
 src/libmalloc/src/legacy_malloc.c | 62 -
 src/libmalloc/src/legacy_malloc.h | 31 -
 src/libmalloc/src/locking.h | 70 -
 src/libmalloc/src/magazine_inline.h | 714 ----
 src/libmalloc/src/magazine_large.c | 843 -----
 src/libmalloc/src/magazine_malloc.c | 1760 ----------
 src/libmalloc/src/magazine_malloc.h | 419 ---
 src/libmalloc/src/magazine_medium.c | 2892 ----------------
 src/libmalloc/src/magazine_rack.c | 159 -
 src/libmalloc/src/magazine_rack.h | 98 -
 src/libmalloc/src/magazine_small.c | 2485 --------------
 src/libmalloc/src/magazine_tiny.c | 2748 ---------------
 src/libmalloc/src/magazine_zone.h | 810 -----
 src/libmalloc/src/magmallocProvider.d | 18 -
 src/libmalloc/src/malloc.c | 2780 ----------------
 src/libmalloc/src/malloc_common.c | 113 -
 src/libmalloc/src/malloc_common.h | 43 -
 src/libmalloc/src/malloc_printf.c | 239 --
 src/libmalloc/src/msl_lite_support.c | 135 -
 src/libmalloc/src/nano_malloc.c | 1956 -----------
 src/libmalloc/src/nano_malloc.h | 46 -
 src/libmalloc/src/nano_malloc_common.c | 311 --
 src/libmalloc/src/nano_malloc_common.h | 82 -
 src/libmalloc/src/nano_zone.h | 142 -
 src/libmalloc/src/nano_zone_common.h | 57 -
 src/libmalloc/src/nanov2_malloc.c | 2950 -----------------
 src/libmalloc/src/nanov2_malloc.h | 46 -
 src/libmalloc/src/nanov2_zone.h | 322 --
 src/libmalloc/src/platform.h | 122 -
 src/libmalloc/src/printf.h | 80 -
 src/libmalloc/src/purgeable_malloc.c | 426 ---
 src/libmalloc/src/purgeable_malloc.h | 32 -
 src/libmalloc/src/thresholds.h | 228 --
 src/libmalloc/src/trace.h | 71 -
 src/libmalloc/src/vm.c | 295 --
 src/libmalloc/src/vm.h | 58 -
 src/libmalloc/tools/malloc_replay.cpp | 1012 ------
 src/libmalloc/tools/malloc_replay.h | 211 --
 src/libmalloc/tools/malloc_replay_plotter.py | 366 --
 src/libmalloc/tools/read-radix-tree | 24 -
 src/libmalloc/xcodeconfig/interposable.list | 1 -
 src/libmalloc/xcodeconfig/libmalloc.xcconfig | 92 -
 .../xcodeconfig/libmalloc_eos.xcconfig | 13 -
 .../xcodeconfig/libmalloc_resolved.xcconfig | 10 -
 .../xcodeconfig/libmalloc_resolver.xcconfig | 3 -
 .../xcodeconfig/libmalloc_static.xcconfig | 11 -
 src/libmalloc/xcodescripts/install-codes.sh | 10 -
 src/libmalloc/xcodescripts/manpages.sh | 45 -
 src/libmalloc/xcodescripts/reindent.sh | 27 -
 .../xcodescripts/sanitise_headers.sh | 21 -
 76 files changed, 30191 deletions(-)
 delete mode 100644 src/libmalloc/.clang-format
 delete mode 100644 src/libmalloc/.gitignore
 delete mode 100755 src/libmalloc/CMakeLists.txt
 delete mode 100644 src/libmalloc/include/malloc/_malloc.h
 delete mode 100644 src/libmalloc/include/malloc/malloc.h
 delete mode 100644 src/libmalloc/libmalloc.xcodeproj/project.pbxproj
 delete mode 100644 src/libmalloc/man/malloc.3
 delete mode 100644 src/libmalloc/man/malloc_size.3
 delete mode 100644 src/libmalloc/man/malloc_zone_malloc.3
 delete mode 100644 src/libmalloc/man/manpages.lst
 delete mode 100644 src/libmalloc/private/make_tapi_happy.h
 delete mode 100644 src/libmalloc/private/malloc_implementation.h
 delete mode 100644 src/libmalloc/private/malloc_private.h
 delete mode 100644 src/libmalloc/private/stack_logging.h
 delete mode 100644 src/libmalloc/resolver/resolver.c
 delete mode 100644 src/libmalloc/resolver/resolver.h
 delete mode 100644 src/libmalloc/resolver/resolver_internal.h
 delete mode 100644 src/libmalloc/src/base.h
 delete mode 100644 src/libmalloc/src/bitarray.c
 delete mode 100644 src/libmalloc/src/bitarray.h
 delete mode 100644 src/libmalloc/src/debug.h
 delete mode 100644 src/libmalloc/src/dtrace.h
 delete mode 100644 src/libmalloc/src/empty.s
 delete mode 100644 src/libmalloc/src/frozen_malloc.c
 delete mode 100644 src/libmalloc/src/frozen_malloc.h
 delete mode 100644 src/libmalloc/src/internal.h
 delete mode 100644 src/libmalloc/src/legacy_malloc.c
 delete mode 100644 src/libmalloc/src/legacy_malloc.h
 delete mode 100644 src/libmalloc/src/locking.h
 delete mode 100644 src/libmalloc/src/magazine_inline.h
 delete mode 100644 src/libmalloc/src/magazine_large.c
 delete mode 100644 src/libmalloc/src/magazine_malloc.c
 delete mode 100644 src/libmalloc/src/magazine_malloc.h
 delete mode 100644 src/libmalloc/src/magazine_medium.c
 delete mode 100644 src/libmalloc/src/magazine_rack.c
 delete mode 100644 src/libmalloc/src/magazine_rack.h
 delete mode 100644 src/libmalloc/src/magazine_small.c
 delete mode 100644 src/libmalloc/src/magazine_tiny.c
 delete mode 100644 src/libmalloc/src/magazine_zone.h
 delete mode 100644 src/libmalloc/src/magmallocProvider.d
 delete mode 100644 src/libmalloc/src/malloc.c
 delete mode 100644 src/libmalloc/src/malloc_common.c
 delete mode 100644 src/libmalloc/src/malloc_common.h
 delete mode 100644 src/libmalloc/src/malloc_printf.c
 delete mode 100644 src/libmalloc/src/msl_lite_support.c
 delete mode 100644 src/libmalloc/src/nano_malloc.c
 delete mode 100644 src/libmalloc/src/nano_malloc.h
 delete mode 100644 src/libmalloc/src/nano_malloc_common.c
 delete mode 100644 src/libmalloc/src/nano_malloc_common.h
 delete mode 100644 src/libmalloc/src/nano_zone.h
 delete mode 100644 src/libmalloc/src/nano_zone_common.h
 delete mode 100644 src/libmalloc/src/nanov2_malloc.c
 delete mode 100644 src/libmalloc/src/nanov2_malloc.h
 delete mode 100644 src/libmalloc/src/nanov2_zone.h
 delete mode 100644 src/libmalloc/src/platform.h
 delete mode 100644 src/libmalloc/src/printf.h
 delete mode 100644 src/libmalloc/src/purgeable_malloc.c
 delete mode 100644 src/libmalloc/src/purgeable_malloc.h
 delete mode 100644 src/libmalloc/src/thresholds.h
 delete mode 100644 src/libmalloc/src/trace.h
 delete mode 100644 src/libmalloc/src/vm.c
 delete mode 100644 src/libmalloc/src/vm.h
 delete mode 100644 src/libmalloc/tools/malloc_replay.cpp
 delete mode 100644 src/libmalloc/tools/malloc_replay.h
 delete mode 100644 src/libmalloc/tools/malloc_replay_plotter.py
 delete mode 100644 src/libmalloc/tools/read-radix-tree
 delete mode 100644 src/libmalloc/xcodeconfig/interposable.list
 delete mode 100644 src/libmalloc/xcodeconfig/libmalloc.xcconfig
 delete mode 100644 src/libmalloc/xcodeconfig/libmalloc_eos.xcconfig
 delete mode 100644 src/libmalloc/xcodeconfig/libmalloc_resolved.xcconfig
 delete mode 100644 src/libmalloc/xcodeconfig/libmalloc_resolver.xcconfig
 delete mode 100644 src/libmalloc/xcodeconfig/libmalloc_static.xcconfig
 delete mode 100644 src/libmalloc/xcodescripts/install-codes.sh
 delete mode 100644 src/libmalloc/xcodescripts/manpages.sh
 delete mode 100755 src/libmalloc/xcodescripts/reindent.sh
 delete mode 100755 src/libmalloc/xcodescripts/sanitise_headers.sh

diff --git a/src/libmalloc/.clang-format b/src/libmalloc/.clang-format
deleted file mode 100644
index 2faab1266..000000000
--- a/src/libmalloc/.clang-format
+++ /dev/null
@@ -1,126 +0,0 @@
-# Format of this file is YAML
-# Minimum clang-format version required: clang-format version 3.6.0
-# Detailed description of options available at http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-
-AlignEscapedNewlinesLeft: true
-# Bad:
-# void foo() {              \
-#     someFunction();       \
-#     someOtherFunction();  \
-# }
-# Good:
-# void foo() { \
-#     someFunction(); \
-#     someOtherFunction(); \
-# }
-
-AlignTrailingComments: true
-# align all comments to the right based on //
-# == Avoid using // based comments altogether ==
-
-AlignAfterOpenBracket: false
-# don't align after a bracket; use indentation rules to indent
-
-AllowAllParametersOfDeclarationOnNextLine: false
-# allow function definitions as
-# someFunction(foo,
-#     bar,
-#     baz);
-
-AlignConsecutiveAssignments: false
-# does not align consecutive assignments with the '=' operator
-
-AllowShortBlocksOnASingleLine: true
-# a single-statement block can be merged onto one line
-# e.g. if (a) { return; }
-
-AllowShortCaseLabelsOnASingleLine: false
-# Single-statement case statements should be on their own lines
-
-AllowShortFunctionsOnASingleLine: None
-# Bad:
-# int foo() { return 123; }
-
-AllowShortIfStatementsOnASingleLine: false
-# Bad:
-# if (someOtherVar) return;
-# Good:
-# if (someOtherVar)
-#     return;
-
-AllowShortLoopsOnASingleLine: false
-# Bad:
-# while(i>0) i--;
-# Good:
-# while(i>0) {
-#     i--;
-# }
-
-AlwaysBreakAfterDefinitionReturnType: true
-# Ensures the return type is on its own line
-# e.g. unsigned int
-#      function(char param) { }
-
-AlwaysBreakBeforeMultilineStrings: true
-# multiline strings should begin on a new line
-
-BinPackArguments: true
-BinPackParameters: false
-# function arguments should all be on one line or have a single line for each param
-
-BreakBeforeBinaryOperators: None
-# break to a new line after a binary operator when the line length is over ColumnLimit
-# e.g.
-# int foo = bar +
-#     baz;
-
-BreakBeforeBraces: Linux
-# Always attach braces to surrounding context, except:
-# break before braces on function, namespace and class definitions
-
-ColumnLimit: 132
-# everybody has a wide screen now; 132 seems to be a reasonable limit
-
-ContinuationIndentWidth: 8
-# indent continued lines by two tabs
-
-IndentCaseLabels: false
-# case labels have the same indentation as the switch statement
-
-IndentWidth: 4
-# 4 spaces for indentation
-TabWidth: 4
-# tab width is 4 spaces
-
-UseTab: Always
-# always indent lines with tabs
-
-IndentWrappedFunctionNames: false
-KeepEmptyLinesAtTheStartOfBlocks: false
-# remove excess empty lines at the start of blocks
-
-PointerAlignment: Right
-# "void *foo" (vs. "void* foo" or "void * foo")
-
-SpaceAfterCStyleCast: false
-# No space after a (cast). E.g.
-# int blah = (int)((void *)foo + bar)
-
-SpaceBeforeAssignmentOperators: true
-# The assignment operator = should be separated by spaces on both sides.
-
-SpaceBeforeParens: ControlStatements
-# for control statements, a space is required before '('
-# Bad: for(){ statement; }
-# Good: for () { statement; }
-
-SpaceInEmptyParentheses: false
-# No spaces required for empty ()
-
-SpacesInCStyleCastParentheses: false
-# No spaces required for an (unsigned int) type cast
-
-SpacesInParentheses: false
-
-SpacesInSquareBrackets: false
-# No spaces in [count] style invocations of []
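Taken together, the options in the .clang-format file above encode the house style of the deleted sources: tab indentation (width 4), a 132-column limit, Linux-style braces, right-aligned pointers, and function return types on their own line. As a quick illustration, a function written in that style would look roughly like this (the snippet is the editor's own, using the malloc_zone_t type declared later in this patch; it was never part of the tree):

static size_t
zone_block_size(malloc_zone_t *zone, const void *ptr)
{
	size_t size = 0;

	if (zone != NULL) {
		size = zone->size(zone, ptr);
	}
	switch (size) {
	case 0:
		return 0;
	default:
		return size;
	}
}

Note the brace placement: broken before the function body (BreakBeforeBraces: Linux) but attached for control statements, a space before '(' in control statements only, and case labels at the same indent as the switch (IndentCaseLabels: false).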
diff --git a/src/libmalloc/.gitignore b/src/libmalloc/.gitignore
deleted file mode 100644
index 02aa0f4f0..000000000
--- a/src/libmalloc/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# /libmalloc.xcodeproj/
-/libmalloc.xcodeproj/*.xcworkspace
-/libmalloc.xcodeproj/xcuserdata
diff --git a/src/libmalloc/CMakeLists.txt b/src/libmalloc/CMakeLists.txt
deleted file mode 100755
index 89d05b509..000000000
--- a/src/libmalloc/CMakeLists.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-project(darling-libsystem_malloc)
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include/malloc)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/private)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/resolver)
-
-add_compile_options(
-	-nostdinc
-)
-
-add_definitions(
-	-DPRIVATE
-	-DOS_UNFAIR_LOCK_INLINE=1
-	-D__DARWIN_UNIX03
-)
-
-set(libmalloc_sources
-	src/bitarray.c
-	src/frozen_malloc.c
-	src/legacy_malloc.c
-	src/magazine_large.c
-	src/magazine_malloc.c
-	src/magazine_small.c
-	src/magazine_tiny.c
-	src/malloc.c
-	src/nano_malloc.c
-	src/purgeable_malloc.c
-	src/magazine_medium.c
-	src/magazine_rack.c
-	src/malloc_common.c
-	src/malloc_printf.c
-	src/msl_lite_support.c
-	src/nano_malloc_common.c
-	src/nanov2_malloc.c
-	src/vm.c
-)
-
-set_source_files_properties(src/nanov2_malloc.c PROPERTIES
-	COMPILE_FLAGS " -DOS_VARIANT_NOTRESOLVED=1 -DOS_VARIANT_RESOLVED=1"
-)
-
-set(DYLIB_INSTALL_NAME "/usr/lib/system/libsystem_malloc.dylib")
-add_circular(system_malloc FAT
-	SOURCES
-		${libmalloc_sources}
-	SIBLINGS
-		system_kernel
-		platform
-		system_dyld
-		compiler_rt
-	UPWARD
-		system_c
-)
-
-install(TARGETS system_malloc DESTINATION libexec/darling/usr/lib/system)
diff --git a/src/libmalloc/include/malloc/_malloc.h b/src/libmalloc/include/malloc/_malloc.h
deleted file mode 100644
index c0270235d..000000000
--- a/src/libmalloc/include/malloc/_malloc.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#ifndef _MALLOC_UNDERSCORE_MALLOC_H_
-#define _MALLOC_UNDERSCORE_MALLOC_H_
-
-/*
- * This header is included from <stdlib.h>, so the contents of this file have
- * broad source compatibility and POSIX conformance implications.
- * Be cautious about what is included and declared here.
- */
-
-#include <Availability.h>
-#include <sys/cdefs.h>
-#include <_types.h>
-#include <sys/_types/_size_t.h>
-
-__BEGIN_DECLS
-
-void *malloc(size_t __size) __result_use_check __alloc_size(1);
-void *calloc(size_t __count, size_t __size) __result_use_check __alloc_size(1,2);
-void free(void *);
-void *realloc(void *__ptr, size_t __size) __result_use_check __alloc_size(2);
-#if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
-void *valloc(size_t) __alloc_size(1);
-#endif // !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
-#if (__DARWIN_C_LEVEL >= __DARWIN_C_FULL) && \
-		((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
-		(defined(__cplusplus) && __cplusplus >= 201703L))
-void *aligned_alloc(size_t __alignment, size_t __size) __result_use_check __alloc_size(2) __OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0) __TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0);
-#endif
-int posix_memalign(void **__memptr, size_t __alignment, size_t __size) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
-
-__END_DECLS
-
-#endif /* _MALLOC_UNDERSCORE_MALLOC_H_ */
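The declarations above are the small allocation surface that <stdlib.h> re-exports, so their attributes (__result_use_check, __alloc_size) matter for every C program on the platform. A brief usage sketch of the two aligned-allocation routines declared there (the editor's illustration, not part of the patch):

#include <stdlib.h>

int
main(void)
{
	void *p = NULL;

	/* posix_memalign: alignment must be a power of two and a multiple
	 * of sizeof(void *); the result comes back through an out-parameter
	 * and the return value is an errno-style status. */
	if (posix_memalign(&p, 64, 1024) == 0) {
		free(p);
	}

	/* aligned_alloc (C11, macOS 10.15+ per the availability macro above):
	 * returns the pointer directly; the size is conventionally a
	 * multiple of the alignment. */
	void *q = aligned_alloc(64, 1024);
	free(q);
	return 0;
}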
diff --git a/src/libmalloc/include/malloc/malloc.h b/src/libmalloc/include/malloc/malloc.h
deleted file mode 100644
index 40e7469da..000000000
--- a/src/libmalloc/include/malloc/malloc.h
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#ifndef _MALLOC_MALLOC_H_
-#define _MALLOC_MALLOC_H_
-
-#include <stddef.h>
-#include <mach/mach_types.h>
-#include <sys/cdefs.h>
-#include <Availability.h>
-
-#if __has_feature(ptrauth_calls)
-#include <ptrauth.h>
-
-// Zone function pointer, type-diversified but not address-diversified (because
-// the zone can be copied). Process-independent because the zone structure may
-// be in the shared library cache.
-#define MALLOC_ZONE_FN_PTR(fn) __ptrauth(ptrauth_key_process_independent_code, \
-		FALSE, ptrauth_string_discriminator("malloc_zone_fn." #fn)) fn
-
-// Introspection function pointer, address- and type-diversified.
-// Process-independent because the malloc_introspection_t structure that contains
-// these pointers may be in the shared library cache.
-#define MALLOC_INTROSPECT_FN_PTR(fn) __ptrauth(ptrauth_key_process_independent_code, \
-		TRUE, ptrauth_string_discriminator("malloc_introspect_fn." #fn)) fn
-
-// Pointer to the introspection pointer table, type-diversified but not
-// address-diversified (because the zone can be copied).
-// Process-independent because the table pointer may be in the shared library cache.
-#define MALLOC_INTROSPECT_TBL_PTR(ptr) __ptrauth(ptrauth_key_process_independent_data, \
-		FALSE, ptrauth_string_discriminator("malloc_introspect_tbl")) ptr
-
-#endif // __has_feature(ptrauth_calls)
-
-#ifndef MALLOC_ZONE_FN_PTR
-#define MALLOC_ZONE_FN_PTR(fn) fn
-#define MALLOC_INTROSPECT_FN_PTR(fn) fn
-#define MALLOC_INTROSPECT_TBL_PTR(ptr) ptr
-#endif // MALLOC_ZONE_FN_PTR
-
-__BEGIN_DECLS
-/********* Type definitions ************/
-
-typedef struct _malloc_zone_t {
-	/* Only zone implementors should depend on the layout of this structure;
-	   Regular callers should use the access functions below */
-	void *reserved1;	/* RESERVED FOR CFAllocator DO NOT USE */
-	void *reserved2;	/* RESERVED FOR CFAllocator DO NOT USE */
-	size_t (* MALLOC_ZONE_FN_PTR(size))(struct _malloc_zone_t *zone, const void *ptr); /* returns the size of a block or 0 if not in this zone; must be fast, especially for negative answers */
-	void *(* MALLOC_ZONE_FN_PTR(malloc))(struct _malloc_zone_t *zone, size_t size);
-	void *(* MALLOC_ZONE_FN_PTR(calloc))(struct _malloc_zone_t *zone, size_t num_items, size_t size); /* same as malloc, but block returned is set to zero */
-	void *(* MALLOC_ZONE_FN_PTR(valloc))(struct _malloc_zone_t *zone, size_t size); /* same as malloc, but block returned is set to zero and is guaranteed to be page aligned */
-	void (* MALLOC_ZONE_FN_PTR(free))(struct _malloc_zone_t *zone, void *ptr);
-	void *(* MALLOC_ZONE_FN_PTR(realloc))(struct _malloc_zone_t *zone, void *ptr, size_t size);
-	void (* MALLOC_ZONE_FN_PTR(destroy))(struct _malloc_zone_t *zone);	/* zone is destroyed and all memory reclaimed */
-	const char *zone_name;
-
-	/* Optional batch callbacks; these may be NULL */
-	unsigned (* MALLOC_ZONE_FN_PTR(batch_malloc))(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); /* given a size, returns pointers capable of holding that size; returns the number of pointers allocated (maybe 0 or less than num_requested) */
-	void (* MALLOC_ZONE_FN_PTR(batch_free))(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); /* frees all the pointers in to_be_freed; note that to_be_freed may be overwritten during the process */
-
-	struct malloc_introspection_t * MALLOC_INTROSPECT_TBL_PTR(introspect);
-	unsigned version;
-
-	/* aligned memory allocation. The callback may be NULL. Present in version >= 5. */
-	void *(* MALLOC_ZONE_FN_PTR(memalign))(struct _malloc_zone_t *zone, size_t alignment, size_t size);
-
-	/* free a pointer known to be in zone and known to have the given size. The callback may be NULL. Present in version >= 6. */
-	void (* MALLOC_ZONE_FN_PTR(free_definite_size))(struct _malloc_zone_t *zone, void *ptr, size_t size);
-
-	/* Empty out caches in the face of memory pressure. The callback may be NULL. Present in version >= 8. */
-	size_t (* MALLOC_ZONE_FN_PTR(pressure_relief))(struct _malloc_zone_t *zone, size_t goal);
-
-	/*
-	 * Checks whether an address might belong to the zone. May be NULL. Present in version >= 10.
-	 * False positives are allowed (e.g. the pointer was freed, or it's in zone space that has
-	 * not yet been allocated). False negatives are not allowed.
- */ - boolean_t (* MALLOC_ZONE_FN_PTR(claimed_address))(struct _malloc_zone_t *zone, void *ptr); -} malloc_zone_t; - -/********* Creation and destruction ************/ - -extern malloc_zone_t *malloc_default_zone(void); - /* The initial zone */ - -extern malloc_zone_t *malloc_create_zone(vm_size_t start_size, unsigned flags); - /* Creates a new zone with default behavior and registers it */ - -extern void malloc_destroy_zone(malloc_zone_t *zone); - /* Destroys zone and everything it allocated */ - -/********* Block creation and manipulation ************/ - -extern void *malloc_zone_malloc(malloc_zone_t *zone, size_t size) __alloc_size(2); - /* Allocates a new pointer of size size; zone must be non-NULL */ - -extern void *malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) __alloc_size(2,3); - /* Allocates a new pointer of size num_items * size; block is cleared; zone must be non-NULL */ - -extern void *malloc_zone_valloc(malloc_zone_t *zone, size_t size) __alloc_size(2); - /* Allocates a new pointer of size size; zone must be non-NULL; Pointer is guaranteed to be page-aligned and block is cleared */ - -extern void malloc_zone_free(malloc_zone_t *zone, void *ptr); - /* Frees pointer in zone; zone must be non-NULL */ - -extern void *malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) __alloc_size(3); - /* Enlarges block if necessary; zone must be non-NULL */ - -extern malloc_zone_t *malloc_zone_from_ptr(const void *ptr); - /* Returns the zone for a pointer, or NULL if not in any zone. - The ptr must have been returned from a malloc or realloc call. */ - -extern size_t malloc_size(const void *ptr); - /* Returns size of given ptr */ - -extern size_t malloc_good_size(size_t size); - /* Returns number of bytes greater than or equal to size that can be allocated without padding */ - -extern void *malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) __alloc_size(3) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); - /* - * Allocates a new pointer of size size whose address is an exact multiple of alignment. - * alignment must be a power of two and at least as large as sizeof(void *). - * zone must be non-NULL. - */ - -/********* Batch methods ************/ - -extern unsigned malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); - /* Allocates num blocks of the same size; Returns the number truly allocated (may be 0) */ - -extern void malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num); - /* frees all the pointers in to_be_freed; note that to_be_freed may be overwritten during the process; This function will always free even if the zone has no batch callback */ - -/********* Functions for libcache ************/ - -extern malloc_zone_t *malloc_default_purgeable_zone(void) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); - /* Returns a pointer to the default purgeable_zone. */ - -extern void malloc_make_purgeable(void *ptr) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); - /* Make an allocation from the purgeable zone purgeable if possible. */ - -extern int malloc_make_nonpurgeable(void *ptr) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); - /* Makes an allocation from the purgeable zone nonpurgeable. - * Returns zero if the contents were not purged since the last - * call to malloc_make_purgeable, else returns non-zero. 
*/ - -/********* Functions for zone implementors ************/ - -extern void malloc_zone_register(malloc_zone_t *zone); - /* Registers a custom malloc zone; Should typically be called after a - * malloc_zone_t has been filled in with custom methods by a client. See - * malloc_create_zone for creating additional malloc zones with the - * default allocation and free behavior. */ - -extern void malloc_zone_unregister(malloc_zone_t *zone); - /* De-registers a zone - Should typically be called before calling the zone destruction routine */ - -extern void malloc_set_zone_name(malloc_zone_t *zone, const char *name); - /* Sets the name of a zone */ - -extern const char *malloc_get_zone_name(malloc_zone_t *zone); - /* Returns the name of a zone */ - -size_t malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); - /* malloc_zone_pressure_relief() advises the malloc subsystem that the process is under memory pressure and - * that the subsystem should make its best effort towards releasing (i.e. munmap()-ing) "goal" bytes from "zone". - * If "goal" is passed as zero, the malloc subsystem will attempt to achieve maximal pressure relief in "zone". - * If "zone" is passed as NULL, all zones are examined for pressure relief opportunities. - * malloc_zone_pressure_relief() returns the number of bytes released. - */ - -typedef struct { - vm_address_t address; - vm_size_t size; -} vm_range_t; - -typedef struct malloc_statistics_t { - unsigned blocks_in_use; - size_t size_in_use; - size_t max_size_in_use; /* high water mark of touched memory */ - size_t size_allocated; /* reserved in memory */ -} malloc_statistics_t; - -typedef kern_return_t memory_reader_t(task_t remote_task, vm_address_t remote_address, vm_size_t size, void **local_memory); - /* given a task, "reads" the memory at the given address and size -local_memory: set to a contiguous chunk of memory; validity of local_memory is assumed to be limited (until next call) */ - -#define MALLOC_PTR_IN_USE_RANGE_TYPE 1 /* for allocated pointers */ -#define MALLOC_PTR_REGION_RANGE_TYPE 2 /* for region containing pointers */ -#define MALLOC_ADMIN_REGION_RANGE_TYPE 4 /* for region used internally */ -#define MALLOC_ZONE_SPECIFIC_FLAGS 0xff00 /* bits reserved for zone-specific purposes */ - -typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); - /* given a task and context, "records" the specified addresses */ - -/* Print function for the print_task() operation. 
*/ -typedef void print_task_printer_t(const char *fmt, ...); - -typedef struct malloc_introspection_t { - kern_return_t (* MALLOC_INTROSPECT_FN_PTR(enumerator))(task_t task, void *, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); /* enumerates all the malloc pointers in use */ - size_t (* MALLOC_INTROSPECT_FN_PTR(good_size))(malloc_zone_t *zone, size_t size); - boolean_t (* MALLOC_INTROSPECT_FN_PTR(check))(malloc_zone_t *zone); /* Consistency checker */ - void (* MALLOC_INTROSPECT_FN_PTR(print))(malloc_zone_t *zone, boolean_t verbose); /* Prints zone */ - void (* MALLOC_INTROSPECT_FN_PTR(log))(malloc_zone_t *zone, void *address); /* Enables logging of activity */ - void (* MALLOC_INTROSPECT_FN_PTR(force_lock))(malloc_zone_t *zone); /* Forces locking zone */ - void (* MALLOC_INTROSPECT_FN_PTR(force_unlock))(malloc_zone_t *zone); /* Forces unlocking zone */ - void (* MALLOC_INTROSPECT_FN_PTR(statistics))(malloc_zone_t *zone, malloc_statistics_t *stats); /* Fills statistics */ - boolean_t (* MALLOC_INTROSPECT_FN_PTR(zone_locked))(malloc_zone_t *zone); /* Are any zone locks held */ - - /* Discharge checking. Present in version >= 7. */ - boolean_t (* MALLOC_INTROSPECT_FN_PTR(enable_discharge_checking))(malloc_zone_t *zone); - void (* MALLOC_INTROSPECT_FN_PTR(disable_discharge_checking))(malloc_zone_t *zone); - void (* MALLOC_INTROSPECT_FN_PTR(discharge))(malloc_zone_t *zone, void *memory); -#ifdef __BLOCKS__ - void (* MALLOC_INTROSPECT_FN_PTR(enumerate_discharged_pointers))(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info)); - #else - void *enumerate_unavailable_without_blocks; -#endif /* __BLOCKS__ */ - void (* MALLOC_INTROSPECT_FN_PTR(reinit_lock))(malloc_zone_t *zone); /* Reinitialize zone locks, called only from atfork_child handler. Present in version >= 9. */ - void (* MALLOC_INTROSPECT_FN_PTR(print_task))(task_t task, unsigned level, vm_address_t zone_address, memory_reader_t reader, print_task_printer_t printer); /* debug print for another process. Present in version >= 11. */ - void (* MALLOC_INTROSPECT_FN_PTR(task_statistics))(task_t task, vm_address_t zone_address, memory_reader_t reader, malloc_statistics_t *stats); /* Present in version >= 12 */ -} malloc_introspection_t; - -// The value of "level" when passed to print_task() that corresponds to -// verbose passed to print() -#define MALLOC_VERBOSE_PRINT_LEVEL 2 - -extern void malloc_printf(const char *format, ...); - /* Convenience for logging errors and warnings; - No allocation is performed during execution of this function; - Only understands usual %p %d %s formats, and %y that expresses a number of bytes (5b,10KB,1MB...) 
- */ - -/********* Functions for performance tools ************/ - -extern kern_return_t malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count); - /* Fills addresses and count with the addresses of the zones in task; - Note that the validity of the addresses returned correspond to the validity of the memory returned by reader */ - -/********* Debug helpers ************/ - -extern void malloc_zone_print_ptr_info(void *ptr); - /* print to stdout if this pointer is in the malloc heap, free status, and size */ - -extern boolean_t malloc_zone_check(malloc_zone_t *zone); - /* Checks zone is well formed; if !zone, checks all zones */ - -extern void malloc_zone_print(malloc_zone_t *zone, boolean_t verbose); - /* Prints summary on zone; if !zone, prints all zones */ - -extern void malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats); - /* Fills statistics for zone; if !zone, sums up all zones */ - -extern void malloc_zone_log(malloc_zone_t *zone, void *address); - /* Controls logging of all activity; if !zone, for all zones; - If address==0 nothing is logged; - If address==-1 all activity is logged; - Else only the activity regarding address is logged */ - -struct mstats { - size_t bytes_total; - size_t chunks_used; - size_t bytes_used; - size_t chunks_free; - size_t bytes_free; -}; - -extern struct mstats mstats(void); - -extern boolean_t malloc_zone_enable_discharge_checking(malloc_zone_t *zone) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -/* Increment the discharge checking enabled counter for a zone. Returns true if the zone supports checking, false if it does not. */ - -extern void malloc_zone_disable_discharge_checking(malloc_zone_t *zone) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -/* Decrement the discharge checking enabled counter for a zone. */ - -extern void malloc_zone_discharge(malloc_zone_t *zone, void *memory) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -/* Register memory that the programmer expects to be freed soon. - zone may be NULL in which case the zone is determined using malloc_zone_from_ptr(). - If discharge checking is off for the zone this function is a no-op. */ - -#ifdef __BLOCKS__ -extern void malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info)) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -/* Calls report_discharged for each block that was registered using malloc_zone_discharge() but has not yet been freed. - info is used to provide zone defined information about the memory block. - If zone is NULL then the enumeration covers all zones. */ -#else -extern void malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -#endif /* __BLOCKS__ */ - -__END_DECLS - -#endif /* _MALLOC_MALLOC_H_ */ diff --git a/src/libmalloc/libmalloc.xcodeproj/project.pbxproj b/src/libmalloc/libmalloc.xcodeproj/project.pbxproj deleted file mode 100644 index b8dfdb851..000000000 --- a/src/libmalloc/libmalloc.xcodeproj/project.pbxproj +++ /dev/null @@ -1,1635 +0,0 @@ -// !$*UTF8*$! 
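The remainder of the patch is the Xcode project file and related build machinery, so this is a natural point to illustrate the zone interface that the malloc.h deleted above declares. A minimal, editor-supplied sketch (none of this code is part of the patch) of a client creating and using its own zone:

#include <malloc/malloc.h>
#include <stdio.h>

int
main(void)
{
	/* malloc_create_zone() creates a zone with default behavior and
	 * registers it with the malloc subsystem. */
	malloc_zone_t *zone = malloc_create_zone(0, 0);
	malloc_set_zone_name(zone, "example.zone");

	/* Allocate from this zone instead of the default zone. */
	char *buf = malloc_zone_malloc(zone, 64);

	/* malloc_zone_from_ptr() recovers the owning zone of an allocation,
	 * and malloc_size() reports the actual block size. */
	printf("owner: %s, size: %zu\n",
	    malloc_get_zone_name(malloc_zone_from_ptr(buf)),
	    malloc_size(buf));

	malloc_zone_free(zone, buf);

	/* Destroying the zone reclaims everything it allocated. */
	malloc_destroy_zone(zone);
	return 0;
}

Because the zone is registered at creation time, its allocations are visible to the introspection machinery (malloc_zone_from_ptr(), malloc_size(), the enumerators in malloc_introspection_t) just like default-zone allocations.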
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 46; - objects = { - -/* Begin PBXAggregateTarget section */ - 3FE9201116A9111000D1238A /* libmalloc */ = { - isa = PBXAggregateTarget; - buildConfigurationList = 3FE9201216A9111000D1238A /* Build configuration list for PBXAggregateTarget "libmalloc" */; - buildPhases = ( - 3FC1927C16DD946500315C26 /* Install Man Pages */, - 3FE9201D16A9143E00D1238A /* Sanitise Headers (rdar://problem/10241868) */, - ); - dependencies = ( - C0CE45501C52CCBD00C24048 /* PBXTargetDependency */, - 3FE9201816A9111600D1238A /* PBXTargetDependency */, - 3FE9201616A9111400D1238A /* PBXTargetDependency */, - ); - name = libmalloc; - productName = libmalloc; - }; - 45039161198FFF73004EE2A3 /* libmalloc_test */ = { - isa = PBXAggregateTarget; - buildConfigurationList = 45039162198FFF73004EE2A3 /* Build configuration list for PBXAggregateTarget "libmalloc_test" */; - buildPhases = ( - ); - dependencies = ( - 45039168198FFFA6004EE2A3 /* PBXTargetDependency */, - 925383D91BD03D0000F745DB /* PBXTargetDependency */, - ); - name = libmalloc_test; - productName = libmalloc_test; - }; - B60A57932009307E006215CB /* executables */ = { - isa = PBXAggregateTarget; - buildConfigurationList = B60A57962009307E006215CB /* Build configuration list for PBXAggregateTarget "executables" */; - buildPhases = ( - ); - dependencies = ( - B60A579820093093006215CB /* PBXTargetDependency */, - ); - name = executables; - productName = executables; - }; - E4B7FCB222000DAD0010A840 /* libmalloc_driverkit */ = { - isa = PBXAggregateTarget; - buildConfigurationList = E4B7FCBB22000DAD0010A840 /* Build configuration list for PBXAggregateTarget "libmalloc_driverkit" */; - buildPhases = ( - E4B7FCBA22000DAD0010A840 /* Sanitise Headers (rdar://problem/10241868) */, - ); - dependencies = ( - E4B7FCB522000DAD0010A840 /* PBXTargetDependency */, - ); - name = libmalloc_driverkit; - productName = libmalloc; - }; -/* End PBXAggregateTarget section */ - -/* Begin PBXBuildFile section */ - 08315122215ED9CF00D97E04 /* malloc_implementation.h in Headers */ = {isa = PBXBuildFile; fileRef = 08315121215ED9C700D97E04 /* malloc_implementation.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 08315123215EE9A100D97E04 /* stack_logging.h in Headers */ = {isa = PBXBuildFile; fileRef = C9571C391C18AA1D00A67EE3 /* stack_logging.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 2B67B5682040B3AF0003E78F /* _malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 2B67B5672040B3A50003E78F /* _malloc.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 3D157E7420354E02001630BF /* perfdata.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 3D157E7320354E02001630BF /* perfdata.framework */; }; - 3FE91FED16A90B9200D1238A /* bitarray.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD116A90A8D00D1238A /* bitarray.c */; }; - 3FE91FF016A90B9200D1238A /* magazine_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */; }; - 3FE91FF116A90B9200D1238A /* magmallocProvider.d in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */; }; - 3FE91FF216A90B9200D1238A /* malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD816A90A8D00D1238A /* malloc.c */; }; - 3FE91FF416A90B9200D1238A /* nano_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */; }; - 3FE91FFA16A90BEF00D1238A /* malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 3FE91FF916A90BEF00D1238A /* malloc.h */; 
settings = {ATTRIBUTES = (Public, ); }; }; - 3FE91FFF16A9109E00D1238A /* bitarray.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD116A90A8D00D1238A /* bitarray.c */; }; - 3FE9200116A9109E00D1238A /* magazine_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */; }; - 3FE9200216A9109E00D1238A /* magmallocProvider.d in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */; }; - 3FE9200316A9109E00D1238A /* malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD816A90A8D00D1238A /* malloc.c */; }; - 3FE9200416A9109E00D1238A /* nano_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */; }; - 925383DA1BD03D5100F745DB /* stress_test.c in Sources */ = {isa = PBXBuildFile; fileRef = 925383D11BD03B4A00F745DB /* stress_test.c */; }; - B61341DE20114B660038D163 /* ktrace.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = B61341DD20114B070038D163 /* ktrace.framework */; }; - B629CF28202BA149007719B9 /* nanov2_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */; }; - B629CF2E202BB337007719B9 /* bitarray.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD116A90A8D00D1238A /* bitarray.c */; }; - B629CF2F202BB337007719B9 /* purgeable_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429E1BF681B00027269A /* purgeable_malloc.c */; }; - B629CF30202BB337007719B9 /* magazine_large.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429B1BF672F80027269A /* magazine_large.c */; }; - B629CF31202BB337007719B9 /* magazine_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */; }; - B629CF32202BB337007719B9 /* empty.s in Sources */ = {isa = PBXBuildFile; fileRef = C9ABCA041CB6FC6800ECB399 /* empty.s */; }; - B629CF33202BB337007719B9 /* magazine_small.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742981BF670D00027269A /* magazine_small.c */; }; - B629CF34202BB337007719B9 /* legacy_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742AA1BF685CB0027269A /* legacy_malloc.c */; }; - B629CF35202BB337007719B9 /* magmallocProvider.d in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */; }; - B629CF36202BB337007719B9 /* malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD816A90A8D00D1238A /* malloc.c */; }; - B629CF37202BB337007719B9 /* frozen_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742A41BF6842F0027269A /* frozen_malloc.c */; }; - B629CF38202BB337007719B9 /* nanov2_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */; }; - B629CF39202BB337007719B9 /* nano_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */; }; - B629CF3B202BB337007719B9 /* magazine_tiny.c in Sources */ = {isa = PBXBuildFile; fileRef = C957428F1BF419DF0027269A /* magazine_tiny.c */; }; - B629CF3C202BB337007719B9 /* nano_malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */; }; - B65FBE2C2087AA2F00E21F59 /* malloc_printf.c in Sources */ = {isa = PBXBuildFile; fileRef = B65FBE2B2087AA2F00E21F59 /* malloc_printf.c */; }; - B66C71D92034BFAE0047E265 /* malloc_common.h in Headers */ = {isa = PBXBuildFile; fileRef = B66C71D72034BFAE0047E265 /* malloc_common.h */; }; - B66C71DA2034BFAE0047E265 /* malloc_common.c in Sources */ = {isa = PBXBuildFile; 
fileRef = B66C71D82034BFAE0047E265 /* malloc_common.c */; }; - B66C71DB2034BFD30047E265 /* malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B66C71D82034BFAE0047E265 /* malloc_common.c */; }; - B66C71DC2034BFD40047E265 /* malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B66C71D82034BFAE0047E265 /* malloc_common.c */; }; - B68B7F9E1FCDCBC600BAD1AA /* nano_malloc_common.h in Headers */ = {isa = PBXBuildFile; fileRef = B68B7F9C1FCDCBC600BAD1AA /* nano_malloc_common.h */; }; - B68B7F9F1FCDCBC600BAD1AA /* nano_malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */; }; - B68B7FA01FCDCBE700BAD1AA /* nano_malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */; }; - B68B7FA11FCDCBE800BAD1AA /* nano_malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */; }; - B68B7FA31FCDD67100BAD1AA /* nanov2_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = B68B7FA21FCDD60F00BAD1AA /* nanov2_malloc.h */; }; - B68B7FA51FCDD9A500BAD1AA /* nanov2_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */; }; - B68B7FA61FCDD9B200BAD1AA /* nanov2_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */; }; - B68B7FA71FCDD9B200BAD1AA /* nanov2_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */; }; - B6910F68202B630D00FF2EB0 /* bitarray.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD116A90A8D00D1238A /* bitarray.c */; }; - B6910F69202B630D00FF2EB0 /* purgeable_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429E1BF681B00027269A /* purgeable_malloc.c */; }; - B6910F6A202B630D00FF2EB0 /* magazine_large.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429B1BF672F80027269A /* magazine_large.c */; }; - B6910F6B202B630D00FF2EB0 /* magazine_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */; }; - B6910F6C202B630D00FF2EB0 /* empty.s in Sources */ = {isa = PBXBuildFile; fileRef = C9ABCA041CB6FC6800ECB399 /* empty.s */; }; - B6910F6D202B630D00FF2EB0 /* magazine_small.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742981BF670D00027269A /* magazine_small.c */; }; - B6910F6E202B630D00FF2EB0 /* legacy_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742AA1BF685CB0027269A /* legacy_malloc.c */; }; - B6910F6F202B630D00FF2EB0 /* magmallocProvider.d in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */; }; - B6910F70202B630D00FF2EB0 /* malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD816A90A8D00D1238A /* malloc.c */; }; - B6910F71202B630D00FF2EB0 /* frozen_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742A41BF6842F0027269A /* frozen_malloc.c */; }; - B6910F73202B630D00FF2EB0 /* nano_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */; }; - B6910F75202B630D00FF2EB0 /* magazine_tiny.c in Sources */ = {isa = PBXBuildFile; fileRef = C957428F1BF419DF0027269A /* magazine_tiny.c */; }; - B6910F76202B630D00FF2EB0 /* nano_malloc_common.c in Sources */ = {isa = PBXBuildFile; fileRef = B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */; }; - B6AFD93C214C6198007983DA /* msl_lite_support.c in Sources */ = {isa = PBXBuildFile; fileRef = B6AFD93B214C6198007983DA /* msl_lite_support.c */; }; - 
B6CA644E1FCE2C1900DEBA12 /* nanov2_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA644D1FCE2C0A00DEBA12 /* nanov2_zone.h */; }; - B6CA644F1FCE2C1A00DEBA12 /* nanov2_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA644D1FCE2C0A00DEBA12 /* nanov2_zone.h */; }; - B6CA64501FCE2C1B00DEBA12 /* nanov2_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA644D1FCE2C0A00DEBA12 /* nanov2_zone.h */; }; - B6CA64521FCF1AD200DEBA12 /* nano_zone_common.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA64511FCF1AAD00DEBA12 /* nano_zone_common.h */; }; - B6CA64531FCF1AD400DEBA12 /* nano_zone_common.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA64511FCF1AAD00DEBA12 /* nano_zone_common.h */; }; - B6CA64541FCF1AD400DEBA12 /* nano_zone_common.h in Headers */ = {isa = PBXBuildFile; fileRef = B6CA64511FCF1AAD00DEBA12 /* nano_zone_common.h */; }; - B6D2ED572007D91A007AF994 /* malloc_replay.cpp in Sources */ = {isa = PBXBuildFile; fileRef = B6D2ED552007D91A007AF994 /* malloc_replay.cpp */; }; - B6D5C7F1202E26F80035E376 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = B6D5C7ED202E26CA0035E376 /* resolver.c */; }; - B6D5C7F2202E26F80035E376 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = B6D5C7ED202E26CA0035E376 /* resolver.c */; }; - B6D5C7F3202E26F90035E376 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = B6D5C7ED202E26CA0035E376 /* resolver.c */; }; - B6D5C7F4202E26F90035E376 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = B6D5C7ED202E26CA0035E376 /* resolver.c */; }; - B6D5C7F5202E26FA0035E376 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = B6D5C7ED202E26CA0035E376 /* resolver.c */; }; - C0352EC71C3F3C4400DB5126 /* malloc_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C0352EC61C3F3C3600DB5126 /* malloc_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - C0CE45311C52C90500C24048 /* bitarray.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD116A90A8D00D1238A /* bitarray.c */; }; - C0CE45321C52C90500C24048 /* purgeable_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429E1BF681B00027269A /* purgeable_malloc.c */; }; - C0CE45331C52C90500C24048 /* magazine_large.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429B1BF672F80027269A /* magazine_large.c */; }; - C0CE45341C52C90500C24048 /* magazine_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */; }; - C0CE45351C52C90500C24048 /* magazine_small.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742981BF670D00027269A /* magazine_small.c */; }; - C0CE45361C52C90500C24048 /* legacy_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742AA1BF685CB0027269A /* legacy_malloc.c */; }; - C0CE45371C52C90500C24048 /* magmallocProvider.d in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */; }; - C0CE45381C52C90500C24048 /* malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FD816A90A8D00D1238A /* malloc.c */; }; - C0CE45391C52C90500C24048 /* frozen_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742A41BF6842F0027269A /* frozen_malloc.c */; }; - C0CE453A1C52C90500C24048 /* nano_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */; }; - C0CE453D1C52C90500C24048 /* magazine_tiny.c in Sources */ = {isa = PBXBuildFile; fileRef = C957428F1BF419DF0027269A /* magazine_tiny.c */; }; - C0CE45401C52C90500C24048 /* magazine_inline.h in Headers */ = {isa = PBXBuildFile; fileRef = 
C95742921BF41C970027269A /* magazine_inline.h */; }; - C0CE45411C52C90500C24048 /* nano_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C957427E1BF33D130027269A /* nano_zone.h */; }; - C0CE45421C52C90500C24048 /* thresholds.h in Headers */ = {isa = PBXBuildFile; fileRef = C957428C1BF411330027269A /* thresholds.h */; }; - C0CE45431C52C90500C24048 /* debug.h in Headers */ = {isa = PBXBuildFile; fileRef = C957427B1BF2C8DE0027269A /* debug.h */; }; - C0CE45441C52C90500C24048 /* frozen_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742A51BF6842F0027269A /* frozen_malloc.h */; }; - C0CE45451C52C90500C24048 /* magazine_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742861BF3F9550027269A /* magazine_zone.h */; }; - C0CE45461C52C90500C24048 /* magazine_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742951BF41E480027269A /* magazine_malloc.h */; }; - C0CE45471C52C90500C24048 /* purgeable_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C957429F1BF681B00027269A /* purgeable_malloc.h */; }; - C0CE45481C52C90500C24048 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742891BF3FD290027269A /* base.h */; }; - C0CE454E1C52C9E600C24048 /* libmalloc.a in CopyFiles */ = {isa = PBXBuildFile; fileRef = C0CE454C1C52C90500C24048 /* libmalloc.a */; }; - C932D2681D6B8D840063B19E /* vm.c in Sources */ = {isa = PBXBuildFile; fileRef = C932D2661D6B8D840063B19E /* vm.c */; }; - C932D2691D6B8D840063B19E /* vm.h in Headers */ = {isa = PBXBuildFile; fileRef = C932D2671D6B8D840063B19E /* vm.h */; }; - C938BBD31C74F7A400522BBD /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = C938BBD21C74F7A400522BBD /* trace.h */; }; - C94B447821925CA50005EA6F /* magazine_medium.c in Sources */ = {isa = PBXBuildFile; fileRef = C94B447721925C990005EA6F /* magazine_medium.c */; }; - C94B447921925CA60005EA6F /* magazine_medium.c in Sources */ = {isa = PBXBuildFile; fileRef = C94B447721925C990005EA6F /* magazine_medium.c */; }; - C94B447A21925CA60005EA6F /* magazine_medium.c in Sources */ = {isa = PBXBuildFile; fileRef = C94B447721925C990005EA6F /* magazine_medium.c */; }; - C94B447B21925CA70005EA6F /* magazine_medium.c in Sources */ = {isa = PBXBuildFile; fileRef = C94B447721925C990005EA6F /* magazine_medium.c */; }; - C94B447C21925CA80005EA6F /* magazine_medium.c in Sources */ = {isa = PBXBuildFile; fileRef = C94B447721925C990005EA6F /* magazine_medium.c */; }; - C95742721BF2C2880027269A /* bitarray.h in Headers */ = {isa = PBXBuildFile; fileRef = 3FE91FD216A90A8D00D1238A /* bitarray.h */; }; - C95742731BF2C2880027269A /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = C957426D1BF2C0C80027269A /* internal.h */; }; - C95742741BF2C2880027269A /* locking.h in Headers */ = {isa = PBXBuildFile; fileRef = C957426E1BF2C1480027269A /* locking.h */; }; - C95742751BF2C2880027269A /* printf.h in Headers */ = {isa = PBXBuildFile; fileRef = 3FE91FD916A90A8D00D1238A /* printf.h */; }; - C95742761BF2C2880027269A /* platform.h in Headers */ = {isa = PBXBuildFile; fileRef = C9F77BBA1BF2B84800812E13 /* platform.h */; }; - C95742771BF2C2880027269A /* legacy_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 3FE91FFB16A90E6C00D1238A /* legacy_malloc.h */; }; - C957427A1BF2C67E0027269A /* nano_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742791BF2C5F40027269A /* nano_malloc.h */; }; - C957427C1BF2C8DE0027269A /* debug.h in Headers */ = {isa = PBXBuildFile; fileRef = C957427B1BF2C8DE0027269A /* debug.h */; }; - C957427D1BF2C8DE0027269A /* debug.h in Headers */ = {isa = 
PBXBuildFile; fileRef = C957427B1BF2C8DE0027269A /* debug.h */; }; - C957427F1BF33D130027269A /* nano_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C957427E1BF33D130027269A /* nano_zone.h */; }; - C95742801BF33D130027269A /* nano_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C957427E1BF33D130027269A /* nano_zone.h */; }; - C95742871BF3F9550027269A /* magazine_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742861BF3F9550027269A /* magazine_zone.h */; }; - C95742881BF3F9550027269A /* magazine_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742861BF3F9550027269A /* magazine_zone.h */; }; - C957428A1BF3FD290027269A /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742891BF3FD290027269A /* base.h */; }; - C957428B1BF3FD290027269A /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742891BF3FD290027269A /* base.h */; }; - C957428D1BF411330027269A /* thresholds.h in Headers */ = {isa = PBXBuildFile; fileRef = C957428C1BF411330027269A /* thresholds.h */; }; - C957428E1BF411330027269A /* thresholds.h in Headers */ = {isa = PBXBuildFile; fileRef = C957428C1BF411330027269A /* thresholds.h */; }; - C95742901BF419DF0027269A /* magazine_tiny.c in Sources */ = {isa = PBXBuildFile; fileRef = C957428F1BF419DF0027269A /* magazine_tiny.c */; }; - C95742911BF419DF0027269A /* magazine_tiny.c in Sources */ = {isa = PBXBuildFile; fileRef = C957428F1BF419DF0027269A /* magazine_tiny.c */; }; - C95742931BF41C970027269A /* magazine_inline.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742921BF41C970027269A /* magazine_inline.h */; }; - C95742941BF41C970027269A /* magazine_inline.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742921BF41C970027269A /* magazine_inline.h */; }; - C95742961BF41E480027269A /* magazine_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742951BF41E480027269A /* magazine_malloc.h */; }; - C95742971BF41E480027269A /* magazine_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742951BF41E480027269A /* magazine_malloc.h */; }; - C95742991BF670D00027269A /* magazine_small.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742981BF670D00027269A /* magazine_small.c */; }; - C957429A1BF670D00027269A /* magazine_small.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742981BF670D00027269A /* magazine_small.c */; }; - C957429C1BF672F80027269A /* magazine_large.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429B1BF672F80027269A /* magazine_large.c */; }; - C957429D1BF672F80027269A /* magazine_large.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429B1BF672F80027269A /* magazine_large.c */; }; - C95742A01BF681B00027269A /* purgeable_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429E1BF681B00027269A /* purgeable_malloc.c */; }; - C95742A11BF681B00027269A /* purgeable_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C957429E1BF681B00027269A /* purgeable_malloc.c */; }; - C95742A21BF681B00027269A /* purgeable_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C957429F1BF681B00027269A /* purgeable_malloc.h */; }; - C95742A31BF681B00027269A /* purgeable_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C957429F1BF681B00027269A /* purgeable_malloc.h */; }; - C95742A61BF6842F0027269A /* frozen_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742A41BF6842F0027269A /* frozen_malloc.c */; }; - C95742A71BF6842F0027269A /* frozen_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742A41BF6842F0027269A /* frozen_malloc.c */; }; - C95742A81BF6842F0027269A /* frozen_malloc.h 
in Headers */ = {isa = PBXBuildFile; fileRef = C95742A51BF6842F0027269A /* frozen_malloc.h */; }; - C95742A91BF6842F0027269A /* frozen_malloc.h in Headers */ = {isa = PBXBuildFile; fileRef = C95742A51BF6842F0027269A /* frozen_malloc.h */; }; - C95742AB1BF685CB0027269A /* legacy_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742AA1BF685CB0027269A /* legacy_malloc.c */; }; - C95742AC1BF685CB0027269A /* legacy_malloc.c in Sources */ = {isa = PBXBuildFile; fileRef = C95742AA1BF685CB0027269A /* legacy_malloc.c */; }; - C99E320B1D6F7366005655A8 /* magazine_rack.c in Sources */ = {isa = PBXBuildFile; fileRef = C99E32091D6F7366005655A8 /* magazine_rack.c */; }; - C99E320C1D6F7366005655A8 /* magazine_rack.h in Headers */ = {isa = PBXBuildFile; fileRef = C99E320A1D6F7366005655A8 /* magazine_rack.h */; }; - C9ABCA051CB6FC6800ECB399 /* empty.s in Sources */ = {isa = PBXBuildFile; fileRef = C9ABCA041CB6FC6800ECB399 /* empty.s */; }; -/* End PBXBuildFile section */ - -/* Begin PBXContainerItemProxy section */ - 3FE9201516A9111400D1238A /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 3FE91FFD16A9109E00D1238A; - remoteInfo = libmalloc_eOS; - }; - 3FE9201716A9111600D1238A /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 3FE91FE716A90AEC00D1238A; - remoteInfo = libsystem_malloc; - }; - 45039167198FFFA6004EE2A3 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 456E51C8197DF0D600A7E488; - remoteInfo = libmalloc_stress_test; - }; - 925383D81BD03D0000F745DB /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 925383D41BD03C0500F745DB; - remoteInfo = darwintests; - }; - B60A579720093093006215CB /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = B6D2ED492007D76F007AF994; - remoteInfo = libmalloc_replay; - }; - B629CF43202BB389007719B9 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = B629CF2B202BB337007719B9; - remoteInfo = libmalloc_alt; - }; - B676F4AB202B66EF00933F6D /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = B6910F65202B630D00FF2EB0; - remoteInfo = libmalloc_mp; - }; - C0CE454F1C52CCBD00C24048 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = C0CE452F1C52C90500C24048; - remoteInfo = libmalloc_static; - }; - E4B7FCB622000DAD0010A840 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 3FFC1BE516A908F800027192 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 3FE91FE716A90AEC00D1238A; - remoteInfo = libsystem_malloc; - }; -/* End PBXContainerItemProxy section */ - -/* Begin PBXCopyFilesBuildPhase section */ - 456E51C7197DF0D600A7E488 /* CopyFiles */ = { - isa = PBXCopyFilesBuildPhase; - 
buildActionMask = 2147483647; - dstPath = /usr/share/man/man1/; - dstSubfolderSpec = 0; - files = ( - ); - runOnlyForDeploymentPostprocessing = 1; - }; - C0CE454D1C52C9D900C24048 /* CopyFiles */ = { - isa = PBXCopyFilesBuildPhase; - buildActionMask = 8; - dstPath = /usr/local/lib/loaderd; - dstSubfolderSpec = 0; - files = ( - C0CE454E1C52C9E600C24048 /* libmalloc.a in CopyFiles */, - ); - runOnlyForDeploymentPostprocessing = 1; - }; -/* End PBXCopyFilesBuildPhase section */ - -/* Begin PBXFileReference section */ - 08315121215ED9C700D97E04 /* malloc_implementation.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = malloc_implementation.h; sourceTree = "<group>"; }; - 084F5E831D50204F006CD296 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; - 086D54292159699B00A0ACD1 /* libSystem.B.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libSystem.B.tbd; path = usr/lib/libSystem.B.tbd; sourceTree = SDKROOT; }; - 2B67B5672040B3A50003E78F /* _malloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = _malloc.h; sourceTree = "<group>"; }; - 2BED1E6A203E2F7700CB8C15 /* make_tapi_happy.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = make_tapi_happy.h; sourceTree = "<group>"; }; - 3D157E7320354E02001630BF /* perfdata.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = perfdata.framework; path = ../MacOSX10.14.Internal.sdk/System/Library/PrivateFrameworks/perfdata.framework; sourceTree = SDKROOT; }; - 3FC452FF18E4ABFE003D6A38 /* manpages.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = manpages.sh; sourceTree = "<group>"; }; - 3FE91FC916A90A8D00D1238A /* malloc.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = malloc.3; sourceTree = "<group>"; }; - 3FE91FCA16A90A8D00D1238A /* malloc_size.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = malloc_size.3; sourceTree = "<group>"; }; - 3FE91FCB16A90A8D00D1238A /* malloc_zone_malloc.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = malloc_zone_malloc.3; sourceTree = "<group>"; }; - 3FE91FD116A90A8D00D1238A /* bitarray.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = bitarray.c; sourceTree = "<group>"; }; - 3FE91FD216A90A8D00D1238A /* bitarray.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bitarray.h; sourceTree = "<group>"; }; - 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = magazine_malloc.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.dtrace; path = magmallocProvider.d; sourceTree = "<group>"; }; - 3FE91FD816A90A8D00D1238A /* malloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc.c; sourceTree = "<group>"; }; - 3FE91FD916A90A8D00D1238A /* printf.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = printf.h; sourceTree = "<group>"; }; - 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = nano_malloc.c; sourceTree = "<group>"; }; - 3FE91FE016A90A8D00D1238A /* libmalloc.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc.xcconfig; sourceTree = 
"<group>"; }; - 3FE91FE116A90A8D00D1238A /* libmalloc_eos.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc_eos.xcconfig; sourceTree = "<group>"; }; - 3FE91FE316A90A8D00D1238A /* sanitise_headers.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = sanitise_headers.sh; sourceTree = "<group>"; }; - 3FE91FE816A90AEC00D1238A /* libsystem_malloc.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libsystem_malloc.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; - 3FE91FF916A90BEF00D1238A /* malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc.h; sourceTree = "<group>"; }; - 3FE91FFB16A90E6C00D1238A /* legacy_malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = legacy_malloc.h; sourceTree = "<group>"; }; - 3FE9201016A9109E00D1238A /* libmalloc_eOS.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmalloc_eOS.a; sourceTree = BUILT_PRODUCTS_DIR; }; - 456E51C9197DF0D600A7E488 /* libmalloc_stress_test */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = libmalloc_stress_test; sourceTree = BUILT_PRODUCTS_DIR; }; - 875E02E32125B62300A7FE8A /* aligned_alloc_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = aligned_alloc_test.c; sourceTree = "<group>"; }; - 875E02E42125C1D100A7FE8A /* posix_memalign_test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = posix_memalign_test.c; sourceTree = "<group>"; }; - 8CB962B01F7E9F610046942E /* asan.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = asan.c; sourceTree = "<group>"; }; - 8CB962B11F7E9FD00046942E /* tsan.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = tsan.c; sourceTree = "<group>"; }; - 925383D01BD03B4A00F745DB /* Makefile */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = "<group>"; }; - 925383D11BD03B4A00F745DB /* stress_test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = stress_test.c; sourceTree = "<group>"; }; - 925383D31BD03B8F00F745DB /* manpages.lst */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = manpages.lst; sourceTree = "<group>"; }; - B61341DD20114B070038D163 /* ktrace.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ktrace.framework; path = Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.Internal.sdk/System/Library/PrivateFrameworks/ktrace.framework; sourceTree = DEVELOPER_DIR; }; - B629CF29202BA3C2007719B9 /* libmalloc_resolver.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc_resolver.xcconfig; sourceTree = "<group>"; }; - B629CF42202BB337007719B9 /* libmalloc_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmalloc_alt.a; sourceTree = BUILT_PRODUCTS_DIR; }; - B629CF46202BBDEC007719B9 /* resolver_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver_internal.h; sourceTree = "<group>"; }; - B629CF48202BBE3B007719B9 /* resolver.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = "<group>"; }; - B64E100A205311DC004C4BA6 /* malloc_size_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_size_test.c; sourceTree = 
""; }; - B6536A62204754B6005FBE22 /* perf_contended_malloc_free.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_contended_malloc_free.c; sourceTree = ""; }; - B6536A6320475BA4005FBE22 /* basic_malloc_free_perf.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = basic_malloc_free_perf.c; sourceTree = ""; }; - B65A635D2252B3A000A95474 /* memory_pressure.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = memory_pressure.c; sourceTree = ""; }; - B65FBE2B2087AA2F00E21F59 /* malloc_printf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = malloc_printf.c; sourceTree = ""; }; - B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc_resolved.xcconfig; sourceTree = ""; }; - B66C71D72034BFAE0047E265 /* malloc_common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = malloc_common.h; sourceTree = ""; }; - B66C71D82034BFAE0047E265 /* malloc_common.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_common.c; sourceTree = ""; }; - B670DABD2072D0BB00139A1D /* perf_realloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_realloc.c; sourceTree = ""; }; - B6726EC92092473D00E8AF5A /* malloc_heap_check_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_heap_check_test.c; sourceTree = ""; }; - B675F74520213D0A00B5038B /* nano_tests.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = nano_tests.c; sourceTree = ""; }; - B68B7F9C1FCDCBC600BAD1AA /* nano_malloc_common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = nano_malloc_common.h; sourceTree = ""; }; - B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = nano_malloc_common.c; sourceTree = ""; }; - B68B7FA21FCDD60F00BAD1AA /* nanov2_malloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = nanov2_malloc.h; sourceTree = ""; }; - B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = nanov2_malloc.c; sourceTree = ""; }; - B68C985C2180BEB5003DAF36 /* region_cookie_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = region_cookie_test.c; sourceTree = ""; }; - B6910F89202B630D00FF2EB0 /* libmalloc_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmalloc_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; - B69B2B941FB3D00500FD5A8F /* magazine_malloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = magazine_malloc.c; sourceTree = ""; }; - B6A414EA1FBDF01C0038DC53 /* malloc_claimed_address_tests.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_claimed_address_tests.c; sourceTree = ""; }; - B6A494971F9918DD0016A799 /* calloc_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = calloc_test.c; sourceTree = ""; }; - B6A9C48C1F991716007D0853 /* malloc_free_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = malloc_free_test.c; sourceTree = ""; }; - B6AFD93B214C6198007983DA /* msl_lite_support.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = msl_lite_support.c; sourceTree = ""; }; - B6BE1D5D21BB120B00D5F315 /* MallocBenchTest */ = 
{isa = PBXFileReference; lastKnownFileType = folder; path = MallocBenchTest; sourceTree = ""; }; - B6C1C9C720D9B70F002CCC0B /* nano_trace_replay.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = nano_trace_replay.c; sourceTree = ""; }; - B6CA644D1FCE2C0A00DEBA12 /* nanov2_zone.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = nanov2_zone.h; sourceTree = ""; }; - B6CA64511FCF1AAD00DEBA12 /* nano_zone_common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = nano_zone_common.h; sourceTree = ""; }; - B6D2ED512007D76F007AF994 /* libmalloc_replay */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = libmalloc_replay; sourceTree = BUILT_PRODUCTS_DIR; }; - B6D2ED552007D91A007AF994 /* malloc_replay.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = malloc_replay.cpp; sourceTree = ""; }; - B6D2ED562007D91A007AF994 /* malloc_replay.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_replay.h; sourceTree = ""; }; - B6D5C7ED202E26CA0035E376 /* resolver.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; - C0352EC61C3F3C3600DB5126 /* malloc_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_private.h; sourceTree = ""; }; - C0CE450E1C52B9E300C24048 /* libmalloc_static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = libmalloc_static.xcconfig; sourceTree = ""; }; - C0CE454C1C52C90500C24048 /* libmalloc.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmalloc.a; sourceTree = BUILT_PRODUCTS_DIR; }; - C92853A01C767F08001FEAF3 /* install-codes.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-codes.sh"; sourceTree = ""; }; - C931B58F1C81248100D0D230 /* madvise.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = madvise.c; sourceTree = ""; }; - C932D2631D6B6ED40063B19E /* magazine_tiny_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = magazine_tiny_test.c; sourceTree = ""; }; - C932D2641D6B73270063B19E /* dtrace.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dtrace.h; sourceTree = ""; }; - C932D2661D6B8D840063B19E /* vm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = vm.c; sourceTree = ""; }; - C932D2671D6B8D840063B19E /* vm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vm.h; sourceTree = ""; }; - C938BBD21C74F7A400522BBD /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; - C93F76D71D6B9F8C0088931B /* magazine_testing.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = magazine_testing.h; sourceTree = ""; }; - C94B447721925C990005EA6F /* magazine_medium.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = magazine_medium.c; sourceTree = ""; }; - C9571C391C18AA1D00A67EE3 /* stack_logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stack_logging.h; sourceTree = ""; }; - C957426D1BF2C0C80027269A /* internal.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; - 
C957426E1BF2C1480027269A /* locking.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = locking.h; sourceTree = "<group>"; }; - C95742791BF2C5F40027269A /* nano_malloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = nano_malloc.h; sourceTree = "<group>"; }; - C957427B1BF2C8DE0027269A /* debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = debug.h; sourceTree = "<group>"; }; - C957427E1BF33D130027269A /* nano_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nano_zone.h; sourceTree = "<group>"; }; - C95742861BF3F9550027269A /* magazine_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = magazine_zone.h; sourceTree = "<group>"; }; - C95742891BF3FD290027269A /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; }; - C957428C1BF411330027269A /* thresholds.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = thresholds.h; sourceTree = "<group>"; }; - C957428F1BF419DF0027269A /* magazine_tiny.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = magazine_tiny.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - C95742921BF41C970027269A /* magazine_inline.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = magazine_inline.h; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; }; - C95742951BF41E480027269A /* magazine_malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = magazine_malloc.h; sourceTree = "<group>"; }; - C95742981BF670D00027269A /* magazine_small.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = magazine_small.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - C957429B1BF672F80027269A /* magazine_large.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = magazine_large.c; sourceTree = "<group>"; }; - C957429E1BF681B00027269A /* purgeable_malloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = purgeable_malloc.c; sourceTree = "<group>"; }; - C957429F1BF681B00027269A /* purgeable_malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = purgeable_malloc.h; sourceTree = "<group>"; }; - C95742A41BF6842F0027269A /* frozen_malloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = frozen_malloc.c; sourceTree = "<group>"; }; - C95742A51BF6842F0027269A /* frozen_malloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frozen_malloc.h; sourceTree = "<group>"; }; - C95742AA1BF685CB0027269A /* legacy_malloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = legacy_malloc.c; sourceTree = "<group>"; }; - C99E32091D6F7366005655A8 /* magazine_rack.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = magazine_rack.c; sourceTree = "<group>"; }; - C99E320A1D6F7366005655A8 /* magazine_rack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = magazine_rack.h; sourceTree = "<group>"; }; - C9ABCA041CB6FC6800ECB399 /* empty.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; 
path = empty.s; sourceTree = "<group>"; }; - C9F77BBA1BF2B84800812E13 /* platform.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = platform.h; sourceTree = "<group>"; }; - C9F8C2681D70B521008C4044 /* magazine_small_test.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = magazine_small_test.c; sourceTree = "<group>"; }; - C9F8C2691D74C93A008C4044 /* magazine_rack.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = magazine_rack.c; sourceTree = "<group>"; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 3FE91FE516A90AEC00D1238A /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 3FE9200716A9109E00D1238A /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 456E51C6197DF0D600A7E488 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B629CF3D202BB337007719B9 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B6910F77202B630D00FF2EB0 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B6D2ED4C2007D76F007AF994 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 3D157E7420354E02001630BF /* perfdata.framework in Frameworks */, - B61341DE20114B660038D163 /* ktrace.framework in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - C0CE453E1C52C90500C24048 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 084F5E821D50204F006CD296 /* Frameworks */ = { - isa = PBXGroup; - children = ( - 086D54292159699B00A0ACD1 /* libSystem.B.tbd */, - 3D157E7320354E02001630BF /* perfdata.framework */, - B61341DD20114B070038D163 /* ktrace.framework */, - 084F5E831D50204F006CD296 /* Foundation.framework */, - ); - name = Frameworks; - sourceTree = "<group>"; - }; - 08C28B411D501D2C000AE997 /* tools */ = { - isa = PBXGroup; - children = ( - B6D2ED552007D91A007AF994 /* malloc_replay.cpp */, - B6D2ED562007D91A007AF994 /* malloc_replay.h */, - ); - path = tools; - sourceTree = "<group>"; - }; - 3FE91FC816A90A8D00D1238A /* man */ = { - isa = PBXGroup; - children = ( - 925383D31BD03B8F00F745DB /* manpages.lst */, - 3FE91FC916A90A8D00D1238A /* malloc.3 */, - 3FE91FCA16A90A8D00D1238A /* malloc_size.3 */, - 3FE91FCB16A90A8D00D1238A /* malloc_zone_malloc.3 */, - ); - path = man; - sourceTree = "<group>"; - }; - 3FE91FCC16A90A8D00D1238A /* src */ = { - isa = PBXGroup; - children = ( - C95742891BF3FD290027269A /* base.h */, - 3FE91FD116A90A8D00D1238A /* bitarray.c */, - 3FE91FD216A90A8D00D1238A /* bitarray.h */, - C957427B1BF2C8DE0027269A /* debug.h */, - C932D2641D6B73270063B19E /* dtrace.h */, - C9ABCA041CB6FC6800ECB399 /* empty.s */, - C95742A41BF6842F0027269A /* frozen_malloc.c */, - C95742A51BF6842F0027269A /* frozen_malloc.h */, - C957426D1BF2C0C80027269A /* internal.h */, - C95742AA1BF685CB0027269A /* legacy_malloc.c */, - 3FE91FFB16A90E6C00D1238A /* legacy_malloc.h */, - C957426E1BF2C1480027269A /* locking.h */, - 
C95742921BF41C970027269A /* magazine_inline.h */, - C957429B1BF672F80027269A /* magazine_large.c */, - 3FE91FD616A90A8D00D1238A /* magazine_malloc.c */, - C95742951BF41E480027269A /* magazine_malloc.h */, - B65FBE2B2087AA2F00E21F59 /* malloc_printf.c */, - C99E32091D6F7366005655A8 /* magazine_rack.c */, - C99E320A1D6F7366005655A8 /* magazine_rack.h */, - C94B447721925C990005EA6F /* magazine_medium.c */, - C95742981BF670D00027269A /* magazine_small.c */, - C957428F1BF419DF0027269A /* magazine_tiny.c */, - C95742861BF3F9550027269A /* magazine_zone.h */, - 3FE91FD716A90A8D00D1238A /* magmallocProvider.d */, - 3FE91FD816A90A8D00D1238A /* malloc.c */, - B66C71D82034BFAE0047E265 /* malloc_common.c */, - B66C71D72034BFAE0047E265 /* malloc_common.h */, - B6AFD93B214C6198007983DA /* msl_lite_support.c */, - B68B7F9D1FCDCBC600BAD1AA /* nano_malloc_common.c */, - B68B7F9C1FCDCBC600BAD1AA /* nano_malloc_common.h */, - 3FE91FDA16A90A8D00D1238A /* nano_malloc.c */, - C95742791BF2C5F40027269A /* nano_malloc.h */, - C957427E1BF33D130027269A /* nano_zone.h */, - B6CA64511FCF1AAD00DEBA12 /* nano_zone_common.h */, - B68B7FA41FCDD9A500BAD1AA /* nanov2_malloc.c */, - B68B7FA21FCDD60F00BAD1AA /* nanov2_malloc.h */, - B6CA644D1FCE2C0A00DEBA12 /* nanov2_zone.h */, - C9F77BBA1BF2B84800812E13 /* platform.h */, - 3FE91FD916A90A8D00D1238A /* printf.h */, - C957429E1BF681B00027269A /* purgeable_malloc.c */, - C957429F1BF681B00027269A /* purgeable_malloc.h */, - C957428C1BF411330027269A /* thresholds.h */, - C938BBD21C74F7A400522BBD /* trace.h */, - C932D2661D6B8D840063B19E /* vm.c */, - C932D2671D6B8D840063B19E /* vm.h */, - ); - path = src; - sourceTree = "<group>"; - }; - 3FE91FDF16A90A8D00D1238A /* xcodeconfig */ = { - isa = PBXGroup; - children = ( - 3FE91FE016A90A8D00D1238A /* libmalloc.xcconfig */, - 3FE91FE116A90A8D00D1238A /* libmalloc_eos.xcconfig */, - C0CE450E1C52B9E300C24048 /* libmalloc_static.xcconfig */, - B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */, - B629CF29202BA3C2007719B9 /* libmalloc_resolver.xcconfig */, - ); - path = xcodeconfig; - sourceTree = "<group>"; - }; - 3FE91FE216A90A8D00D1238A /* xcodescripts */ = { - isa = PBXGroup; - children = ( - 3FC452FF18E4ABFE003D6A38 /* manpages.sh */, - 3FE91FE316A90A8D00D1238A /* sanitise_headers.sh */, - C92853A01C767F08001FEAF3 /* install-codes.sh */, - ); - path = xcodescripts; - sourceTree = "<group>"; - }; - 3FE91FE916A90AEC00D1238A /* Products */ = { - isa = PBXGroup; - children = ( - 3FE91FE816A90AEC00D1238A /* libsystem_malloc.dylib */, - 3FE9201016A9109E00D1238A /* libmalloc_eOS.a */, - 456E51C9197DF0D600A7E488 /* libmalloc_stress_test */, - C0CE454C1C52C90500C24048 /* libmalloc.a */, - B6D2ED512007D76F007AF994 /* libmalloc_replay */, - B6910F89202B630D00FF2EB0 /* libmalloc_mp.a */, - B629CF42202BB337007719B9 /* libmalloc_alt.a */, - ); - name = Products; - sourceTree = "<group>"; - }; - 3FE91FF716A90BEF00D1238A /* include */ = { - isa = PBXGroup; - children = ( - 3FE91FF816A90BEF00D1238A /* malloc */, - ); - path = include; - sourceTree = "<group>"; - }; - 3FE91FF816A90BEF00D1238A /* malloc */ = { - isa = PBXGroup; - children = ( - 2B67B5672040B3A50003E78F /* _malloc.h */, - 3FE91FF916A90BEF00D1238A /* malloc.h */, - ); - path = malloc; - sourceTree = "<group>"; - }; - 3FFC1BE416A908F800027192 = { - isa = PBXGroup; - children = ( - 3FE91FF716A90BEF00D1238A /* include */, - 3FE91FC816A90A8D00D1238A /* man */, - C9571C381C18AA0A00A67EE3 /* private */, - 3FE91FCC16A90A8D00D1238A /* src */, - B629CF45202BBDCC007719B9 /* resolver */, - 925383BD1BD03B4A00F745DB /* tests */, - 
3FE91FDF16A90A8D00D1238A /* xcodeconfig */, - 3FE91FE216A90A8D00D1238A /* xcodescripts */, - 08C28B411D501D2C000AE997 /* tools */, - 3FE91FE916A90AEC00D1238A /* Products */, - 084F5E821D50204F006CD296 /* Frameworks */, - ); - sourceTree = "<group>"; - tabWidth = 4; - usesTabs = 1; - }; - 925383BD1BD03B4A00F745DB /* tests */ = { - isa = PBXGroup; - children = ( - B6BE1D5D21BB120B00D5F315 /* MallocBenchTest */, - 8CB962B01F7E9F610046942E /* asan.c */, - 8CB962B11F7E9FD00046942E /* tsan.c */, - C931B58F1C81248100D0D230 /* madvise.c */, - C9F8C2691D74C93A008C4044 /* magazine_rack.c */, - B6A494971F9918DD0016A799 /* calloc_test.c */, - B6A9C48C1F991716007D0853 /* malloc_free_test.c */, - 875E02E32125B62300A7FE8A /* aligned_alloc_test.c */, - 875E02E42125C1D100A7FE8A /* posix_memalign_test.c */, - B6A414EA1FBDF01C0038DC53 /* malloc_claimed_address_tests.c */, - B6726EC92092473D00E8AF5A /* malloc_heap_check_test.c */, - C9F8C2681D70B521008C4044 /* magazine_small_test.c */, - B64E100A205311DC004C4BA6 /* malloc_size_test.c */, - C93F76D71D6B9F8C0088931B /* magazine_testing.h */, - C932D2631D6B6ED40063B19E /* magazine_tiny_test.c */, - B69B2B941FB3D00500FD5A8F /* magazine_malloc.c */, - B65A635D2252B3A000A95474 /* memory_pressure.c */, - 925383D01BD03B4A00F745DB /* Makefile */, - B6536A6320475BA4005FBE22 /* basic_malloc_free_perf.c */, - B675F74520213D0A00B5038B /* nano_tests.c */, - B6C1C9C720D9B70F002CCC0B /* nano_trace_replay.c */, - B6536A62204754B6005FBE22 /* perf_contended_malloc_free.c */, - B670DABD2072D0BB00139A1D /* perf_realloc.c */, - B68C985C2180BEB5003DAF36 /* region_cookie_test.c */, - 925383D11BD03B4A00F745DB /* stress_test.c */, - ); - path = tests; - sourceTree = "<group>"; - }; - B629CF45202BBDCC007719B9 /* resolver */ = { - isa = PBXGroup; - children = ( - B629CF48202BBE3B007719B9 /* resolver.h */, - B629CF46202BBDEC007719B9 /* resolver_internal.h */, - B6D5C7ED202E26CA0035E376 /* resolver.c */, - ); - path = resolver; - sourceTree = "<group>"; - }; - C9571C381C18AA0A00A67EE3 /* private */ = { - isa = PBXGroup; - children = ( - C9571C391C18AA1D00A67EE3 /* stack_logging.h */, - C0352EC61C3F3C3600DB5126 /* malloc_private.h */, - 08315121215ED9C700D97E04 /* malloc_implementation.h */, - 2BED1E6A203E2F7700CB8C15 /* make_tapi_happy.h */, - ); - path = private; - sourceTree = "<group>"; - }; -/* End PBXGroup section */ - -/* Begin PBXHeadersBuildPhase section */ - 3FE91FE616A90AEC00D1238A /* Headers */ = { - isa = PBXHeadersBuildPhase; - buildActionMask = 2147483647; - files = ( - C957428A1BF3FD290027269A /* base.h in Headers */, - C0352EC71C3F3C4400DB5126 /* malloc_private.h in Headers */, - C95742731BF2C2880027269A /* internal.h in Headers */, - B6CA644E1FCE2C1900DEBA12 /* nanov2_zone.h in Headers */, - C99E320C1D6F7366005655A8 /* magazine_rack.h in Headers */, - C95742721BF2C2880027269A /* bitarray.h in Headers */, - C932D2691D6B8D840063B19E /* vm.h in Headers */, - 08315123215EE9A100D97E04 /* stack_logging.h in Headers */, - B6CA64521FCF1AD200DEBA12 /* nano_zone_common.h in Headers */, - C938BBD31C74F7A400522BBD /* trace.h in Headers */, - C95742A81BF6842F0027269A /* frozen_malloc.h in Headers */, - 08315122215ED9CF00D97E04 /* malloc_implementation.h in Headers */, - C957428D1BF411330027269A /* thresholds.h in Headers */, - C95742741BF2C2880027269A /* locking.h in Headers */, - C95742931BF41C970027269A /* magazine_inline.h in Headers */, - B68B7FA31FCDD67100BAD1AA /* nanov2_malloc.h in Headers */, - C95742761BF2C2880027269A /* platform.h in Headers */, - C957427A1BF2C67E0027269A /* nano_malloc.h in Headers 
*/, - B66C71D92034BFAE0047E265 /* malloc_common.h in Headers */, - C957427C1BF2C8DE0027269A /* debug.h in Headers */, - 2B67B5682040B3AF0003E78F /* _malloc.h in Headers */, - C95742961BF41E480027269A /* magazine_malloc.h in Headers */, - 3FE91FFA16A90BEF00D1238A /* malloc.h in Headers */, - C95742871BF3F9550027269A /* magazine_zone.h in Headers */, - C95742771BF2C2880027269A /* legacy_malloc.h in Headers */, - C95742A21BF681B00027269A /* purgeable_malloc.h in Headers */, - B68B7F9E1FCDCBC600BAD1AA /* nano_malloc_common.h in Headers */, - C957427F1BF33D130027269A /* nano_zone.h in Headers */, - C95742751BF2C2880027269A /* printf.h in Headers */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 3FE9200816A9109E00D1238A /* Headers */ = { - isa = PBXHeadersBuildPhase; - buildActionMask = 2147483647; - files = ( - C95742941BF41C970027269A /* magazine_inline.h in Headers */, - C95742801BF33D130027269A /* nano_zone.h in Headers */, - B6CA644F1FCE2C1A00DEBA12 /* nanov2_zone.h in Headers */, - C957428E1BF411330027269A /* thresholds.h in Headers */, - C957427D1BF2C8DE0027269A /* debug.h in Headers */, - C95742A91BF6842F0027269A /* frozen_malloc.h in Headers */, - C95742881BF3F9550027269A /* magazine_zone.h in Headers */, - C95742971BF41E480027269A /* magazine_malloc.h in Headers */, - C95742A31BF681B00027269A /* purgeable_malloc.h in Headers */, - C957428B1BF3FD290027269A /* base.h in Headers */, - B6CA64531FCF1AD400DEBA12 /* nano_zone_common.h in Headers */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - C0CE453F1C52C90500C24048 /* Headers */ = { - isa = PBXHeadersBuildPhase; - buildActionMask = 2147483647; - files = ( - C0CE45401C52C90500C24048 /* magazine_inline.h in Headers */, - C0CE45411C52C90500C24048 /* nano_zone.h in Headers */, - B6CA64501FCE2C1B00DEBA12 /* nanov2_zone.h in Headers */, - C0CE45421C52C90500C24048 /* thresholds.h in Headers */, - C0CE45431C52C90500C24048 /* debug.h in Headers */, - C0CE45441C52C90500C24048 /* frozen_malloc.h in Headers */, - C0CE45451C52C90500C24048 /* magazine_zone.h in Headers */, - C0CE45461C52C90500C24048 /* magazine_malloc.h in Headers */, - C0CE45471C52C90500C24048 /* purgeable_malloc.h in Headers */, - C0CE45481C52C90500C24048 /* base.h in Headers */, - B6CA64541FCF1AD400DEBA12 /* nano_zone_common.h in Headers */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXHeadersBuildPhase section */ - -/* Begin PBXLegacyTarget section */ - 925383D41BD03C0500F745DB /* darwintests */ = { - isa = PBXLegacyTarget; - buildArgumentsString = "$(ACTION)"; - buildConfigurationList = 925383D51BD03C0500F745DB /* Build configuration list for PBXLegacyTarget "darwintests" */; - buildPhases = ( - ); - buildToolPath = /usr/bin/make; - buildWorkingDirectory = tests; - dependencies = ( - ); - name = darwintests; - passBuildSettingsInEnvironment = 1; - productName = darwintests; - }; -/* End PBXLegacyTarget section */ - -/* Begin PBXNativeTarget section */ - 3FE91FE716A90AEC00D1238A /* libsystem_malloc */ = { - isa = PBXNativeTarget; - buildConfigurationList = 3FE91FEA16A90AEC00D1238A /* Build configuration list for PBXNativeTarget "libsystem_malloc" */; - buildPhases = ( - 3FE91FE416A90AEC00D1238A /* Sources */, - 3FE91FE516A90AEC00D1238A /* Frameworks */, - 3FE91FE616A90AEC00D1238A /* Headers */, - C92853A11C767F78001FEAF3 /* Install Codes File */, - ); - buildRules = ( - ); - dependencies = ( - B676F4AC202B66EF00933F6D /* PBXTargetDependency */, - B629CF44202BB389007719B9 /* PBXTargetDependency */, - ); - name = libsystem_malloc; - productName = 
libmalloc; - productReference = 3FE91FE816A90AEC00D1238A /* libsystem_malloc.dylib */; - productType = "com.apple.product-type.library.dynamic"; - }; - 3FE91FFD16A9109E00D1238A /* libmalloc_eOS */ = { - isa = PBXNativeTarget; - buildConfigurationList = 3FE9200D16A9109E00D1238A /* Build configuration list for PBXNativeTarget "libmalloc_eOS" */; - buildPhases = ( - 3FE91FFE16A9109E00D1238A /* Sources */, - 3FE9200716A9109E00D1238A /* Frameworks */, - 3FE9200816A9109E00D1238A /* Headers */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_eOS; - productName = libmalloc; - productReference = 3FE9201016A9109E00D1238A /* libmalloc_eOS.a */; - productType = "com.apple.product-type.library.static"; - }; - 456E51C8197DF0D600A7E488 /* libmalloc_stress_test */ = { - isa = PBXNativeTarget; - buildConfigurationList = 456E51CF197DF0D600A7E488 /* Build configuration list for PBXNativeTarget "libmalloc_stress_test" */; - buildPhases = ( - 456E51C5197DF0D600A7E488 /* Sources */, - 456E51C6197DF0D600A7E488 /* Frameworks */, - 456E51C7197DF0D600A7E488 /* CopyFiles */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_stress_test; - productName = libmalloc_stress_test; - productReference = 456E51C9197DF0D600A7E488 /* libmalloc_stress_test */; - productType = "com.apple.product-type.tool"; - }; - B629CF2B202BB337007719B9 /* libmalloc_alt */ = { - isa = PBXNativeTarget; - buildConfigurationList = B629CF3F202BB337007719B9 /* Build configuration list for PBXNativeTarget "libmalloc_alt" */; - buildPhases = ( - B629CF2C202BB337007719B9 /* Sources */, - B629CF3D202BB337007719B9 /* Frameworks */, - B629CF3E202BB337007719B9 /* Symlink normal variant */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_alt; - productName = libmalloc; - productReference = B629CF42202BB337007719B9 /* libmalloc_alt.a */; - productType = "com.apple.product-type.library.static"; - }; - B6910F65202B630D00FF2EB0 /* libmalloc_mp */ = { - isa = PBXNativeTarget; - buildConfigurationList = B6910F86202B630D00FF2EB0 /* Build configuration list for PBXNativeTarget "libmalloc_mp" */; - buildPhases = ( - B6910F66202B630D00FF2EB0 /* Sources */, - B6910F77202B630D00FF2EB0 /* Frameworks */, - B629CF2A202BB226007719B9 /* Symlink normal variant */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_mp; - productName = libmalloc; - productReference = B6910F89202B630D00FF2EB0 /* libmalloc_mp.a */; - productType = "com.apple.product-type.library.static"; - }; - B6D2ED492007D76F007AF994 /* libmalloc_replay */ = { - isa = PBXNativeTarget; - buildConfigurationList = B6D2ED4E2007D76F007AF994 /* Build configuration list for PBXNativeTarget "libmalloc_replay" */; - buildPhases = ( - B6D2ED4A2007D76F007AF994 /* Sources */, - B6D2ED4C2007D76F007AF994 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_replay; - productName = libmalloc_stress_test; - productReference = B6D2ED512007D76F007AF994 /* libmalloc_replay */; - productType = "com.apple.product-type.tool"; - }; - C0CE452F1C52C90500C24048 /* libmalloc_static */ = { - isa = PBXNativeTarget; - buildConfigurationList = C0CE45491C52C90500C24048 /* Build configuration list for PBXNativeTarget "libmalloc_static" */; - buildPhases = ( - C0CE45301C52C90500C24048 /* Sources */, - C0CE453E1C52C90500C24048 /* Frameworks */, - C0CE453F1C52C90500C24048 /* Headers */, - C0CE454D1C52C9D900C24048 /* CopyFiles */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = libmalloc_static; - productName = 
libmalloc; - productReference = C0CE454C1C52C90500C24048 /* libmalloc.a */; - productType = "com.apple.product-type.library.static"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 3FFC1BE516A908F800027192 /* Project object */ = { - isa = PBXProject; - attributes = { - DefaultBuildSystemTypeForWorkspace = Latest; - LastSwiftUpdateCheck = 0700; - LastUpgradeCheck = 0900; - TargetAttributes = { - 45039161198FFF73004EE2A3 = { - CreatedOnToolsVersion = 6.0; - }; - 456E51C8197DF0D600A7E488 = { - CreatedOnToolsVersion = 6.0; - }; - 925383D41BD03C0500F745DB = { - CreatedOnToolsVersion = 7.1; - }; - B60A57932009307E006215CB = { - CreatedOnToolsVersion = 9.3; - ProvisioningStyle = Automatic; - }; - }; - }; - buildConfigurationList = 3FFC1BE816A908F800027192 /* Build configuration list for PBXProject "libmalloc" */; - compatibilityVersion = "Xcode 3.2"; - developmentRegion = English; - hasScannedForEncodings = 0; - knownRegions = ( - English, - en, - ); - mainGroup = 3FFC1BE416A908F800027192; - productRefGroup = 3FE91FE916A90AEC00D1238A /* Products */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 3FE9201116A9111000D1238A /* libmalloc */, - E4B7FCB222000DAD0010A840 /* libmalloc_driverkit */, - 45039161198FFF73004EE2A3 /* libmalloc_test */, - 3FE91FE716A90AEC00D1238A /* libsystem_malloc */, - B6910F65202B630D00FF2EB0 /* libmalloc_mp */, - B629CF2B202BB337007719B9 /* libmalloc_alt */, - 3FE91FFD16A9109E00D1238A /* libmalloc_eOS */, - C0CE452F1C52C90500C24048 /* libmalloc_static */, - B6D2ED492007D76F007AF994 /* libmalloc_replay */, - 456E51C8197DF0D600A7E488 /* libmalloc_stress_test */, - 925383D41BD03C0500F745DB /* darwintests */, - B60A57932009307E006215CB /* executables */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXShellScriptBuildPhase section */ - 3FC1927C16DD946500315C26 /* Install Man Pages */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/xcodescripts/manpages.sh", - ); - name = "Install Man Pages"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; - }; - 3FE9201D16A9143E00D1238A /* Sanitise Headers (rdar://problem/10241868) */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/xcodescripts/sanitise_headers.sh", - ); - name = "Sanitise Headers (rdar://problem/10241868)"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; - }; - B629CF2A202BB226007719B9 /* Symlink normal variant */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "Symlink normal variant"; - outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(EXECUTABLE_PREFIX)$(PRODUCT_NAME)_normal$(EXECUTABLE_SUFFIX)", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = "/bin/bash -e -x"; - shellScript = "ln -fs \"${EXECUTABLE_PREFIX}${PRODUCT_NAME}${EXECUTABLE_SUFFIX}\" \"${SCRIPT_OUTPUT_FILE_0}\""; - showEnvVarsInLog = 0; - }; - B629CF3E202BB337007719B9 /* Symlink normal variant */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "Symlink normal variant"; - outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(EXECUTABLE_PREFIX)$(PRODUCT_NAME)_normal$(EXECUTABLE_SUFFIX)", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = "/bin/bash -e -x"; - shellScript = "ln -fs \"${EXECUTABLE_PREFIX}${PRODUCT_NAME}${EXECUTABLE_SUFFIX}\" \"${SCRIPT_OUTPUT_FILE_0}\""; - showEnvVarsInLog = 0; - }; - C92853A11C767F78001FEAF3 /* Install Codes File */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/src/trace.h", - ); - name = "Install Codes File"; - outputPaths = ( - "$(DSTROOT)/usr/local/share/misc/libmalloc.codes", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/bash; - shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-codes.sh"; - }; - E4B7FCBA22000DAD0010A840 /* Sanitise Headers (rdar://problem/10241868) */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/xcodescripts/sanitise_headers.sh", - ); - name = "Sanitise Headers (rdar://problem/10241868)"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; - }; -/* End PBXShellScriptBuildPhase section */ - -/* Begin PBXSourcesBuildPhase section */ - 3FE91FE416A90AEC00D1238A /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 3FE91FED16A90B9200D1238A /* bitarray.c in Sources */, - B66C71DA2034BFAE0047E265 /* malloc_common.c in Sources */, - C95742A01BF681B00027269A /* purgeable_malloc.c in Sources */, - C957429C1BF672F80027269A /* magazine_large.c in Sources */, - 3FE91FF016A90B9200D1238A /* magazine_malloc.c in Sources */, - C95742991BF670D00027269A /* magazine_small.c in Sources */, - C99E320B1D6F7366005655A8 /* magazine_rack.c in Sources */, - C95742AB1BF685CB0027269A /* legacy_malloc.c in Sources */, - C932D2681D6B8D840063B19E /* vm.c in Sources */, - 3FE91FF116A90B9200D1238A /* magmallocProvider.d in Sources */, - B6AFD93C214C6198007983DA /* msl_lite_support.c in Sources */, - B68B7FA51FCDD9A500BAD1AA /* nanov2_malloc.c in Sources */, - B6D5C7F1202E26F80035E376 /* resolver.c in Sources */, - B68B7F9F1FCDCBC600BAD1AA /* nano_malloc_common.c in Sources */, - 3FE91FF216A90B9200D1238A /* malloc.c in Sources */, - C94B447821925CA50005EA6F /* magazine_medium.c in Sources */, - C95742A61BF6842F0027269A /* frozen_malloc.c in Sources */, - 3FE91FF416A90B9200D1238A /* nano_malloc.c in Sources */, - C95742901BF419DF0027269A /* magazine_tiny.c in Sources */, - B65FBE2C2087AA2F00E21F59 /* malloc_printf.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 3FE91FFE16A9109E00D1238A /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - C94B447B21925CA70005EA6F /* magazine_medium.c in Sources */, - B66C71DB2034BFD30047E265 /* malloc_common.c in Sources */, - 3FE91FFF16A9109E00D1238A /* bitarray.c in Sources */, - C95742A11BF681B00027269A /* purgeable_malloc.c in Sources */, - C957429D1BF672F80027269A /* magazine_large.c in Sources */, - 3FE9200116A9109E00D1238A /* magazine_malloc.c in Sources */, - C957429A1BF670D00027269A /* magazine_small.c in Sources */, - C95742AC1BF685CB0027269A /* legacy_malloc.c in Sources */, - B68B7FA01FCDCBE700BAD1AA /* nano_malloc_common.c in Sources */, - B68B7FA61FCDD9B200BAD1AA /* nanov2_malloc.c in Sources */, - 3FE9200216A9109E00D1238A /* magmallocProvider.d in Sources */, - 3FE9200316A9109E00D1238A /* malloc.c in Sources */, - C95742A71BF6842F0027269A /* frozen_malloc.c in Sources */, - 3FE9200416A9109E00D1238A /* nano_malloc.c in Sources */, - C95742911BF419DF0027269A /* magazine_tiny.c in Sources */, - B6D5C7F4202E26F90035E376 /* resolver.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 456E51C5197DF0D600A7E488 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 925383DA1BD03D5100F745DB /* stress_test.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B629CF2C202BB337007719B9 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - B629CF2E202BB337007719B9 /* bitarray.c in Sources */, - B629CF2F202BB337007719B9 /* purgeable_malloc.c in Sources */, - B6D5C7F3202E26F90035E376 /* resolver.c in Sources */, - B629CF30202BB337007719B9 /* magazine_large.c in Sources */, - B629CF31202BB337007719B9 /* magazine_malloc.c in Sources */, - C94B447A21925CA60005EA6F /* magazine_medium.c in Sources */, - B629CF32202BB337007719B9 /* empty.s in Sources */, - B629CF33202BB337007719B9 /* magazine_small.c in Sources */, - B629CF34202BB337007719B9 /* legacy_malloc.c in Sources */, - 
B629CF35202BB337007719B9 /* magmallocProvider.d in Sources */, - B629CF36202BB337007719B9 /* malloc.c in Sources */, - B629CF37202BB337007719B9 /* frozen_malloc.c in Sources */, - B629CF38202BB337007719B9 /* nanov2_malloc.c in Sources */, - B629CF39202BB337007719B9 /* nano_malloc.c in Sources */, - B629CF3B202BB337007719B9 /* magazine_tiny.c in Sources */, - B629CF3C202BB337007719B9 /* nano_malloc_common.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B6910F66202B630D00FF2EB0 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - B6910F68202B630D00FF2EB0 /* bitarray.c in Sources */, - B6910F69202B630D00FF2EB0 /* purgeable_malloc.c in Sources */, - B6D5C7F2202E26F80035E376 /* resolver.c in Sources */, - B6910F6A202B630D00FF2EB0 /* magazine_large.c in Sources */, - B6910F6B202B630D00FF2EB0 /* magazine_malloc.c in Sources */, - C94B447921925CA60005EA6F /* magazine_medium.c in Sources */, - B6910F6C202B630D00FF2EB0 /* empty.s in Sources */, - B6910F6D202B630D00FF2EB0 /* magazine_small.c in Sources */, - B6910F6E202B630D00FF2EB0 /* legacy_malloc.c in Sources */, - B6910F6F202B630D00FF2EB0 /* magmallocProvider.d in Sources */, - B6910F70202B630D00FF2EB0 /* malloc.c in Sources */, - B6910F71202B630D00FF2EB0 /* frozen_malloc.c in Sources */, - B629CF28202BA149007719B9 /* nanov2_malloc.c in Sources */, - B6910F73202B630D00FF2EB0 /* nano_malloc.c in Sources */, - B6910F75202B630D00FF2EB0 /* magazine_tiny.c in Sources */, - B6910F76202B630D00FF2EB0 /* nano_malloc_common.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - B6D2ED4A2007D76F007AF994 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - B6D2ED572007D91A007AF994 /* malloc_replay.cpp in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - C0CE45301C52C90500C24048 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - C0CE45311C52C90500C24048 /* bitarray.c in Sources */, - C94B447C21925CA80005EA6F /* magazine_medium.c in Sources */, - C0CE45321C52C90500C24048 /* purgeable_malloc.c in Sources */, - B6D5C7F5202E26FA0035E376 /* resolver.c in Sources */, - C0CE45331C52C90500C24048 /* magazine_large.c in Sources */, - C0CE45341C52C90500C24048 /* magazine_malloc.c in Sources */, - C9ABCA051CB6FC6800ECB399 /* empty.s in Sources */, - C0CE45351C52C90500C24048 /* magazine_small.c in Sources */, - C0CE45361C52C90500C24048 /* legacy_malloc.c in Sources */, - C0CE45371C52C90500C24048 /* magmallocProvider.d in Sources */, - C0CE45381C52C90500C24048 /* malloc.c in Sources */, - C0CE45391C52C90500C24048 /* frozen_malloc.c in Sources */, - B68B7FA71FCDD9B200BAD1AA /* nanov2_malloc.c in Sources */, - B66C71DC2034BFD40047E265 /* malloc_common.c in Sources */, - C0CE453A1C52C90500C24048 /* nano_malloc.c in Sources */, - C0CE453D1C52C90500C24048 /* magazine_tiny.c in Sources */, - B68B7FA11FCDCBE800BAD1AA /* nano_malloc_common.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin PBXTargetDependency section */ - 3FE9201616A9111400D1238A /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 3FE91FFD16A9109E00D1238A /* libmalloc_eOS */; - targetProxy = 3FE9201516A9111400D1238A /* PBXContainerItemProxy */; - }; - 3FE9201816A9111600D1238A /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 3FE91FE716A90AEC00D1238A /* libsystem_malloc */; - targetProxy = 3FE9201716A9111600D1238A /* 
PBXContainerItemProxy */; - }; - 45039168198FFFA6004EE2A3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 456E51C8197DF0D600A7E488 /* libmalloc_stress_test */; - targetProxy = 45039167198FFFA6004EE2A3 /* PBXContainerItemProxy */; - }; - 925383D91BD03D0000F745DB /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 925383D41BD03C0500F745DB /* darwintests */; - targetProxy = 925383D81BD03D0000F745DB /* PBXContainerItemProxy */; - }; - B60A579820093093006215CB /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = B6D2ED492007D76F007AF994 /* libmalloc_replay */; - targetProxy = B60A579720093093006215CB /* PBXContainerItemProxy */; - }; - B629CF44202BB389007719B9 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = B629CF2B202BB337007719B9 /* libmalloc_alt */; - targetProxy = B629CF43202BB389007719B9 /* PBXContainerItemProxy */; - }; - B676F4AC202B66EF00933F6D /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = B6910F65202B630D00FF2EB0 /* libmalloc_mp */; - targetProxy = B676F4AB202B66EF00933F6D /* PBXContainerItemProxy */; - }; - C0CE45501C52CCBD00C24048 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = C0CE452F1C52C90500C24048 /* libmalloc_static */; - targetProxy = C0CE454F1C52CCBD00C24048 /* PBXContainerItemProxy */; - }; - E4B7FCB522000DAD0010A840 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 3FE91FE716A90AEC00D1238A /* libsystem_malloc */; - targetProxy = E4B7FCB622000DAD0010A840 /* PBXContainerItemProxy */; - }; -/* End PBXTargetDependency section */ - -/* Begin XCBuildConfiguration section */ - 3FE91FEB16A90AEC00D1238A /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B629CF29202BA3C2007719B9 /* libmalloc_resolver.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - }; - name = Debug; - }; - 3FE91FEC16A90AEC00D1238A /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B629CF29202BA3C2007719B9 /* libmalloc_resolver.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - }; - name = Release; - }; - 3FE9200E16A9109E00D1238A /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = 3FE91FE116A90A8D00D1238A /* libmalloc_eos.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - }; - name = Debug; - }; - 3FE9200F16A9109E00D1238A /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = 3FE91FE116A90A8D00D1238A /* libmalloc_eos.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - }; - name = Release; - }; - 3FE9201316A9111000D1238A /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Debug; - }; - 3FE9201416A9111000D1238A /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Release; - }; - 3FFC1BE916A908F800027192 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ONLY_ACTIVE_ARCH = YES; - }; - name = Debug; - }; - 3FFC1BEA16A908F800027192 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - }; - name = Release; - }; - 45039163198FFF73004EE2A3 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Debug; - }; - 45039164198FFF73004EE2A3 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = 
Release; - }; - 456E51CD197DF0D600A7E488 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_UNREACHABLE_CODE = YES; - COPY_PHASE_STRIP = NO; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos"; - }; - name = Debug; - }; - 456E51CE197DF0D600A7E488 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_UNREACHABLE_CODE = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - "SKIP_INSTALL[sdk=iphonesimulator*]" = YES; - SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos"; - }; - name = Release; - }; - 925383D61BD03C0500F745DB /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos"; - }; - name = Debug; - }; - 925383D71BD03C0500F745DB /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos"; - }; - name = Release; - }; - B60A57942009307E006215CB /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - CODE_SIGN_STYLE = Automatic; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Debug; - }; - B60A57952009307E006215CB /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - CODE_SIGN_STYLE = Automatic; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Release; - }; - B629CF40202BB337007719B9 /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(PRODUCT_NAME)"; - RESOLVED_VARIANT = alt; - }; - name = Debug; - }; - B629CF41202BB337007719B9 /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(PRODUCT_NAME)"; - RESOLVED_VARIANT = alt; - }; - name = Release; - }; - B6910F87202B630D00FF2EB0 /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(PRODUCT_NAME)"; - RESOLVED_VARIANT = mp; - }; - name = Debug; - }; - B6910F88202B630D00FF2EB0 /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = B66AA658202A70B00019D607 /* libmalloc_resolved.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - 
PRODUCT_NAME = "$(PRODUCT_NAME)"; - RESOLVED_VARIANT = mp; - }; - name = Release; - }; - B6D2ED4F2007D76F007AF994 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_UNREACHABLE_CODE = YES; - COPY_PHASE_STRIP = NO; - FRAMEWORK_SEARCH_PATHS = ( - "$(SDKROOT)/System/Library/PrivateFrameworks", - "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks", - ); - GCC_OPTIMIZATION_LEVEL = 0; - GCC_PREPROCESSOR_DEFINITIONS = ( - "DEBUG=1", - "$(inherited)", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = "$(SRCROOT)/src"; - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - SUPPORTED_PLATFORMS = "macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator"; - VALID_ARCHS = "armv6 armv7 arm64 arm64_32 armv7k x86_64 x86_64h"; - }; - name = Debug; - }; - B6D2ED502007D76F007AF994 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; - CLANG_WARN_BOOL_CONVERSION = YES; - CLANG_WARN_CONSTANT_CONVERSION = YES; - CLANG_WARN_EMPTY_BODY = YES; - CLANG_WARN_ENUM_CONVERSION = YES; - CLANG_WARN_INT_CONVERSION = YES; - CLANG_WARN_UNREACHABLE_CODE = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - FRAMEWORK_SEARCH_PATHS = ( - "$(SDKROOT)/System/Library/PrivateFrameworks", - "$(SDKROOT)$(SYSTEM_LIBRARY_DIR)/PrivateFrameworks", - ); - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; - GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; - GCC_WARN_UNUSED_FUNCTION = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - HEADER_SEARCH_PATHS = "$(SRCROOT)/src"; - PRODUCT_NAME = "$(TARGET_NAME)"; - SDKROOT = macosx.internal; - "SKIP_INSTALL[sdk=iphonesimulator*]" = YES; - SUPPORTED_PLATFORMS = "macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator"; - VALID_ARCHS = "armv6 armv7 arm64 arm64_32 armv7k x86_64 x86_64h"; - }; - name = Release; - }; - C0CE454A1C52C90500C24048 /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = C0CE450E1C52B9E300C24048 /* libmalloc_static.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(PRODUCT_NAME)"; - }; - name = Debug; - }; - C0CE454B1C52C90500C24048 /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = C0CE450E1C52B9E300C24048 /* libmalloc_static.xcconfig */; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(PRODUCT_NAME)"; - }; - name = Release; - }; - E4B7FCBC22000DAD0010A840 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Debug; - }; - E4B7FCBD22000DAD0010A840 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - COMBINE_HIDPI_IMAGES = YES; - PRODUCT_NAME = "$(TARGET_NAME)"; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 3FE91FEA16A90AEC00D1238A /* Build configuration list for PBXNativeTarget "libsystem_malloc" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 3FE91FEB16A90AEC00D1238A /* Debug */, - 3FE91FEC16A90AEC00D1238A /* Release */, - ); 
- defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 3FE9200D16A9109E00D1238A /* Build configuration list for PBXNativeTarget "libmalloc_eOS" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 3FE9200E16A9109E00D1238A /* Debug */, - 3FE9200F16A9109E00D1238A /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 3FE9201216A9111000D1238A /* Build configuration list for PBXAggregateTarget "libmalloc" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 3FE9201316A9111000D1238A /* Debug */, - 3FE9201416A9111000D1238A /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 3FFC1BE816A908F800027192 /* Build configuration list for PBXProject "libmalloc" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 3FFC1BE916A908F800027192 /* Debug */, - 3FFC1BEA16A908F800027192 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 45039162198FFF73004EE2A3 /* Build configuration list for PBXAggregateTarget "libmalloc_test" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 45039163198FFF73004EE2A3 /* Debug */, - 45039164198FFF73004EE2A3 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 456E51CF197DF0D600A7E488 /* Build configuration list for PBXNativeTarget "libmalloc_stress_test" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 456E51CD197DF0D600A7E488 /* Debug */, - 456E51CE197DF0D600A7E488 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 925383D51BD03C0500F745DB /* Build configuration list for PBXLegacyTarget "darwintests" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 925383D61BD03C0500F745DB /* Debug */, - 925383D71BD03C0500F745DB /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - B60A57962009307E006215CB /* Build configuration list for PBXAggregateTarget "executables" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - B60A57942009307E006215CB /* Debug */, - B60A57952009307E006215CB /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - B629CF3F202BB337007719B9 /* Build configuration list for PBXNativeTarget "libmalloc_alt" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - B629CF40202BB337007719B9 /* Debug */, - B629CF41202BB337007719B9 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - B6910F86202B630D00FF2EB0 /* Build configuration list for PBXNativeTarget "libmalloc_mp" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - B6910F87202B630D00FF2EB0 /* Debug */, - B6910F88202B630D00FF2EB0 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - B6D2ED4E2007D76F007AF994 /* Build configuration list for PBXNativeTarget "libmalloc_replay" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - B6D2ED4F2007D76F007AF994 /* Debug */, - B6D2ED502007D76F007AF994 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - C0CE45491C52C90500C24048 /* Build configuration list for PBXNativeTarget "libmalloc_static" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - C0CE454A1C52C90500C24048 /* Debug */, - C0CE454B1C52C90500C24048 /* Release */, - ); - 
defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - E4B7FCBB22000DAD0010A840 /* Build configuration list for PBXAggregateTarget "libmalloc_driverkit" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - E4B7FCBC22000DAD0010A840 /* Debug */, - E4B7FCBD22000DAD0010A840 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - }; - rootObject = 3FFC1BE516A908F800027192 /* Project object */; -} diff --git a/src/libmalloc/man/malloc.3 b/src/libmalloc/man/malloc.3 deleted file mode 100644 index a70249c8c..000000000 --- a/src/libmalloc/man/malloc.3 +++ /dev/null @@ -1,334 +0,0 @@ -.\" Copyright (c) 2006 Apple Computer, Inc. All rights reserved. -.\" -.\" @APPLE_LICENSE_HEADER_START@ -.\" -.\" The contents of this file constitute Original Code as defined in and -.\" are subject to the Apple Public Source License Version 1.1 (the -.\" "License"). You may not use this file except in compliance with the -.\" License. Please obtain a copy of the License at -.\" http://www.apple.com/publicsource and read it before using this file. -.\" -.\" This Original Code and all software distributed under the License are -.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER -.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, -.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, -.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the -.\" License for the specific language governing rights and limitations -.\" under the License. -.\" -.\" @APPLE_LICENSE_HEADER_END@ -.\" -.Dd Aug 13, 2008 -.Dt MALLOC 3 -.Os -.Sh NAME -.Nm calloc , -.Nm free , -.Nm malloc , -.Nm realloc , -.Nm reallocf , -.Nm valloc , -.Nm aligned_alloc -.Nd memory allocation -.Sh SYNOPSIS -.In stdlib.h -.Ft void * -.Fo calloc -.Fa "size_t count" -.Fa "size_t size" -.Fc -.Ft void -.Fo free -.Fa "void *ptr" -.Fc -.Ft void * -.Fo malloc -.Fa "size_t size" -.Fc -.Ft void * -.Fo realloc -.Fa "void *ptr" -.Fa "size_t size" -.Fc -.Ft void * -.Fo reallocf -.Fa "void *ptr" -.Fa "size_t size" -.Fc -.Ft void * -.Fo valloc -.Fa "size_t size" -.Fc -.Ft void * -.Fo aligned_alloc -.Fa "size_t alignment" -.Fa "size_t size" -.Fc -.Sh DESCRIPTION -The -.Fn malloc , -.Fn calloc , -.Fn valloc , -.Fn realloc , -and -.Fn reallocf -functions allocate memory. -The allocated memory is aligned such that it can be used for any data type, -including AltiVec- and SSE-related types. -The -.Fn aligned_alloc -function allocates memory with the requested alignment. -The -.Fn free -function frees allocations that were created via the preceding allocation -functions. -.Pp -The -.Fn malloc -function allocates -.Fa size -bytes of memory and returns a pointer to the allocated memory. -.Pp -The -.Fn calloc -function contiguously allocates enough space for -.Fa count -objects that are -.Fa size -bytes of memory each and returns a pointer to the allocated memory. -The allocated memory is filled with bytes of value zero. -.Pp -The -.Fn valloc -function allocates -.Fa size -bytes of memory and returns a pointer to the allocated memory. -The allocated memory is aligned on a page boundary. -.Pp -The -.Fn aligned_alloc -function allocates -.Fa size -bytes of memory with an alignment specified by -.Fa alignment -and returns a pointer to the allocated memory. -.Pp -The -.Fn realloc -function tries to change the size of the allocation pointed to by -.Fa ptr -to -.Fa size , -and returns -.Fa ptr . 
-If there is not enough room to enlarge the memory allocation pointed to by -.Fa ptr , -.Fn realloc -creates a new allocation, copies as much of the old data pointed to by -.Fa ptr -as will fit to the new allocation, frees the old allocation, and returns a -pointer to the allocated memory. -If -.Fa ptr -is -.Dv NULL , -.Fn realloc -is identical to a call to -.Fn malloc -for -.Fa size -bytes. -If -.Fa size -is zero and -.Fa ptr -is not -.Dv NULL , -a new, minimum sized object is allocated and the original object is freed. -When extending a region allocated with calloc(3), realloc(3) does not guarantee -that the additional memory is also zero-filled. -.Pp -The -.Fn reallocf -function is identical to the -.Fn realloc -function, except that it -will free the passed pointer when the requested memory cannot be allocated. -This is a -.Fx -specific API designed to ease the problems with traditional coding styles -for realloc causing memory leaks in libraries. -.Pp -The -.Fn free -function deallocates the memory allocation pointed to by -.Fa ptr . If -.Fa ptr -is a NULL pointer, no operation is performed. -.Sh RETURN VALUES -If successful, -.Fn calloc , -.Fn malloc , -.Fn realloc , -.Fn reallocf , -.Fn valloc , -and -.Fn aligned_alloc -functions return a pointer to allocated memory. -If there is an error, they return a -.Dv NULL -pointer and set -.Va errno -to -.Er ENOMEM . -.Pp -In addition, -.Fn aligned_alloc -returns a -.Dv NULL -pointer and sets -.Va errno -to -.Er EINVAL -if -.Fa size -is not an integral multiple of -.Fa alignment , -or if -.Fa alignment -is not a power of 2 at least as large as -.Fn sizeof "void *" . -.Pp -For -.Fn realloc , -the input pointer is still valid if reallocation failed. -For -.Fn reallocf , -the input pointer will have been freed if reallocation failed. -.Pp -The -.Fn free -function does not return a value. -.Sh DEBUGGING ALLOCATION ERRORS -A number of facilities are provided to aid in debugging allocation errors in -applications. -These facilities are primarily controlled via environment variables. -The recognized environment variables and their meanings are documented below. -.Sh ENVIRONMENT -The following environment variables change the behavior of the -allocation-related functions. -.Bl -tag -width ".Ev MallocStackLoggingNoCompact" -.It Ev MallocDebugReport -If set, specifies where messages are written. Set to "stderr" to write messages -to the standard error stream, "none" to discard all messages and "crash" to -write messages to standard error only for a condition that is about to cause a -crash. When not set, message are written to the standard error stream if it -appears to be a terminal (that is, if isatty(STDERR_FILENO) returns a non-zero -value) and are otherwise discarded. -.It Ev MallocGuardEdges -If set, add a guard page before and after each large block. -.It Ev MallocDoNotProtectPrelude -If set, do not add a guard page before large blocks, -even if the -.Ev MallocGuardEdges -environment variable is set. -.It Ev MallocDoNotProtectPostlude -If set, do not add a guard page after large blocks, -even if the -.Ev MallocGuardEdges -environment variable is set. -.It Ev MallocStackLogging -The default behavior if this is set is to record all allocation and deallocation events to an on-disk log, along with stacks, so that tools like -.Xr leaks 1 -and -.Xr malloc_history 1 -can be used. -.Pp -Set to "vm" to record only allocation of virtual memory regions allocated by system calls and mach traps, such as by -.Xr mmap 1 -. 
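The realloc/reallocf contract documented in this man page is easiest to see side by side. A minimal sketch, assuming only the standard interfaces declared in the SYNOPSIS above (the buffer and size names are illustrative):

#include <stdlib.h>

int
grow_buffers(void)
{
	size_t n = 1024;
	char *buf = malloc(n);
	if (buf == NULL)
		return -1;

	/* With realloc, keep the old pointer until success: on failure it
	 * returns NULL and the original allocation remains valid. */
	char *tmp = realloc(buf, n * 2);
	if (tmp == NULL) {
		free(buf);	/* avoids the classic buf = realloc(buf, ...) leak */
		return -1;
	}
	buf = tmp;

	/* reallocf frees the passed pointer itself when it fails, so
	 * assigning through the same variable cannot leak. */
	buf = reallocf(buf, n * 4);
	if (buf == NULL)
		return -1;

	free(buf);
	return 0;
}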
-.Pp -Set to "malloc" to record only allocations via -.Xr malloc 3 -and related interfaces, not virtual memory regions. -.Pp -Set to "lite" to record current allocations only, not history. These are recorded by in-memory data structures, instead of an on-disk log. -.It Ev MallocStackLoggingNoCompact -If set, record all stacks in a manner that is compatible with the -.Nm malloc_history -program. -.It Ev MallocStackLoggingDirectory -If set, records stack logs to the directory specified instead of saving them to the default location (/tmp). -.It Ev MallocScribble -If set, fill memory that has been allocated with 0xaa bytes. -This increases the likelihood that a program making assumptions about the contents of -freshly allocated memory will fail. -Also if set, fill memory that has been deallocated with 0x55 bytes. -This increases the likelihood that a program will fail due to accessing memory -that is no longer allocated. Note that due to the way in which freed memory is -managed internally, the 0x55 pattern may not appear in some parts of a -deallocated memory block. -.It Ev MallocCheckHeapStart -If set, specifies the number of allocations -.Fa -to wait before begining periodic heap checks every -.Fa -as specified by -.Ev MallocCheckHeapEach . -If -.Ev MallocCheckHeapStart -is set but -.Ev MallocCheckHeapEach -is not specified, the default check repetition is 1000. -.It Ev MallocCheckHeapEach -If set, run a consistency check on the heap every -.Fa -operations. -.Ev MallocCheckHeapEach -is only meaningful if -.Ev MallocCheckHeapStart -is also set. -.It Ev MallocCheckHeapSleep -Sets the number of seconds to sleep (waiting for a debugger to attach) when -.Ev MallocCheckHeapStart -is set and a heap corruption is detected. -The default is 100 seconds. -Setting this to zero means not to sleep at all. -Setting this to a negative number means to sleep (for the positive number of -seconds) only the very first time a heap corruption is detected. -.It Ev MallocCheckHeapAbort -When -.Ev MallocCheckHeapStart -is set and this is set to a non-zero value, causes -.Xr abort 3 -to be called if a heap corruption is detected, instead of any sleeping. -.It Ev MallocErrorAbort -If set, causes -.Xr abort 3 -to be called if an error was encountered in -.Xr malloc 3 -or -.Xr free 3 -, such as a calling -.Xr free 3 -on a pointer previously freed. -.It Ev MallocCorruptionAbort -Similar to -.Ev MallocErrorAbort -but will not abort in out of memory conditions, making it more useful to catch -only those errors which will cause memory corruption. -MallocCorruptionAbort is always set on 64-bit processes. -.It Ev MallocHelp -If set, print a list of environment variables that are paid heed to by the -allocation-related functions, along with short descriptions. -The list should correspond to this documentation. -.El -.Sh DIAGNOSTIC MESSAGES -.Sh SEE ALSO -.Xr leaks 1 , -.Xr malloc_history 1 , -.Xr abort 3 , -.Xr malloc_size 3 , -.Xr malloc_zone_malloc 3 , -.Xr posix_memalign 3 , -.Xr libgmalloc 3 diff --git a/src/libmalloc/man/malloc_size.3 b/src/libmalloc/man/malloc_size.3 deleted file mode 100644 index 378db96f8..000000000 --- a/src/libmalloc/man/malloc_size.3 +++ /dev/null @@ -1,55 +0,0 @@ -.\" Copyright (c) 2006 Apple Computer, Inc. All rights reserved. -.\" -.\" @APPLE_LICENSE_HEADER_START@ -.\" -.\" The contents of this file constitute Original Code as defined in and -.\" are subject to the Apple Public Source License Version 1.1 (the -.\" "License"). You may not use this file except in compliance with the -.\" License. 
Please obtain a copy of the License at -.\" http://www.apple.com/publicsource and read it before using this file. -.\" -.\" This Original Code and all software distributed under the License are -.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER -.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, -.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, -.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the -.\" License for the specific language governing rights and limitations -.\" under the License. -.\" -.\" @APPLE_LICENSE_HEADER_END@ -.\" -.Dd May 23, 2006 -.Dt MALLOC_SIZE 3 -.Os -.Sh NAME -.Nm malloc_good_size , -.Nm malloc_size -.Nd memory allocation information -.Sh SYNOPSIS -.In malloc/malloc.h -.Ft size_t -.Fo malloc_good_size -.Fa "size_t size" -.Fc -.Ft size_t -.Fo malloc_size -.Fa "const void *ptr" -.Fc -.Sh DESCRIPTION -The -.Fn malloc_size -function returns the size of the memory block -that backs the allocation pointed to by -.Fa ptr . -The memory block size is always at least as large -as the allocation it backs, and may be larger. -.Pp -The -.Fn malloc_good_size -function rounds -.Fa size -up to a value that the allocator implementation can allocate -without adding any padding; -it then returns that rounded-up value. -.Sh SEE ALSO -.Xr malloc 3 diff --git a/src/libmalloc/man/malloc_zone_malloc.3 b/src/libmalloc/man/malloc_zone_malloc.3 deleted file mode 100644 index fe0e9926b..000000000 --- a/src/libmalloc/man/malloc_zone_malloc.3 +++ /dev/null @@ -1,162 +0,0 @@ -.\" Copyright (c) 2008 Apple, Inc. All rights reserved. -.\" -.\" @APPLE_LICENSE_HEADER_START@ -.\" -.\" The contents of this file constitute Original Code as defined in and -.\" are subject to the Apple Public Source License Version 1.1 (the -.\" "License"). You may not use this file except in compliance with the -.\" License. Please obtain a copy of the License at -.\" http://www.apple.com/publicsource and read it before using this file. -.\" -.\" This Original Code and all software distributed under the License are -.\" distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER -.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, -.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, -.\" FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the -.\" License for the specific language governing rights and limitations -.\" under the License. 
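The relationship between malloc_good_size and malloc_size described in malloc_size.3 above can be checked directly. A short sketch (Darwin-specific, using only the header named in that page's SYNOPSIS):

#include <stdio.h>
#include <stdlib.h>
#include <malloc/malloc.h>

int
main(void)
{
	size_t want = 100;
	/* What the allocator would round a 100-byte request up to. */
	size_t good = malloc_good_size(want);

	void *p = malloc(want);
	if (p == NULL)
		return 1;
	/* The block backing p is at least as large as the request;
	 * asking for good bytes instead would waste no padding. */
	printf("requested %zu, good size %zu, backing block %zu\n",
	    want, good, malloc_size(p));
	free(p);
	return 0;
}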
-.\" -.\" @APPLE_LICENSE_HEADER_END@ -.\" -.Dd Aug 13, 2008 -.Dt MALLOC_ZONE_MALLOC 3 -.Os -.Sh NAME -.Nm malloc_create_zone , -.Nm malloc_destroy_zone , -.Nm malloc_default_zone , -.Nm malloc_zone_from_ptr , -.Nm malloc_zone_malloc , -.Nm malloc_zone_calloc , -.Nm malloc_zone_valloc , -.Nm malloc_zone_realloc , -.Nm malloc_zone_memalign , -.Nm malloc_zone_free -.Nd zone-based memory allocation -.Sh SYNOPSIS -.In malloc/malloc.h -.Ft malloc_zone_t * -.Fo malloc_create_zone -.Fa "vm_size_t start_size" -.Fa "unsigned flags" -.Fc -.Ft void -.Fo malloc_destroy_zone -.Fa "malloc_zone_t *zone" -.Fc -.Ft malloc_zone_t * -.Fo malloc_default_zone -.Fa void -.Fc -.Ft malloc_zone_t * -.Fo malloc_zone_from_ptr -.Fa "const void *ptr" -.Fc -.Ft void * -.Fo malloc_zone_malloc -.Fa "malloc_zone_t *zone" -.Fa "size_t size" -.Fc -.Ft void * -.Fo malloc_zone_calloc -.Fa "malloc_zone_t *zone" -.Fa "size_t num_items" -.Fa "size_t size" -.Fc -.Ft void * -.Fo malloc_zone_valloc -.Fa "malloc_zone_t *zone" -.Fa "size_t size" -.Fc -.Ft void * -.Fo malloc_zone_realloc -.Fa "malloc_zone_t *zone" -.Fa "void *ptr" -.Fa "size_t size" -.Fc -.Ft void * -.Fo malloc_zone_memalign -.Fa "malloc_zone_t *zone" -.Fa "size_t alignment" -.Fa "size_t size" -.Fc -.Ft void -.Fo malloc_zone_free -.Fa "malloc_zone_t *zone" -.Fa "void *ptr" -.Fc -.Sh DESCRIPTION -The -.Fn malloc_create_zone -function creates a malloc zone, advising an initial allocation of -.Fa start_size -bytes, and specifying -.Fa flags -The returned malloc zone can be used to provide custom allocation and -deallocation behavior, and to retrieve additional information about the -allocations in that zone. -At present there are no client settable flag values recognized by malloc_create_zone(), -the flags argument should always be passed as zero. -.Pp -The -.Fn malloc_destroy_zone -function deallocates all memory associated with objects in -.Fa zone -as well as -.Fa zone -itself. -.Pp -The -.Fn malloc_default_zone -function returns the default system malloc zone, used by -.Xr malloc 3 , -and -.Xr free 3 . -.Pp -The -.Fn malloc_zone_from_ptr -function returns a pointer to the malloc zone which contains -.Fa ptr -or NULL, if the pointer does not point to an allocated object in any current -malloc zone. -.Pp -The -.Fn malloc_zone_malloc , -.Fn malloc_zone_calloc , -.Fn malloc_zone_valloc , -.Fn malloc_zone_realloc , -.Fn malloc_zone_memalign , -and -.Fn malloc_zone_free -perform the same task on -.Fa zone -as their non-prefixed variants, -.Xr malloc 3 , -.Xr calloc 3 , -.Xr valloc 3 , -.Xr realloc 3 , -.Xr posix_memalign 3 , -and -.Xr free 3 perform on the default system malloc zone. -.Sh RETURN VALUES -The -.Fn malloc_create_zone , -.Fn malloc_default_zone , -and -.Fn malloc_zone_from_ptr -functions return a pointer to a malloc_zone_t structure, or NULL if there was -an error. -.Pp -The -.Fn malloc_zone_malloc , -.Fn malloc_zone_calloc , -.Fn malloc_zone_valloc , -.Fn malloc_zone_realloc , -and -.Fn malloc_zone_memalign -functions return a pointer to allocated memory. If there is an error, they -return a NULL pointer. They are not required to set -.Va errno . -.Sh SEE ALSO -.Xr malloc 3 , -.Xr posix_memalign 3 diff --git a/src/libmalloc/man/manpages.lst b/src/libmalloc/man/manpages.lst deleted file mode 100644 index c2cd0fe9d..000000000 --- a/src/libmalloc/man/manpages.lst +++ /dev/null @@ -1,7 +0,0 @@ -# manpage tables -# [ ...] 
- -# man3 -malloc.3 malloc.3 calloc.3 free.3 realloc.3 reallocf.3 valloc.3 aligned_alloc.3 -malloc_size.3 malloc_size.3 malloc_good_size.3 -malloc_zone_malloc.3 malloc_zone_malloc.3 malloc_create_zone.3 malloc_destroy_zone.3 malloc_default_zone.3 malloc_zone_from_ptr.3 malloc_zone_calloc.3 malloc_zone_valloc.3 malloc_zone_realloc.3 malloc_zone_memalign.3 malloc_zone_free.3 diff --git a/src/libmalloc/private/make_tapi_happy.h b/src/libmalloc/private/make_tapi_happy.h deleted file mode 100644 index 0938d4df5..000000000 --- a/src/libmalloc/private/make_tapi_happy.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -/* Make TAPI happy by declaring things that other projects forward-declare. */ -/* This header is not installed anywhere. */ - -/* For Libsystem */ -void _malloc_fork_child(void); -void _malloc_fork_parent(void); -void _malloc_fork_prepare(void); - -/* For various debugging tools? */ - -void scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count); -void (*malloc_error(void (*func)(int)))(int); -extern uint64_t __mach_stack_logging_shared_memory_address; - -/* At least for malloc_replay.cpp */ -void mag_set_thread_index(unsigned int index); - -/* Externally visible from magazine_malloc.h. Not worth pulling the whole header into tapi just for this one. */ - -boolean_t scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone); - -/* Globals for performance tools, replicated here from the top of malloc.c */ - -typedef void(malloc_logger_t)(uint32_t type, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t result, - uint32_t num_hot_frames_to_skip); -extern int32_t malloc_num_zones; -extern int32_t malloc_num_zones_allocated; -extern malloc_zone_t **malloc_zones; -extern malloc_logger_t *malloc_logger; -extern unsigned malloc_check_start; -extern unsigned malloc_check_counter; -extern unsigned malloc_check_each; -extern int _malloc_no_asl_log; -extern int _os_cpu_number_override; - -/* Globally visible for manual debugging? */ - -extern unsigned szone_check_counter; -extern unsigned szone_check_start; -extern unsigned szone_check_modulo; - -/* CoreServices checkfixes */ -void malloc_create_legacy_default_zone(void); -void zeroify_scalable_zone(malloc_zone_t *zone); - -/* This is extern-declared by some projects, like racoon (ipsec) */ -/* Maybe we can change it to a symbol-alias of free? */ -void vfree(void *ptr); - -/* Obsolete entry points. They don't work, don't use them. 
*/ -void set_malloc_singlethreaded(boolean_t); -void malloc_singlethreaded(void); -int malloc_debug(int); - -/* WeChat references this, only god knows why. This symbol does nothing. */ -extern int stack_logging_enable_logging; - -/* For debugging */ -void tiny_print_region_free_list(void *ptr, unsigned int slot); - diff --git a/src/libmalloc/private/malloc_implementation.h b/src/libmalloc/private/malloc_implementation.h deleted file mode 100644 index 278f09447..000000000 --- a/src/libmalloc/private/malloc_implementation.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -/* Private interfaces between libsystem_malloc, libSystem, and MallocStackLogging */ - -#include - -#ifndef _MALLOC_IMPLEMENTATION_H_ -#define _MALLOC_IMPLEMENTATION_H_ - - -/********* Libsystem initializers ************/ - -struct _malloc_functions { - unsigned long version; - /* The following functions are included in version 1 of this structure */ - void * (*dlopen) (const char *path, int mode); - void * (*dlsym) (void *handle, const char *symbol); -}; - -void __malloc_init(const char *apple[]); -void __stack_logging_early_finished(const struct _malloc_functions *); - - - -/* - * Definitions intended for the malloc stack logging library only. - * This is SPI that is *not* intended for use elsewhere. It will change - * and will eventually be removed, without prior warning. 
- */ -#if MALLOC_ENABLE_MSL_LITE_SPI - -typedef struct szone_s szone_t; - -typedef struct _malloc_msl_lite_hooks_s { - szone_t *(*create_and_insert_msl_lite_zone)(const char *name, - void *mallocp, void *callocp, - void *vallocp, void *reallocp, void *batch_mallocp, - void *batch_freep, void *memalignp, void *freep, - void *free_definite_sizep, void *sizep); - malloc_zone_t *(*helper_zone)(szone_t *zone); - size_t (*szone_size)(szone_t *szone, const void *ptr); - void *(*szone_malloc)(szone_t *szone, size_t size); - void *(*szone_malloc_should_clear)(szone_t *szone, size_t size, - boolean_t cleared_requested); - void (*szone_free)(szone_t *szone, void *ptr); - void *(*szone_realloc)(szone_t *szone, void *ptr, size_t new_size); - void *(*szone_valloc)(szone_t *szone, size_t size); - void *(*szone_memalign)(szone_t *szone, size_t alignment, size_t size); - unsigned (*szone_batch_malloc)(szone_t *szone, size_t size, void **results, - unsigned count); - void (*szone_batch_free)(szone_t *szone, void **to_be_freed, unsigned count); - boolean_t (*has_default_zone0)(void); - - size_t (*calloc_get_size)(size_t num_items, size_t size, size_t extra_size, - size_t *total_size); - - size_t (*szone_good_size)(szone_t *szone, size_t size); - malloc_zone_t *(*basic_zone)(szone_t *zone); -} _malloc_msl_lite_hooks_t; - -#endif // MALLOC_ENABLE_MSL_LITE_SPI - -#endif - diff --git a/src/libmalloc/private/malloc_private.h b/src/libmalloc/private/malloc_private.h deleted file mode 100644 index a169b6aee..000000000 --- a/src/libmalloc/private/malloc_private.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 1999-2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef _MALLOC_PRIVATE_H_ -#define _MALLOC_PRIVATE_H_ - -/* Here be dragons (SPIs) */ - -#include -#include -#include -#include -#include - -/********* Callbacks ************/ - -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) -void malloc_enter_process_memory_limit_warn_mode(void); - /* A callback invoked once the process receives a warning for approaching - * memory limit. */ - -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) -void malloc_memory_event_handler(unsigned long); - /* A function invoked when malloc needs to handle any flavor of - * memory pressure notification or process memory limit notification. 
*/ - -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) -void * reallocarray(void * in_ptr, size_t nmemb, size_t size) __DARWIN_EXTSN(reallocarray) __result_use_check; - -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) -void * reallocarrayf(void * in_ptr, size_t nmemb, size_t size) __DARWIN_EXTSN(reallocarrayf) __result_use_check; - -/* - * Checks whether an address might belong to any registered zone. False positives - * are allowed (e.g. the memory was freed, or it's in a part of the address - * space used by malloc that has not yet been allocated.) False negatives are - * not allowed. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -boolean_t malloc_claimed_address(void *ptr) __result_use_check; - -/* - * Checks whether an address might belong to a given zone. False positives are - * allowed (e.g. the memory was freed, or it's in a part of the address space - * used by malloc that has not yet been allocated.) False negatives are not - * allowed. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -boolean_t malloc_zone_claimed_address(malloc_zone_t *zone, void *ptr) __result_use_check; - -/** - * Returns whether the nano allocator is engaged. The return value is 0 if Nano - * is not engaged and the allocator version otherwise. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -int malloc_engaged_nano(void) __result_use_check; - -#endif /* _MALLOC_PRIVATE_H_ */ diff --git a/src/libmalloc/private/stack_logging.h b/src/libmalloc/private/stack_logging.h deleted file mode 100644 index 7f75b3fcf..000000000 --- a/src/libmalloc/private/stack_logging.h +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (c) 1999-2007 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#ifndef _STACK_LOGGING_H_ -#define _STACK_LOGGING_H_ - -#import -#import -#import -#import -#import - - -/********* MallocStackLogging permanant SPIs ************/ - -#define stack_logging_type_free 0 -#define stack_logging_type_generic 1 /* anything that is not allocation/deallocation */ -#define stack_logging_type_alloc 2 /* malloc, realloc, etc... */ -#define stack_logging_type_dealloc 4 /* free, realloc, etc... */ -#define stack_logging_type_vm_allocate 16 /* vm_allocate or mmap */ -#define stack_logging_type_vm_deallocate 32 /* vm_deallocate or munmap */ -#define stack_logging_type_mapped_file_or_shared_mem 128 - -// The valid flags include those from VM_FLAGS_ALIAS_MASK, which give the user_tag of allocated VM regions. 
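malloc_private.h above declares reallocarray and reallocarrayf as SPI; the contract (fail cleanly with NULL and ENOMEM when nmemb * size would overflow, rather than silently allocating a too-small block) matches the BSD function of the same name. A sketch of the usual pattern, assuming a libc that declares reallocarray in stdlib.h (OpenBSD, glibc 2.26+); the grow helper and its names are hypothetical:

#include <stdlib.h>

/* Doubles the capacity of an int array without risking the silent
 * overflow of a hand-written newcap * sizeof(int) multiplication. */
static int
grow(int **items, size_t *cap)
{
	size_t newcap = *cap ? *cap * 2 : 16;
	int *p = reallocarray(*items, newcap, sizeof **items);
	if (p == NULL)
		return -1;	/* *items is untouched and still valid */
	*items = p;
	*cap = newcap;
	return 0;
}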
-#define stack_logging_valid_type_flags ( \ -stack_logging_type_generic | \ -stack_logging_type_alloc | \ -stack_logging_type_dealloc | \ -stack_logging_type_vm_allocate | \ -stack_logging_type_vm_deallocate | \ -stack_logging_type_mapped_file_or_shared_mem | \ -VM_FLAGS_ALIAS_MASK); - -// Following flags are absorbed by stack_logging_log_stack() -#define stack_logging_flag_zone 8 /* NSZoneMalloc, etc... */ -#define stack_logging_flag_cleared 64 /* for NewEmptyHandle */ - -typedef void(malloc_logger_t)(uint32_t type, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t result, - uint32_t num_hot_frames_to_skip); -extern malloc_logger_t *malloc_logger; - -/* - * Load the MallocStackLogging library and register it with libmalloc - */ -boolean_t malloc_register_stack_logger(void); - - -/********* MallocStackLogging deprecated SPIs ************ - * - * Everything here should be considered deprecated and slated for being deleted. - * Move over to the equivilant in MallocStackLogging.h - */ - -#define STACK_LOGGING_MAX_STACK_SIZE 512 - -#define STACK_LOGGING_VM_USER_TAG(flags) (((flags) & VM_FLAGS_ALIAS_MASK) >> 24) - -/* Macro used to disguise addresses so that leak finding can work */ -#define STACK_LOGGING_DISGUISE(address) ((address) ^ 0x00005555) /* nicely idempotent */ - -typedef enum { - stack_logging_mode_none = 0, - stack_logging_mode_all, - stack_logging_mode_malloc, - stack_logging_mode_vm, - stack_logging_mode_lite, - stack_logging_mode_vmlite -} stack_logging_mode_type; - -extern boolean_t turn_on_stack_logging(stack_logging_mode_type mode); -extern void turn_off_stack_logging(void); - -/* constants for enabling/disabling malloc stack logging via the memorystatus_vm_pressure_send sysctl */ -#define MEMORYSTATUS_ENABLE_MSL_MALLOC 0x10000000 -#define MEMORYSTATUS_ENABLE_MSL_VM 0x20000000 -#define MEMORYSTATUS_ENABLE_MSL_LITE 0x40000000 -#define MEMORYSTATUS_DISABLE_MSL 0x80000000 -#define MEMORYSTATUS_ENABLE_MSL_LITE_FULL (MEMORYSTATUS_ENABLE_MSL_LITE | MEMORYSTATUS_ENABLE_MSL_VM | MEMORYSTATUS_ENABLE_MSL_MALLOC) -#define MEMORYSTATUS_ENABLE_MSL_LITE_VM (MEMORYSTATUS_ENABLE_MSL_LITE | MEMORYSTATUS_ENABLE_MSL_VM) - - - -/* 64-bit-aware stack log access. As new SPI, these routines are prefixed with double-underscore to avoid conflict with Libsystem clients. */ - -typedef struct mach_stack_logging_record { - uint32_t type_flags; - uint64_t stack_identifier; - uint64_t argument; - mach_vm_address_t address; -} mach_stack_logging_record_t; - - -extern kern_return_t __mach_stack_logging_start_reading(task_t task, vm_address_t shared_memory_address, boolean_t *uses_lite_mode) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_start_reading", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); -extern kern_return_t __mach_stack_logging_stop_reading(task_t task) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_stop_reading", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - -/* Clients *should* call these start/stop functions to properly initialize stack logging data - * structures and fully clean them up when they're done looking at a process. If the client does *not* - * call these then currently it should still work but some data structures will still remain after - * reading the stack logs (e.g., an extra shared memory segment, an open stack log file, etc). - * NULL can be passed for uses_lite_mode if the client doesn’t need them. 
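The malloc_logger hook declared above is how tools observe allocation events. A sketch of installing one: the typedef is mirrored from this header, the hook body avoids anything that could allocate (which would recurse), and the argument conventions noted in the comments are assumptions based on how libmalloc invokes the hook, not documented guarantees:

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Mirrors the malloc_logger_t typedef in stack_logging.h above. */
typedef void(malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2,
    uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
extern malloc_logger_t *malloc_logger;

static void
my_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
    uintptr_t result, uint32_t num_hot_frames_to_skip)
{
	/* For allocations, result is assumed to carry the returned pointer;
	 * for frees, arg2 the pointer being freed. write(2) rather than
	 * printf, because the hook itself must not allocate. */
	static const char msg[] = "malloc event\n";
	write(STDERR_FILENO, msg, sizeof msg - 1);
	(void)type; (void)arg1; (void)arg2; (void)arg3;
	(void)result; (void)num_hot_frames_to_skip;
}

int
main(void)
{
	malloc_logger = my_logger;	/* subsequent events hit the hook */
	free(malloc(16));
	malloc_logger = NULL;
	return 0;
}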
- * - * It is recommended that the client suspend the task before actually reading the stacks, and resume the task when done, - * if the task uses lite mode. - */ - -extern kern_return_t __mach_stack_logging_set_file_path(task_t task, char* file_path) - API_DEPRECATED("No longer supported", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - -extern kern_return_t __mach_stack_logging_get_frames(task_t task, mach_vm_address_t address, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count) - API_DEPRECATED("No longer supported", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - /* Gets the last allocation record (malloc, realloc, or free) about address */ - -extern kern_return_t __mach_stack_logging_enumerate_records(task_t task, mach_vm_address_t address, void enumerator(mach_stack_logging_record_t, void *), void *context) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_disk_stack_logs_enumerate_from_task", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - /* Applies enumerator to all records involving address sending context as enumerator's second parameter; if !address, applies enumerator to all records */ - -extern kern_return_t __mach_stack_logging_frames_for_uniqued_stack(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count) - API_DEPRECATED("use __mach_stack_logging_get_frames_for_stackid instead", macos(10.9, 10.13), ios(7.0, 11.0), watchos(1.0, 4.0), tvos(9.0, 11.0)); - /* Given a uniqued_stack fills stack_frames_buffer. */ - -extern kern_return_t __mach_stack_logging_get_frames_for_stackid(task_t task, uint64_t stack_identifier, mach_vm_address_t *stack_frames_buffer, uint32_t max_stack_frames, uint32_t *count, - bool *last_frame_is_threadid) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_get_frames_for_stackid", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - /* Given a uniqued_stack fills stack_frames_buffer. 
*/ - -extern uint64_t __mach_stack_logging_stackid_for_vm_region(task_t task, mach_vm_address_t address) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_stackid_for_vm_region", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - /* given the address of a vm region, lookup it's stackid */ - - -struct backtrace_uniquing_table; - -extern kern_return_t -__mach_stack_logging_uniquing_table_read_stack(struct backtrace_uniquing_table *uniquing_table, - uint64_t stackid, - mach_vm_address_t *out_frames_buffer, - uint32_t *out_frames_count, - uint32_t max_frames) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_read_stack", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - -extern -struct backtrace_uniquing_table * -__mach_stack_logging_copy_uniquing_table(task_t task) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_copy_from_task", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); -/* returns a retained pointer to copy of the task's uniquing table */ - -extern -void -__mach_stack_logging_uniquing_table_release(struct backtrace_uniquing_table *) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_release", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - -extern -void -__mach_stack_logging_uniquing_table_retain(struct backtrace_uniquing_table *) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_retain", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); - -extern -size_t -__mach_stack_logging_uniquing_table_sizeof(struct backtrace_uniquing_table *) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_sizeof", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); -/* returns the serialized size of a uniquing talbe in bytes */ - -extern -void * -__mach_stack_logging_uniquing_table_serialize(struct backtrace_uniquing_table *table, mach_vm_size_t *size) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_serialize", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); -/* Writes out a serialized representation of the table. Free it with mach_vm_deallocate. */ - -extern -struct backtrace_uniquing_table * -__mach_stack_logging_uniquing_table_copy_from_serialized(void *buffer, size_t size) - API_DEPRECATED("use MallocStackLogging/MallocStackLogging.h:msl_uniquing_table_copy_from_serialized", macos(10.9, 10.15), ios(7.0, 13.0), watchos(1.0, 6.0), tvos(9.0, 13.0)); -/* creates a malloc uniquing table from a serialized representation */ - -/* - * thread_stack_pcs is now declared in its own header file, - */ - -#endif // _STACK_LOGGING_H_ diff --git a/src/libmalloc/resolver/resolver.c b/src/libmalloc/resolver/resolver.c deleted file mode 100644 index 8eb2613b8..000000000 --- a/src/libmalloc/resolver/resolver.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - - diff --git a/src/libmalloc/resolver/resolver.h b/src/libmalloc/resolver/resolver.h deleted file mode 100644 index 7509b667b..000000000 --- a/src/libmalloc/resolver/resolver.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __MALLOC_RESOLVER_H__ -#define __MALLOC_RESOLVER_H__ - -#include "resolver_internal.h" - - -#endif // __MALLOC_RESOLVER_H__ diff --git a/src/libmalloc/resolver/resolver_internal.h b/src/libmalloc/resolver/resolver_internal.h deleted file mode 100644 index 0b2eba83c..000000000 --- a/src/libmalloc/resolver/resolver_internal.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __MALLOC_RESOLVER_INTERNAL_H__ -#define __MALLOC_RESOLVER_INTERNAL_H__ - -#define OS_RESOLVED_VARIANT_ADDR(s) (void *)(&s) - -#endif // __MALLOC_RESOLVER_INTERNAL_H__ diff --git a/src/libmalloc/src/base.h b/src/libmalloc/src/base.h deleted file mode 100644 index 88b9ff00f..000000000 --- a/src/libmalloc/src/base.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __BASE_H -#define __BASE_H - -#ifndef __has_extension -#define __has_extension(x) 0 -#endif - -#if __has_extension(c_static_assert) -#define MALLOC_STATIC_ASSERT(x, y) _Static_assert((x), y) -#else -#define MALLOC_STATIC_ASSERT(x, y) -#endif - -#define MALLOC_ASSERT(e) ({ \ - if (__builtin_expect(!(e), 0)) { \ - __asm__ __volatile__ (""); \ - __builtin_trap(); \ - } \ -}) - -#define MALLOC_FATAL_ERROR(cause, message) ({ \ - _os_set_crash_log_cause_and_message((cause), "FATAL ERROR - " message); \ - __asm__ __volatile__ (""); \ - __builtin_trap(); \ -}) - -#define MALLOC_REPORT_FATAL_ERROR(cause, message) ({ \ - malloc_report(ASL_LEVEL_ERR, "*** FATAL ERROR - " message ".\n"); \ - MALLOC_FATAL_ERROR((cause), message); \ -}) - -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__) -# define __APPLE_API_PRIVATE -# include -# if defined(__i386__) || defined(__x86_64__) -# define _COMM_PAGE_VERSION_REQD 9 -# else -# define _COMM_PAGE_VERSION_REQD 3 -# endif -# undef __APPLE_API_PRIVATE -#else -# include -#endif - -#if defined(__i386__) || defined(__x86_64__) -// nano vs. magazine have different definitions -// for this cache-line size. -# define MALLOC_CACHE_LINE 128 -# define MALLOC_NANO_CACHE_LINE 64 -#elif defined(__arm__) || defined(__arm64__) -# define MALLOC_CACHE_LINE 64 -# define MALLOC_NANO_CACHE_LINE 64 -#else -# define MALLOC_CACHE_LINE 32 -# define MALLOC_NANO_CACHE_LINE 32 -#endif - -#define MALLOC_CACHE_ALIGN __attribute__ ((aligned (MALLOC_CACHE_LINE) )) -#define MALLOC_NANO_CACHE_ALIGN __attribute__ ((aligned (MALLOC_NANO_CACHE_LINE) )) -#define MALLOC_EXPORT extern __attribute__((visibility("default"))) -#define MALLOC_NOEXPORT __attribute__((visibility("hidden"))) -#define MALLOC_NOINLINE __attribute__((noinline)) -#define MALLOC_INLINE __inline__ -#define MALLOC_ALWAYS_INLINE __attribute__((always_inline)) -#define MALLOC_PACKED __attribute__((packed)) -#define MALLOC_USED __attribute__((used)) -#define MALLOC_UNUSED __attribute__((unused)) -#define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) {} - -#define SCRIBBLE_BYTE 0xaa /* allocated scribble */ -#define SCRABBLE_BYTE 0x55 /* free()'d scribble */ -#define SCRUBBLE_BYTE 0xdd /* madvise(..., MADV_FREE) scriblle */ - -#define NDEBUG 1 -#define trunc_page_quanta(x) trunc_page((x)) -#define round_page_quanta(x) round_page((x)) -#define vm_page_quanta_size (vm_page_size) -#define vm_page_quanta_shift (vm_page_shift) - -// add a guard page before and after each VM region to help debug -#define MALLOC_ADD_GUARD_PAGES (1 << 0) -// do not protect prelude page -#define MALLOC_DONT_PROTECT_PRELUDE (1 << 1) -// do not protect postlude page -#define MALLOC_DONT_PROTECT_POSTLUDE (1 << 2) -// write 0x55 onto free blocks -#define MALLOC_DO_SCRIBBLE (1 << 3) -// call abort() on any malloc error, such as double free or out of memory. 
-#define MALLOC_ABORT_ON_ERROR (1 << 4) -// allocate objects such that they may be used with VM purgability APIs -#define MALLOC_PURGEABLE (1 << 5) -// call abort() on malloc errors, but not on out of memory. -#define MALLOC_ABORT_ON_CORRUPTION (1 << 6) - -/* - * msize - a type to refer to the number of quanta of a tiny or small - * allocation. A tiny block with an msize of 3 would be 3 << SHIFT_TINY_QUANTUM - * bytes in size. - */ -typedef unsigned short msize_t; - -typedef unsigned int grain_t; // N.B. wide enough to index all free slots -typedef struct large_entry_s large_entry_t; -typedef struct szone_s szone_t; -typedef struct rack_s rack_t; -typedef struct magazine_s magazine_t; -typedef int mag_index_t; -typedef void *region_t; - -#endif // __BASE_H diff --git a/src/libmalloc/src/bitarray.c b/src/libmalloc/src/bitarray.c deleted file mode 100644 index 3e44f455e..000000000 --- a/src/libmalloc/src/bitarray.c +++ /dev/null @@ -1,683 +0,0 @@ -/* - * Copyright (c) 1999, 2000, 2003, 2005, 2008, 2012 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -// -// bitarray.c -// bitarray -// -// Created by Bertrand Serlet on 9/26/10. -// Copyright (c) 2010 Apple. All rights reserved. 
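base.h above defines msize_t as a count of quanta rather than bytes. A worked example of the arithmetic in its comment; SHIFT_TINY_QUANTUM is defined elsewhere in libmalloc, and the value 4 (a 16-byte tiny quantum) is assumed here purely for illustration:

#include <stdio.h>

#define SHIFT_TINY_QUANTUM 4	/* assumed: 1 quantum = 16 bytes */
typedef unsigned short msize_t;

int
main(void)
{
	/* Per the base.h comment, a tiny block of msize m occupies
	 * m << SHIFT_TINY_QUANTUM bytes. */
	for (msize_t m = 1; m <= 4; m++)
		printf("msize %u -> %u bytes\n", (unsigned)m,
		    (unsigned)(m << SHIFT_TINY_QUANTUM));
	return 0;	/* prints 16, 32, 48, 64 */
}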
-// - -#include "internal.h" - -/******************************** Utilities ***************************/ - -#define STATIC_INLINE static __inline - -STATIC_INLINE unsigned -__ffsll(uint64_t xx) -{ -#if defined(__LP64__) - return __builtin_ffsl(xx); -#else - return __builtin_ffsll(xx); -#endif -} - -#define BIT_SET(old, bit) ((old) | (1ULL << (bit))) -#define BIT_GET(old, bit) ((old) & (1ULL << (bit))) -#define BIT_ZAP(old, bit) ((old) & ~(1ULL << (bit))) - -// several variants below of bit setting or zapping to generate minimal code -// All these do 1 memory read and (maybe) 1 memory write -STATIC_INLINE bool -word_get_bit_simple(uint64_t *word, unsigned bit) -{ - uint64_t old = *word; - return BIT_GET(old, bit) != 0; -} - -STATIC_INLINE void -word_set_bit_simple(uint64_t *word, unsigned bit) -{ - uint64_t old = *word; - *word = BIT_SET(old, bit); -} - -STATIC_INLINE bool -word_set_bit_changed(uint64_t *word, unsigned bit) -{ - // returns 1 iff word has changed - uint64_t old = *word; - uint64_t new = BIT_SET(old, bit); - if (old == new) { - return 0; - } - *word = new; - return 1; -} - -STATIC_INLINE bool -word_set_bit_changed_go_down(uint64_t *word, unsigned bit, bool *was_non_zero) -{ - // returns 1 iff word changed - // sets was_non_zero (when something changed) - uint64_t old = *word; - uint64_t new = BIT_SET(old, bit); - if (old == new) { - return 0; - } - *word = new; - *was_non_zero = old != 0; - return 1; -} - -STATIC_INLINE bool -word_set_bit_go_down(uint64_t *word, unsigned bit) -{ - // returns 1 iff level below should be set too - uint64_t old = *word; - uint64_t new = BIT_SET(old, bit); - if (old == new) { - return 0; - } - *word = new; - return !old; -} - -STATIC_INLINE void -word_zap_bit_simple(uint64_t *word, unsigned bit) -{ - uint64_t old = *word; - *word = BIT_ZAP(old, bit); -} - -STATIC_INLINE bool -word_zap_bit_changed(uint64_t *word, unsigned bit) -{ - // returns 1 iff word changed - uint64_t old = *word; - uint64_t new = BIT_ZAP(old, bit); - if (old == new) { - return 0; - } - *word = new; - return 1; -} - -STATIC_INLINE bool -word_zap_bit_changed_go_down(uint64_t *word, unsigned bit, bool *is_now_zero) -{ - // returns 1 iff word changed - // sets is_now_zero (when something changed) - uint64_t old = *word; - uint64_t new = BIT_ZAP(old, bit); - if (old == new) { - return 0; - } - *word = new; - *is_now_zero = !new; - return 1; -} - -STATIC_INLINE bool -word_zap_bit_go_down(uint64_t *word, unsigned bit) -{ - // returns 1 iff level below might require a bit-zeroing - uint64_t old = *word; - uint64_t new = BIT_ZAP(old, bit); - if (old == new) { - return 0; - } - *word = new; - return !new; -} - -/******************************** Helpers ***************************/ - -#define NB 9 // number of bits we process at once -// must be at least 6 (64-bit) and 9 seems the best on x86 -#define MASKNB ((1 << NB) - 1) // to just keep these bits -#define NUM_64b (1 << (NB - 6)) // number of 64-bit words we process at once - -// number of uint64_t of summaries -#define LEVEL0 (NUM_64b) -#define LEVEL1 (LEVEL0 + (1 << NB) * NUM_64b) -#define LEVEL2 (LEVEL1 + (1 << (NB + NB)) * NUM_64b) -#define LEVEL3 (LEVEL2 + (1 << (NB + NB + NB)) * NUM_64b) - -#define MAX_LEVEL 5 - -static const unsigned levels_num_words[] = { - LEVEL0, LEVEL1, LEVEL2, LEVEL3}; // this encodes the number of words reserved for the bitmap summaries at various levels - -STATIC_INLINE bool -GET_SIMPLE(uint64_t *word, unsigned bit) -{ - return word_get_bit_simple(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE void 
-SET_SIMPLE(uint64_t *word, unsigned bit) -{ - word_set_bit_simple(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE bool -SET_CHANGED(uint64_t *word, unsigned bit) -{ - // returns 1 iff word changed - return word_set_bit_changed(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE bool -SET_CHANGED_GO_DOWN(uint64_t *word, unsigned bit, bool *was_non_zero) -{ - // returns 1 iff word changed - // sets was_non_zero (when something changed) - return word_set_bit_changed_go_down(word + (bit >> 6), bit & 63, was_non_zero); -} - -STATIC_INLINE bool -SET_GO_DOWN(uint64_t *word, unsigned bit) -{ - // returns 1 iff level below should be set too - return word_set_bit_go_down(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE void -ZAP_SIMPLE(uint64_t *word, unsigned bit) -{ - return word_zap_bit_simple(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE bool -ZAP_CHANGED(uint64_t *word, unsigned bit) -{ - // returns 1 iff word changed - return word_zap_bit_changed(word + (bit >> 6), bit & 63); -} - -STATIC_INLINE bool -all_zeros(uint64_t *words) -{ - for (unsigned w = 0; w < NUM_64b; w++) { - if (words[w]) { - return 0; - } - } - return 1; -} - -STATIC_INLINE bool -ZAP_CHANGED_GO_DOWN(uint64_t *word, unsigned bit, bool *is_now_zero) -{ - // returns 1 iff word changed - // sets is_now_zero (when something changed) - bool changed = word_zap_bit_changed_go_down(word + (bit >> 6), bit & 63, is_now_zero); - if (changed && (NUM_64b != 1)) { - // One component went entirely zero, now examine all components in the level - if (!all_zeros(word)) { - *is_now_zero = 0; - } - } - return changed; -} - -STATIC_INLINE bool -ZAP_GO_DOWN(uint64_t *word, unsigned bit) -{ - // returns 1 iff level below should be changed too - bool changed = word_zap_bit_go_down(word + (bit >> 6), bit & 63); - if (changed && (NUM_64b != 1)) { - // One component went entirely zero, now examine all components in the level - if (!all_zeros(word)) { - return 0; - } - } - return changed; -} - -STATIC_INLINE unsigned -FFS(uint64_t *word) -{ -// does NUM_64b memory reads, at most -#if NB == 6 - return __ffsll(*word); -#else - for (unsigned w = 0; w < NUM_64b; w++) { - unsigned f = __ffsll(word[w]); - if (f) { - return f + (w << 6); - } - } - return 0; -#endif -} - -/******************************** Entry Points ***************************/ - -size_t -bitarray_size(unsigned log_size) -{ - assert(log_size <= MAX_LEVEL * NB); - unsigned num = NUM_64b; - if (log_size > NB) { - unsigned level = (log_size - NB - 1) / NB; - num = levels_num_words[level] + (1 << (log_size - 6)); - } - return num * sizeof(uint64_t); -} - -bitarray_t -bitarray_create(unsigned log_size) -{ - return calloc(1, bitarray_size(log_size)); -} - -bool -bitarray_get(bitarray_t bits, unsigned log_size, index_t index) -{ - assert(log_size <= MAX_LEVEL * NB); - assert(index < (1 << log_size)); - if (log_size <= NB) { - return GET_SIMPLE(bits, index); - } - unsigned level = (log_size - NB - 1) / NB; - unsigned bit; - bit = index & MASKNB; - index >>= NB; - return GET_SIMPLE(bits + levels_num_words[level] + index * NUM_64b, bit); -} - -bool -bitarray_set(bitarray_t bits, unsigned log_size, index_t index) -{ - // returns whether changed - assert(log_size <= MAX_LEVEL * NB); - assert(index < (1 << log_size)); - if (log_size <= NB) { - return SET_CHANGED(bits, index); - } - unsigned level = (log_size - NB - 1) / NB; - bool was_non_zero; - unsigned bit; - bit = index & MASKNB; - index >>= NB; - // printf("SET_CHANGED_GO_DOWN(bits + %d, %d,…)\n", levels_num_words[level] + index, bit); - if 
(!SET_CHANGED_GO_DOWN(bits + levels_num_words[level] + index * NUM_64b, bit, &was_non_zero)) { - return 0; - } - if (was_non_zero) { - return 1; - } - switch (level) { - case 3: - bit = index & MASKNB; - index >>= NB; - if (!SET_GO_DOWN(bits + LEVEL2 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 2: - bit = index & MASKNB; - index >>= NB; - if (!SET_GO_DOWN(bits + LEVEL1 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 1: - bit = index & MASKNB; - index >>= NB; - if (!SET_GO_DOWN(bits + LEVEL0 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 0: - SET_SIMPLE(bits, index & MASKNB); - return 1; - default: - MALLOC_FATAL_ERROR(level, "invalid bitarray level"); - } -} - -bool -bitarray_zap(bitarray_t bits, unsigned log_size, index_t index) -{ - assert(log_size <= MAX_LEVEL * NB); - assert(index < (1 << log_size)); - if (log_size <= NB) { - return ZAP_CHANGED(bits, index); - } - unsigned level = (log_size - NB - 1) / NB; - bool is_now_zero; - unsigned bit; - bit = index & MASKNB; - index >>= NB; - if (!ZAP_CHANGED_GO_DOWN(bits + levels_num_words[level] + index * NUM_64b, bit, &is_now_zero)) { - return 0; - } - if (!is_now_zero) { - return 1; - } - switch (level) { - case 3: - bit = index & MASKNB; - index >>= NB; - if (!ZAP_GO_DOWN(bits + LEVEL2 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 2: - bit = index & MASKNB; - index >>= NB; - if (!ZAP_GO_DOWN(bits + LEVEL1 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 1: - bit = index & MASKNB; - index >>= NB; - if (!ZAP_GO_DOWN(bits + LEVEL0 + index * NUM_64b, bit)) { - return 1; - } - /* no break */ - case 0: - ZAP_SIMPLE(bits, index & MASKNB); - return 1; - default: - MALLOC_FATAL_ERROR(level, "invalid bitarray level"); - } -} - -// Note in the following macro that "words" and "base" are variables being written -#define ADJUST_OFFSET_FOR_FFS(words, base, current_level) \ - { \ - words += (1 << (NB * current_level)) * NUM_64b; \ - base = (base << NB) + FFS(words + base * NUM_64b) - 1; \ - } - -// Note in the following macro that "words" and "base" are variables being written -#define ADJUST_OFFSET_FOR_FFS_ACROSS_SUMMARIES(words, base, level) \ - { \ - switch (level) { \ - case 4: \ - ADJUST_OFFSET_FOR_FFS(words, base, 0); \ - ADJUST_OFFSET_FOR_FFS(words, base, 1); \ - ADJUST_OFFSET_FOR_FFS(words, base, 2); \ - break; \ - case 3: \ - ADJUST_OFFSET_FOR_FFS(words, base, 0); \ - ADJUST_OFFSET_FOR_FFS(words, base, 1); \ - break; \ - case 2: \ - ADJUST_OFFSET_FOR_FFS(words, base, 0); \ - break; \ - case 1: \ - break; \ - default: \ - MALLOC_FATAL_ERROR(level, "invalid bitarray level"); \ - } \ - } - -// Note in the following macro that "ix" and "bit" are variables being written -#define ZAP_SUMMARIES(bits, ix, level) \ - { \ - unsigned bit; \ - switch (level) { \ - case 3: \ - bit = ix & MASKNB; \ - ix >>= NB; \ - if (!ZAP_GO_DOWN(bits + LEVEL2 + ix * NUM_64b, bit)) { \ - break; \ - } \ - case 2: \ - bit = ix & MASKNB; \ - ix >>= NB; \ - if (!ZAP_GO_DOWN(bits + LEVEL1 + ix * NUM_64b, bit)) { \ - break; \ - } \ - case 1: \ - bit = ix & MASKNB; \ - ix >>= NB; \ - if (!ZAP_GO_DOWN(bits + LEVEL0 + ix * NUM_64b, bit)) { \ - break; \ - } \ - case 0: \ - ZAP_SIMPLE(bits, ix &MASKNB); \ - break; \ - default: \ - MALLOC_FATAL_ERROR(level, "invalid bitarray level"); \ - } \ - } - -index_t -bitarray_first_set(const bitarray_t bits, unsigned log_size) -{ - // return 0 if none set - assert(log_size <= MAX_LEVEL * NB); - uint64_t *words = bits; - unsigned bit = FFS(words); - 
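- // "bit" now holds the 1-based position of the first set bit in the top - // word group; for a single-level array that is already the result, while - // deeper arrays descend the summaries, re-running FFS within the group - // selected at each level.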
if (log_size <= NB) { - return bit; - } - if (!bit) { - return 0; - } - unsigned level = (log_size - 1) / NB; - index_t base = bit - 1; // offset, in number of uint64_t words - ADJUST_OFFSET_FOR_FFS_ACROSS_SUMMARIES(words, base, level); - words += (1 << (NB * (level - 1))) * NUM_64b; - base = (base << NB) + FFS(words + base * NUM_64b) - 1; - return base + 1; //+1 because bit N is encoded as N+1 -} - -bool -bitarray_zap_first_set(bitarray_t bits, unsigned log_size, index_t *index) -{ - assert(log_size <= MAX_LEVEL * NB); - uint64_t *words = bits; - index_t ix = FFS(words); - if (!ix) { - return 0; - } - unsigned level = (log_size - 1) / NB; - if (!level) { - ix--; - *index = ix; - ZAP_SIMPLE(bits, ix); - return 1; - } - index_t base = ix - 1; // offset, in number of uint64_t words - ADJUST_OFFSET_FOR_FFS_ACROSS_SUMMARIES(words, base, level); - words += (1 << (NB * (level - 1))) * NUM_64b; - base = (base << NB) + FFS(words + base * NUM_64b) - 1; - ix = base; - *index = ix; - assert(ix < (1 << log_size)); - level--; - bool is_now_zero; - unsigned bit; - bit = ix & MASKNB; - ix >>= NB; - if (!ZAP_CHANGED_GO_DOWN(bits + levels_num_words[level] + ix * NUM_64b, bit, &is_now_zero)) { - return 1; - } - if (!is_now_zero) { - return 1; - } - ZAP_SUMMARIES(bits, ix, level); - return 1; -} - -static unsigned -FFS_and_zap_word(uint64_t *words, unsigned max, index_t *indices, index_t to_be_added) -{ - // returns the number of bits zapped - unsigned zapped = 0; - for (unsigned w = 0; w < NUM_64b; w++) { - uint64_t word = words[w]; - if (!word) { - continue; - } - while (1) { - unsigned f = __ffsll(word); - assert(f); - f--; - // printf("%d ", f); - indices[zapped++] = f + (w << 6) + to_be_added; - word = BIT_ZAP(word, f); - if (!word) { - break; - } - if (zapped >= max) { - break; - } - } - words[w] = word; - // printf("word=%lld \n", word); - if (zapped >= max) { - break; - } - } - return zapped; -} - -unsigned -bitarray_zap_first_set_multiple(bitarray_t bits, unsigned log_size, unsigned max, index_t *indices) -{ - assert(log_size <= MAX_LEVEL * NB); - if (log_size <= NB) { - return FFS_and_zap_word(bits, max, indices, 0); - } - unsigned zapped = 0; - unsigned level = (log_size - 1) / NB; - while (zapped < max) { - /* - * the lines in this loop could be written simply as: - * if (!bitarray_zap_first_set(bits, log_size, indices + zapped)) break; - * zapped++; - * but the code is faster because it won't go up and down in the summaries - */ - uint64_t *words = bits; - index_t ix = FFS(words); - if (!ix) { - return zapped; // if the top level summary is 0, no bit is set - } - index_t base = ix - 1; // offset, in number of uint64_t words - ADJUST_OFFSET_FOR_FFS_ACROSS_SUMMARIES(words, base, level); - words += (1 << (NB * (level - 1))) * NUM_64b; // the beginning of the non-summarized bitarray - uint64_t *word = words + base * NUM_64b; // the first non-zero word - ix = base; - // the idea here is that we zap a whole bunch of bits at once - unsigned z = FFS_and_zap_word(word, max - zapped, indices + zapped, base << NB); - assert(z); - zapped += z; - if ((zapped < max) /* entire word was zapped */ || all_zeros(word) /* partial zap, a priori */) { - // adjust summaries to reflect all zeros in the bitarray - ZAP_SUMMARIES(bits, ix, level - 1); - } - } - return zapped; -} - -#if 0 -/******************************** Test and debug utilities ***************************/ - -static void print_ones(const uint64_t *bits, unsigned num_big_words) { - unsigned base = 0; - unsigned num = num_big_words * NUM_64b; - // printf("In print_ones; num=%d, num_big=%d \n", num, num_big_words); - while (num--) { - uint64_t word = *(bits++); - if (word) { - for (unsigned bit = 0; bit < 64; bit++) { - if (word & (1ULL << bit)) { printf("%d ", base + bit); } - } - } - base += 64; - } -} - -void bitarray_print(bitarray_t bits, unsigned log_size) { - assert(log_size <= MAX_LEVEL * NB); - printf("bitarray %p log_size=%d\n", bits, log_size); - if (log_size > 4 * NB) { - printf("Level 4: "); print_ones(bits, 1); printf("\n"); - printf("Level 3: "); print_ones(bits + LEVEL0, 1 << NB); printf("\n"); - printf("Level 2: "); print_ones(bits + LEVEL1, 1 << NB); printf("\n"); - printf("Level 1: "); print_ones(bits + LEVEL2, 1 << NB); printf("\n"); - printf("Level 0: "); print_ones(bits + LEVEL3, 1 << (log_size - NB)); printf("\n"); - } else if (log_size > 3 * NB) { - printf("Level 3: "); print_ones(bits, 1); printf("\n"); - printf("Level 2: "); print_ones(bits + LEVEL0, 1 << NB); printf("\n"); - printf("Level 1: "); print_ones(bits + LEVEL1, 1 << NB); printf("\n"); - printf("Level 0: "); print_ones(bits + LEVEL2, 1 << (log_size - NB)); printf("\n"); - } else if (log_size > 2 * NB) { - printf("Level 2: "); print_ones(bits, 1); printf("\n"); - printf("Level 1: "); print_ones(bits + LEVEL0, 1 << NB); printf("\n"); - printf("Level 0: "); print_ones(bits + LEVEL1, 1 << (log_size - NB)); printf("\n"); - } else if (log_size > NB) { - printf("Level 1: "); print_ones(bits, 1); printf("\n"); - printf("Level 0: "); print_ones(bits + LEVEL0, 1 << (log_size - NB)); printf("\n"); - } else { - printf("Level 0: "); print_ones(bits, 1); printf("\n"); - } -} - -bool compare_to_truth(bitarray_t bits, unsigned nbits, const bool *truth) { - uint64_t *start = bits; - if (nbits > NB) { - unsigned level = (nbits - NB - 1) / NB; - start += levels_num_words[level]; - } - bool ok = 1; - for (unsigned bit = 0; bit < (1 << nbits); bit++) { - bool expected = truth[bit]; - uint64_t word = start[bit >> 6]; - bool actual = (word >> (bit & 63)) & 1; - if (actual != expected) { - printf("*** # for bit %d, expected=%d actual=%d\n", bit, expected, actual); - ok = 0; - } - } - return ok; -} - -unsigned first_set_in_truth(const bool *truth, unsigned log_size) { - for (unsigned bit = 0; bit < (1 << log_size); bit++) { - if (truth[bit]) { return bit + 1;
} - return 0; - } - -void truth_print(const bool *truth, unsigned log_size) { - printf("Truth: "); - for (unsigned bit = 0; bit < (1 << log_size); bit++) { - if (truth[bit]) { printf("%d ", bit); } - } - printf("\n"); -} -#endif - -/* vim: set noet:ts=4:sw=4:cindent: */ diff --git a/src/libmalloc/src/bitarray.h b/src/libmalloc/src/bitarray.h deleted file mode 100644 index f1a5e4bae..000000000 --- a/src/libmalloc/src/bitarray.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 1999, 2000, 2003, 2005, 2008, 2012 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __BITARRAY_H -#define __BITARRAY_H - -typedef uint64_t *bitarray_t; // array of bits, assumed to be mostly 0 -typedef uint32_t index_t; // we limit the number of bits to be a 32-bit quantity - -/* A bitarray uses a summarization to be able to quickly say what's the first bit that is set to 1; - Altogether, each of the entry points will do a very small number of memory accesses (exact number depends on log_size) */ - -MALLOC_NOEXPORT extern size_t bitarray_size(unsigned log_size); - // For a bitarray with 1<<log_size bits, returns the number of bytes needed to contain the whole bitarray - -MALLOC_NOEXPORT extern bitarray_t bitarray_create(unsigned log_size); - // creates a bitarray with 1<<log_size bits, all initialized to 0; release it with free() - -MALLOC_NOEXPORT extern bool bitarray_get(bitarray_t bits, unsigned log_size, index_t index); -MALLOC_NOEXPORT extern bool bitarray_set(bitarray_t bits, unsigned log_size, index_t index); - // sets the bit; returns whether the bitarray was changed -MALLOC_NOEXPORT extern bool bitarray_zap(bitarray_t bits, unsigned log_size, index_t index); - // clears the bit; returns whether the bitarray was changed - -MALLOC_NOEXPORT extern index_t bitarray_first_set(const bitarray_t bits, unsigned log_size); - // returns the index of the first bit set, plus 1, or 0 if all the bits are zero -MALLOC_NOEXPORT extern bool bitarray_zap_first_set(bitarray_t bits, unsigned log_size, index_t *index); - // finds the first bit set; if found, zaps it and stores its index in *index -MALLOC_NOEXPORT extern unsigned bitarray_zap_first_set_multiple(bitarray_t bits, unsigned log_size, unsigned max, index_t *indices); - // zaps up to max set bits, storing their indices in indices[]; returns the number zapped
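- -/* - * Example use of the API above (an illustrative sketch, not part of the - * original header). bitarray_create() allocates with calloc(), so the result - * is released with free(); bitarray_first_set() reports bit N as N + 1, with - * 0 meaning "no bit set": - * - * bitarray_t ba = bitarray_create(12); // 1 << 12 bits, all zero - * bitarray_set(ba, 12, 137); // mark slot 137 - * index_t ix = bitarray_first_set(ba, 12); // yields 138 == 137 + 1 - * if (ix) { - * bitarray_zap(ba, 12, ix - 1); // clear that slot again - * } - * free(ba); - */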
-static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -BITARRAY_SET(uint32_t *bits, msize_t index) -{ - // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array - // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving - bits[(index >> 5) << 1] |= (1 << (index & 31)); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -BITARRAY_CLR(uint32_t *bits, msize_t index) -{ - bits[(index >> 5) << 1] &= ~(1 << (index & 31)); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE boolean_t -BITARRAY_BIT(uint32_t *bits, msize_t index) -{ - return ((bits[(index >> 5) << 1]) >> (index & 31)) & 1; -} - -/* Macros used to manipulate the uint32_t quantity mag_bitmap. */ - -/* BITMAPV variants are used by tiny. */ -#if defined(__LP64__) -// assert(NUM_SLOTS == 64) in which case (slot >> 5) is either 0 or 1 -#define BITMAPV_SET(bitmap, slot) (bitmap[(slot) >> 5] |= 1 << ((slot)&31)) -#define BITMAPV_CLR(bitmap, slot) (bitmap[(slot) >> 5] &= ~(1 << ((slot)&31))) -#define BITMAPV_BIT(bitmap, slot) ((bitmap[(slot) >> 5] >> ((slot)&31)) & 1) -#define BITMAPV_CTZ(bitmap) (__builtin_ctzl(bitmap)) -#else -// assert(NUM_SLOTS == 32) in which case (slot >> 5) is always 0, so code it that way -#define BITMAPV_SET(bitmap, slot) (bitmap[0] |= 1 << (slot)) -#define BITMAPV_CLR(bitmap, slot) (bitmap[0] &= ~(1 << (slot))) -#define BITMAPV_BIT(bitmap, slot) ((bitmap[0] >> (slot)) & 1) -#define BITMAPV_CTZ(bitmap) (__builtin_ctz(bitmap)) -#endif - -/* BITMAPN is used by small. (slot >> 5) takes on values from 0 to 7. */ -#define BITMAPN_SET(bitmap, slot) (bitmap[(slot) >> 5] |= 1 << ((slot)&31)) -#define BITMAPN_CLR(bitmap, slot) (bitmap[(slot) >> 5] &= ~(1 << ((slot)&31))) -#define BITMAPN_BIT(bitmap, slot) ((bitmap[(slot) >> 5] >> ((slot)&31)) & 1) - -/* returns bit # of least-significant one bit, starting at 0 (undefined if !bitmap) */ -#define BITMAP32_CTZ(bitmap) (__builtin_ctz(bitmap[0])) - -#endif // __BITARRAY_H diff --git a/src/libmalloc/src/debug.h b/src/libmalloc/src/debug.h deleted file mode 100644 index d5e17a437..000000000 --- a/src/libmalloc/src/debug.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __DEBUG_H -#define __DEBUG_H - -// set to one to debug malloc itself -#define DEBUG_MALLOC 0 -// set to one to debug malloc client -#define DEBUG_CLIENT 0 -#define DEBUG_MADVISE 0 - -#if DEBUG_MALLOC -# warning DEBUG_MALLOC ENABLED -# undef MALLOC_INLINE -# undef MALLOC_UNUSED -# undef MALLOC_ALWAYS_INLINE -# undef CHECK_MAGAZINE_PTR_LOCKED - -# define MALLOC_INLINE -# define MALLOC_UNUSED -# define MALLOC_ALWAYS_INLINE -# define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) \ - do { \ - if (TRY_LOCK(mag_ptr->magazine_lock)) { \ - malloc_report(ASL_LEVEL_ERR, "*** magazine_lock was not set %p in %s\n", \ - mag_ptr->magazine_lock, fun); \ - } \ - } while (0) -#endif // DEBUG_MALLOC - -#if DEBUG_MALLOC || DEBUG_CLIENT -# define CHECK(szone, fun) \ - if ((szone)->debug_flags & CHECK_REGIONS) { \ - szone_check_all(szone, fun); \ - } -#else // DEBUG_MALLOC || DEBUG_CLIENT -# define CHECK(szone, fun) \ - do {} while (0) -#endif // DEBUG_MALLOC || DEBUG_CLIENT - -#endif // __DEBUG_H diff --git a/src/libmalloc/src/dtrace.h b/src/libmalloc/src/dtrace.h deleted file mode 100644 index 6aae21cf7..000000000 --- a/src/libmalloc/src/dtrace.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file.
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __DTRACE_H -#define __DTRACE_H - -#ifndef DARWINTEST -#include "magmallocProvider.h" -#else -#define MAGMALLOC_ALLOCREGION(arg0, arg1, arg2, arg3) -#define MAGMALLOC_ALLOCREGION_ENABLED() (0) -#define MAGMALLOC_DEALLOCREGION(arg0, arg1, arg2) -#define MAGMALLOC_DEALLOCREGION_ENABLED() (0) -#define MAGMALLOC_DEPOTREGION(arg0, arg1, arg2, arg3, arg4) -#define MAGMALLOC_DEPOTREGION_ENABLED() (0) -#define MAGMALLOC_MADVFREEREGION(arg0, arg1, arg2, arg3) -#define MAGMALLOC_MADVFREEREGION_ENABLED() (0) -#define MAGMALLOC_MALLOCERRORBREAK() -#define MAGMALLOC_MALLOCERRORBREAK_ENABLED() (0) -#define MAGMALLOC_PRESSURERELIEFBEGIN(arg0, arg1, arg2) -#define MAGMALLOC_PRESSURERELIEFBEGIN_ENABLED() (0) -#define MAGMALLOC_PRESSURERELIEFEND(arg0, arg1, arg2, arg3) -#define MAGMALLOC_PRESSURERELIEFEND_ENABLED() (0) -#define MAGMALLOC_RECIRCREGION(arg0, arg1, arg2, arg3, arg4) -#define MAGMALLOC_RECIRCREGION_ENABLED() (0) -#define MAGMALLOC_REFRESHINDEX(arg0, arg1, arg2) -#define MAGMALLOC_REFRESHINDEX_ENABLED() (0) -#endif - -#endif // __DTRACE_H diff --git a/src/libmalloc/src/empty.s b/src/libmalloc/src/empty.s deleted file mode 100644 index d12863715..000000000 --- a/src/libmalloc/src/empty.s +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -/* - * This file exists to force clang+ld to produce a link-time optimised - * master object file that contains no bitcode when it is laid down on - * disk. - * - * Adding a non-LTO assembly file to the link step forces ld to perform - * LTO and produce the master object file. - */ - -empty: - nop - -.subsections_via_symbols diff --git a/src/libmalloc/src/frozen_malloc.c b/src/libmalloc/src/frozen_malloc.c deleted file mode 100644 index be1ae6183..000000000 --- a/src/libmalloc/src/frozen_malloc.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved.
- * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -/********* Support code for emacs unexec ************/ - -/* History of freezedry version numbers: - * - * 1) Old malloc (before the scalable malloc implementation in this file - * existed). - * 2) Original freezedrying code for scalable malloc. This code was apparently - * based on the old freezedrying code and was fundamentally flawed in its - * assumption that tracking allocated memory regions was adequate to fake - * operations on freezedried memory. This doesn't work, since scalable - * malloc does not store flags in front of large page-aligned allocations. - * 3) Original szone-based freezedrying code. - * 4) Fresher malloc with tiny zone - * 5) 32/64bit compatible malloc - * 6) Metadata within 1MB and 8MB region for tiny and small - * - * No version backward compatibility is provided, but the version number does - * make it possible for malloc_jumpstart() to return an error if the application - * was freezedried with an older version of malloc. - */ -#define MALLOC_FREEZEDRY_VERSION 6 - -typedef struct { - unsigned version; - unsigned nszones; - szone_t *szones; -} malloc_frozen; - -static void * -frozen_malloc(szone_t *zone, size_t new_size) -{ - return malloc(new_size); -} - -static void * -frozen_calloc(szone_t *zone, size_t num_items, size_t size) -{ - return calloc(num_items, size); -} - -static void * -frozen_valloc(szone_t *zone, size_t new_size) -{ - return valloc(new_size); -} - -static void * -frozen_realloc(szone_t *zone, void *ptr, size_t new_size) -{ - size_t old_size = szone_size(zone, ptr); - void *new_ptr; - - if (new_size <= old_size) { - return ptr; - } - new_ptr = malloc(new_size); - if (old_size > 0) { - memcpy(new_ptr, ptr, old_size); - } - return new_ptr; -} - -static void -frozen_free(szone_t *zone, void *ptr) -{ -} - -static void -frozen_destroy(szone_t *zone) -{ -} - -/********* Pseudo-private API for emacs unexec ************/ - -/* - * malloc_freezedry() records all of the szones in use, so that they can be - * partially reconstituted by malloc_jumpstart(). Due to the differences - * between reconstituted memory regions and those created by the szone code, - * care is taken not to reallocate from the freezedried memory, except in the - * case of a non-growing realloc(). - * - * Due to the flexibility provided by the zone registration mechanism, it is - * impossible to implement generic freezedrying for any zone type. This code - * only handles applications that use the szone allocator, so malloc_freezedry() - * returns 0 (error) if any non-szone zones are encountered. 
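- * - * Typical flow, sketched from the interface rather than the original - * comment: the process about to be dumped calls malloc_freezedry() and - * stores the returned cookie in the dumped image; on relaunch, the restored - * image calls malloc_jumpstart(cookie) before touching any frozen pointers, - * which re-registers the saved zones with the frozen_* stubs above so - * frozen blocks are never reallocated from.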
- */ - -uintptr_t -malloc_freezedry(void) -{ - extern unsigned malloc_num_zones; - extern malloc_zone_t **malloc_zones; - malloc_frozen *data; - unsigned i; - - /* Allocate space in which to store the freezedry state. */ - data = (malloc_frozen *)malloc(sizeof(malloc_frozen)); - - /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */ - data->version = MALLOC_FREEZEDRY_VERSION; - - /* Allocate the array of szone pointers. */ - data->nszones = malloc_num_zones; - data->szones = (szone_t *)calloc(malloc_num_zones, sizeof(szone_t)); - - /* - * Fill in the array of szone structures. They are copied rather than - * referenced, since the originals are likely to be clobbered during malloc - * initialization. - */ - for (i = 0; i < malloc_num_zones; i++) { - if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) { - /* Unknown zone type. */ - free(data->szones); - free(data); - return 0; - } - memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t)); - } - - return ((uintptr_t)data); -} - -int -malloc_jumpstart(uintptr_t cookie) -{ - malloc_frozen *data = (malloc_frozen *)cookie; - unsigned i; - - if (data->version != MALLOC_FREEZEDRY_VERSION) { - /* Unsupported freezedry version. */ - return 1; - } - - for (i = 0; i < data->nszones; i++) { - /* Set function pointers. Even the functions that stay the same must be - * set, since there are no guarantees that they will be mapped to the - * same addresses. */ - data->szones[i].basic_zone.size = (void *)szone_size; - data->szones[i].basic_zone.malloc = (void *)frozen_malloc; - data->szones[i].basic_zone.calloc = (void *)frozen_calloc; - data->szones[i].basic_zone.valloc = (void *)frozen_valloc; - data->szones[i].basic_zone.free = (void *)frozen_free; - data->szones[i].basic_zone.realloc = (void *)frozen_realloc; - data->szones[i].basic_zone.destroy = (void *)frozen_destroy; - data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; - - /* Register the freezedried zone. */ - malloc_zone_register(&data->szones[i].basic_zone); - } - - return 0; -} diff --git a/src/libmalloc/src/frozen_malloc.h b/src/libmalloc/src/frozen_malloc.h deleted file mode 100644 index 37db88317..000000000 --- a/src/libmalloc/src/frozen_malloc.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ - - -#ifndef __FROZEN_MALLOC_H -#define __FROZEN_MALLOC_H - -#ifndef MALLOC_EXPORT -#define MALLOC_EXPORT __attribute__((visibility("default"))) -#endif // MALLOC_EXPORT - -MALLOC_EXPORT -uintptr_t -malloc_freezedry(void); - -MALLOC_EXPORT -int -malloc_jumpstart(uintptr_t cookie); - -#endif // __FROZEN_MALLOC_H diff --git a/src/libmalloc/src/internal.h b/src/libmalloc/src/internal.h deleted file mode 100644 index 719ac9af1..000000000 --- a/src/libmalloc/src/internal.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __INTERNAL_H -#define __INTERNAL_H - -#define __OS_EXPOSE_INTERNALS__ 1 - -// Toggles for fixes for specific Radars. If we get enough of these, we -// probably should create a separate header file for them. -#define RDAR_48993662 1 - -#include -#include -#include <_simple.h> -#include -#undef memcpy -#define memcpy _platform_memmove -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dtrace.h" -#include "base.h" -#include "trace.h" -#include "platform.h" -#include "debug.h" -#include "locking.h" -#include "bitarray.h" -#include "malloc/malloc.h" -#include "printf.h" -#include "frozen_malloc.h" -#include "legacy_malloc.h" -#include "magazine_malloc.h" -#include "malloc_common.h" -#include "nano_malloc_common.h" -#include "nano_malloc.h" -#include "nanov2_malloc.h" -#include "purgeable_malloc.h" -#include "malloc_private.h" -#include "thresholds.h" -#include "vm.h" -#include "magazine_rack.h" -#include "magazine_zone.h" -#include "nano_zone_common.h" -#include "nano_zone.h" -#include "nanov2_zone.h" -#include "magazine_inline.h" -#include "stack_logging.h" -#include "malloc_implementation.h" - -MALLOC_NOEXPORT -extern boolean_t malloc_tracing_enabled; - -MALLOC_NOEXPORT -extern unsigned malloc_debug_flags; - -MALLOC_NOEXPORT MALLOC_NOINLINE -void -malloc_error_break(void); - -MALLOC_NOEXPORT MALLOC_NOINLINE MALLOC_USED -int -malloc_gdb_po_unsafe(void); - -/* - * Copies the malloc library's _malloc_msl_lite_hooks_t structure to a given - * location. Size is passed to allow the structure to grow. 
Since this is - * a temporary arrangement, we don't need to worry about - * pointer authentication here or in the _malloc_msl_lite_hooks_t structure - * itself. - */ -struct _malloc_msl_lite_hooks_s; -typedef void (*set_msl_lite_hooks_callout_t) (struct _malloc_msl_lite_hooks_s *hooksp, size_t size); -void set_msl_lite_hooks(set_msl_lite_hooks_callout_t callout); - - -#endif // __INTERNAL_H diff --git a/src/libmalloc/src/legacy_malloc.c b/src/libmalloc/src/legacy_malloc.c deleted file mode 100644 index 68b355459..000000000 --- a/src/libmalloc/src/legacy_malloc.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -/* - * For use by CheckFix: create a new zone whose behavior is, apart from - * the use of death-row and per-CPU magazines, that of Leopard. - */ -static MALLOC_NOINLINE void * -legacy_valloc(szone_t *szone, size_t size) -{ - void *ptr; - size_t num_kernel_pages; - - num_kernel_pages = round_page_quanta(size) >> vm_page_quanta_shift; - ptr = large_malloc(szone, num_kernel_pages, 0, TRUE); -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "legacy_valloc returned %p\n", ptr); - } -#endif - return ptr; -} - -malloc_zone_t * -create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags) -{ - malloc_zone_t *mzone = create_scalable_zone(initial_size, debug_flags); - szone_t *szone = (szone_t *)mzone; - - if (!szone) { - return NULL; - } - - mprotect(szone, sizeof(szone->basic_zone), PROT_READ | PROT_WRITE); - szone->basic_zone.valloc = (void *)legacy_valloc; - szone->basic_zone.free_definite_size = NULL; - mprotect(szone, sizeof(szone->basic_zone), PROT_READ); - - return mzone; -} diff --git a/src/libmalloc/src/legacy_malloc.h b/src/libmalloc/src/legacy_malloc.h deleted file mode 100644 index 71ab26da5..000000000 --- a/src/libmalloc/src/legacy_malloc.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __LEGACY_MALLOC_H -#define __LEGACY_MALLOC_H - -MALLOC_NOEXPORT -malloc_zone_t * -create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags); - -#endif // __LEGACY_MALLOC_H diff --git a/src/libmalloc/src/locking.h b/src/libmalloc/src/locking.h deleted file mode 100644 index dc544aa14..000000000 --- a/src/libmalloc/src/locking.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __LOCKING_H -#define __LOCKING_H - -#if OS_UNFAIR_LOCK_INLINE -#define os_unfair_lock_lock_with_options(lock, options) \ - os_unfair_lock_lock_with_options_inline(lock, options) -#define os_unfair_lock_trylock(lock) \ - os_unfair_lock_trylock_inline(lock) -#define os_unfair_lock_unlock(lock) \ - os_unfair_lock_unlock_inline(lock) -#endif // OS_UNFAIR_LOCK_INLINE - -typedef os_unfair_lock _malloc_lock_s; -#define _MALLOC_LOCK_INIT OS_UNFAIR_LOCK_INIT - -__attribute__((always_inline)) -static inline void -_malloc_lock_init(_malloc_lock_s *lock) { - *lock = OS_UNFAIR_LOCK_INIT; -} - -MALLOC_ALWAYS_INLINE -static inline void -_malloc_lock_lock(_malloc_lock_s *lock) { - return os_unfair_lock_lock_with_options(lock, OS_UNFAIR_LOCK_ADAPTIVE_SPIN | - OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION); -} - -MALLOC_ALWAYS_INLINE -static inline bool -_malloc_lock_trylock(_malloc_lock_s *lock) { - return os_unfair_lock_trylock(lock); -} - -MALLOC_ALWAYS_INLINE -static inline void -_malloc_lock_unlock(_malloc_lock_s *lock) { - return os_unfair_lock_unlock(lock); -} - -MALLOC_ALWAYS_INLINE -static inline void -_malloc_lock_assert_owner(_malloc_lock_s *lock) { - os_unfair_lock_assert_owner(lock); -} - -#endif // __LOCKING_H diff --git a/src/libmalloc/src/magazine_inline.h b/src/libmalloc/src/magazine_inline.h deleted file mode 100644 index 469bed3fe..000000000 --- a/src/libmalloc/src/magazine_inline.h +++ /dev/null @@ -1,714 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. 
- * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __MAGAZINE_INLINE_H -#define __MAGAZINE_INLINE_H - -extern unsigned int _os_cpu_number_override; - -/* - * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a - * user-specified size_t, which can cause overflow (and subsequent crashes) - * for values near SIZE_T_MAX. Rather than add extra "if" checks everywhere - * this occurs, it is easier to just set an absolute maximum request size, - * and immediately return an error if the requested size exceeds this maximum. - * Of course, values less than this absolute max can fail later if the value - * is still too large for the available memory. The largest value added - * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set - * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX. - */ -#define MALLOC_ABSOLUTE_MAX_SIZE (SIZE_T_MAX - (2 * PAGE_SIZE)) - -// Gets the allocation size for a calloc(). Multiplies num_items by size and adds -// extra_size, storing the result in *total_size. Returns 0 on success, -1 (with -// errno set to ENOMEM) on overflow. -static int MALLOC_INLINE MALLOC_ALWAYS_INLINE -calloc_get_size(size_t num_items, size_t size, size_t extra_size, size_t *total_size) -{ - size_t alloc_size = size; - if (num_items != 1 && (os_mul_overflow(num_items, size, &alloc_size) - || alloc_size > MALLOC_ABSOLUTE_MAX_SIZE)) { - errno = ENOMEM; - return -1; - } - if (extra_size && (os_add_overflow(alloc_size, extra_size, &alloc_size) - || alloc_size > MALLOC_ABSOLUTE_MAX_SIZE)) { - errno = ENOMEM; - return -1; - } - *total_size = alloc_size; - return 0; -} - -/********************* FREE LIST UTILITIES ************************/ - -// A free list entry is comprised of a pair of pointers, previous and next. -// These are used to implement a doubly-linked list, which permits efficient -// extraction. -// -// Because the free list entries are previously freed objects, a misbehaved -// program may write to a pointer after it has called free() on that pointer, -// either by dereference or buffer overflow from an adjacent pointer. This write -// would then corrupt the free list's previous and next pointers, leading to a -// crash. In order to detect this case, we take advantage of the fact that -// malloc'd pointers are known to be at least 16 byte aligned, and thus have -// at least 4 trailing zero bits. -// -// When an entry is added to the free list, a checksum of the previous and next -// pointers is calculated and written to the high four bits of the respective -// pointers. Upon detection of an invalid checksum, an error is logged and NULL -// is returned.
Since all code which un-checksums pointers checks for a NULL -// return, a potentially crashing or malicious dereference is avoided at the -// cost of leaking the corrupted block, and any subsequent blocks on the free -// list of that size. - -#pragma mark forward decls - -static MALLOC_INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) MALLOC_ALWAYS_INLINE; -static MALLOC_INLINE uintptr_t free_list_checksum_ptr(rack_t *rack, void *p) MALLOC_ALWAYS_INLINE; -static MALLOC_INLINE void *free_list_unchecksum_ptr(rack_t *rack, inplace_union *ptr) MALLOC_ALWAYS_INLINE; -static MALLOC_INLINE unsigned free_list_count(task_t task, - memory_reader_t reader, print_task_printer_t printer, - rack_t *mapped_rack, free_list_t ptr); - -static MALLOC_INLINE void recirc_list_extract(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE; -static MALLOC_INLINE void recirc_list_splice_last(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE; -static MALLOC_INLINE void recirc_list_splice_first(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE; - -static MALLOC_INLINE void -yield(void) -{ - thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1); -} - -static MALLOC_INLINE kern_return_t -_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) -{ - *ptr = (void *)address; - return 0; -} - -#pragma mark helpers - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -uint64_t -platform_hw_memsize(void) -{ -#if CONFIG_HAS_COMMPAGE_MEMSIZE - return *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE; -#else - uint64_t hw_memsize = 0; - size_t uint64_t_size = sizeof(hw_memsize); - // hw_memsize was always 0 if sysctlbyname failed, so preserve that behaviour - (void)sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0); - return hw_memsize; -#endif -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -uint32_t -platform_cpu_count(void) -{ -#if CONFIG_HAS_COMMPAGE_NCPUS - return *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS; -#else - return sysconf(_SC_NPROCESSORS_CONF); -#endif -} - -#pragma mark szone locking - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_LOCK(szone_t *szone) -{ - _malloc_lock_lock(&szone->large_szone_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_UNLOCK(szone_t *szone) -{ - _malloc_lock_unlock(&szone->large_szone_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE bool -SZONE_TRY_LOCK(szone_t *szone) -{ - return _malloc_lock_trylock(&szone->large_szone_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_REINIT_LOCK(szone_t *szone) -{ - _malloc_lock_init(&szone->large_szone_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_MAGAZINE_PTR_LOCK(magazine_t *mag_ptr) -{ - _malloc_lock_lock(&mag_ptr->magazine_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_MAGAZINE_PTR_UNLOCK(magazine_t *mag_ptr) -{ - _malloc_lock_unlock(&mag_ptr->magazine_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE bool -SZONE_MAGAZINE_PTR_TRY_LOCK(magazine_t *mag_ptr) -{ - return _malloc_lock_trylock(&mag_ptr->magazine_lock); -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE void -SZONE_MAGAZINE_PTR_REINIT_LOCK(magazine_t *mag_ptr) -{ - _malloc_lock_init(&mag_ptr->magazine_lock); -} - -#pragma mark free list - -static MALLOC_NOINLINE void -free_list_checksum_botch(rack_t *rack, void *ptr, void *value) -{ - malloc_zone_error(rack->debug_flags, true, - "Incorrect checksum for freed object %p: " - "probably modified after being freed.\n" - "Corrupt 
value: %p\n", ptr, value); -} - -static MALLOC_INLINE uintptr_t -free_list_gen_checksum(uintptr_t ptr) -{ - uint8_t chk; - - chk = (unsigned char)(ptr >> 0); - chk += (unsigned char)(ptr >> 8); - chk += (unsigned char)(ptr >> 16); - chk += (unsigned char)(ptr >> 24); -#if __LP64__ - chk += (unsigned char)(ptr >> 32); - chk += (unsigned char)(ptr >> 40); - chk += (unsigned char)(ptr >> 48); - chk += (unsigned char)(ptr >> 56); -#endif - - return chk; -} - -static unsigned -free_list_count(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *mapped_rack, free_list_t ptr) -{ - unsigned int count = 0; - - // ptr.p is always pointer in the *target* process address space. - inplace_free_entry_t mapped_inplace_free_entry; - while (ptr.p) { - count++; - if (reader(task, (vm_address_t)ptr.inplace, sizeof(*ptr.inplace), - (void **)&mapped_inplace_free_entry)) { - printer("** invalid pointer in free list: %p\n", ptr.inplace); - break; - } - ptr.p = free_list_unchecksum_ptr(mapped_rack, &mapped_inplace_free_entry->next); - } - return count; -} - -#define NYBBLE 4 -#if __LP64__ -#define ANTI_NYBBLE (64 - NYBBLE) -#else -#define ANTI_NYBBLE (32 - NYBBLE) -#endif - -static MALLOC_INLINE uintptr_t -free_list_checksum_ptr(rack_t *rack, void *ptr) -{ - uintptr_t p = (uintptr_t)ptr; - return (p >> NYBBLE) | ((free_list_gen_checksum(p ^ rack->cookie) & (uintptr_t)0xF) << ANTI_NYBBLE); // compiles to rotate instruction -} - -static MALLOC_INLINE void * -free_list_unchecksum_ptr(rack_t *rack, inplace_union *ptr) -{ - inplace_union p; - uintptr_t t = ptr->u; - - t = (t << NYBBLE) | (t >> ANTI_NYBBLE); // compiles to rotate instruction - p.u = t & ~(uintptr_t)0xF; - - if ((t ^ free_list_gen_checksum(p.u ^ rack->cookie)) & (uintptr_t)0xF) { - free_list_checksum_botch(rack, ptr, (void *)ptr->u); - __builtin_trap(); - } - return p.p; -} - -#undef ANTI_NYBBLE -#undef NYBBLE - -#pragma mark recirc helpers - -static MALLOC_INLINE void -recirc_list_extract(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) -{ - // excise node from list - if (NULL == node->prev) { - mag_ptr->firstNode = node->next; - } else { - node->prev->next = node->next; - } - - if (NULL == node->next) { - mag_ptr->lastNode = node->prev; - } else { - node->next->prev = node->prev; - } - - node->next = node->prev = NULL; - mag_ptr->recirculation_entries--; -} - -static MALLOC_INLINE void -recirc_list_splice_last(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) -{ - if (NULL == mag_ptr->lastNode) { - mag_ptr->firstNode = node; - node->prev = NULL; - } else { - node->prev = mag_ptr->lastNode; - mag_ptr->lastNode->next = node; - } - mag_ptr->lastNode = node; - node->next = NULL; - node->recirc_suitable = FALSE; - mag_ptr->recirculation_entries++; -} - -static MALLOC_INLINE void -recirc_list_splice_first(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) -{ - if (NULL == mag_ptr->firstNode) { - mag_ptr->lastNode = node; - node->next = NULL; - } else { - node->next = mag_ptr->firstNode; - mag_ptr->firstNode->prev = node; - } - mag_ptr->firstNode = node; - node->prev = NULL; - node->recirc_suitable = FALSE; - mag_ptr->recirculation_entries++; -} - -/******************************************************************************* - * Region hash implementation - * - * This is essentially a duplicate of the existing Large allocator hash, minus - * the ability to remove entries. The two should be combined eventually. 
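- * - * The ring is open-addressed with linear probing: an insert claims the - * first open (or deallocated) slot after the hash index, and a lookup stops - * at the first empty slot, so entries are never individually removed; the - * whole table is regrown and rehashed instead.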
- ******************************************************************************/ -#pragma mark region hash - -/* - * hash_lookup_region_no_lock - Scan a hash ring looking for an entry for a - * given region. - * - * FIXME: If consecutive queries of the same region are likely, a one-entry - * cache would probably be a significant performance win here. - */ -static MALLOC_INLINE rgnhdl_t -hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) -{ - size_t index, hash_index; - rgnhdl_t entry; - - if (!num_entries) { - return 0; - } - - // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...] - // Since the values of ((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers, - // this hash works really well. See Knuth TAOCP, Vol. 3. -#if __LP64__ - index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift); -#else - index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift); -#endif - do { - entry = regions + index; - if (*entry == 0) { - return 0; - } - if (*entry == r) { - return entry; - } - if (++index == num_entries) { - index = 0; - } - } while (index != hash_index); - return 0; -} - -/* - * hash_region_insert_no_lock - Insert a region into the hash ring. - */ -static void -hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) -{ - size_t index, hash_index; - rgnhdl_t entry; - - // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...] - // Since the values of ((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers, - // this hash works really well. See Knuth TAOCP, Vol. 3. -#if __LP64__ - index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift); -#else - index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift); -#endif - do { - entry = regions + index; - if (*entry == HASHRING_OPEN_ENTRY || *entry == HASHRING_REGION_DEALLOCATED) { - *entry = r; - return; - } - if (++index == num_entries) { - index = 0; - } - } while (index != hash_index); -} - -/* - * hash_regions_alloc_no_lock - Allocate space for a number of entries. This - * must be a VM allocation so as to avoid recursing between allocating a new - * small region, and asking the small region to allocate space for the new - * list of regions. - */ -static region_t * -hash_regions_alloc_no_lock(size_t num_entries) -{ - size_t size = num_entries * sizeof(region_t); - return mvm_allocate_pages(round_page_quanta(size), 0, 0, VM_MEMORY_MALLOC); -} - -/* - * hash_regions_grow_no_lock - Grow the hash ring, and rehash the entries. - * Return the new region and new size to update the szone. Do not deallocate - * the old entries since someone may still be allocating them.
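- * - * Worked example: a 64-entry table with shift 6 grows to 128 entries with - * shift 7, so the multiplicative hash keeps taking the top "shift" bits of - * the product as the slot index; every live entry is rehashed into the new - * table.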
- */ -static MALLOC_INLINE region_t * -hash_regions_grow_no_lock(region_t *regions, size_t old_size, size_t *mutable_shift, size_t *new_size) -{ - // double in size and allocate memory for the regions - *new_size = old_size + old_size; - *mutable_shift = *mutable_shift + 1; - region_t *new_regions = hash_regions_alloc_no_lock(*new_size); - - // rehash the entries into the new list - size_t index; - for (index = 0; index < old_size; ++index) { - region_t r = regions[index]; - if (r != HASHRING_OPEN_ENTRY && r != HASHRING_REGION_DEALLOCATED) { - hash_region_insert_no_lock(new_regions, *new_size, *mutable_shift, r); - } - } - return new_regions; -} - -#pragma mark mag index - -/* - * These commpage routines provide fast access to the logical cpu number - * of the calling processor assuming no pre-emption occurs. - */ - -extern unsigned int hyper_shift; -extern unsigned int phys_ncpus; -extern unsigned int logical_ncpus; - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -unsigned int -mag_max_magazines(void) -{ - return max_magazines; -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -unsigned int -mag_max_medium_magazines(void) -{ - return max_medium_magazines; -} - -#pragma mark mag lock - -static MALLOC_INLINE magazine_t * -mag_lock_zine_for_region_trailer(magazine_t *magazines, region_trailer_t *trailer, mag_index_t mag_index) -{ - mag_index_t refreshed_index; - magazine_t *mag_ptr = &(magazines[mag_index]); - - // Take the lock on entry. - SZONE_MAGAZINE_PTR_LOCK(mag_ptr); - - // Now in the time it took to acquire the lock, the region may have migrated - // from one magazine to another. In which case the magazine lock we obtained - // (namely magazines[mag_index].mag_lock) is stale. If so, keep on tryin' ... - while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment - - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - - mag_index = refreshed_index; - mag_ptr = &(magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(mag_ptr); - } - - return mag_ptr; -} - -#pragma mark Region Cookie - -extern uint64_t malloc_entropy[2]; - -static uint32_t -region_cookie(void) -{ - return (uint32_t)(malloc_entropy[0] >> 8) & 0xffff; -} - -static MALLOC_INLINE void -region_check_cookie(region_t region, region_trailer_t *trailer) -{ - if (trailer->region_cookie != region_cookie()) - { - malloc_zone_error(MALLOC_ABORT_ON_ERROR, true, - "Region cookie corrupted for region %p (value is %x)\n", - region, trailer->region_cookie); - __builtin_unreachable(); - } -} - -static MALLOC_INLINE void -region_set_cookie(region_trailer_t *trailer) -{ - trailer->region_cookie = region_cookie(); -} - -#pragma mark tiny allocator - -/* - * tiny_region_for_ptr_no_lock - Returns the tiny region containing the pointer, - * or NULL if not found. - */ -static MALLOC_INLINE region_t -tiny_region_for_ptr_no_lock(rack_t *rack, const void *ptr) -{ - rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, - rack->region_generation->num_regions_allocated_shift, - TINY_REGION_FOR_PTR(ptr)); - - return r ? *r : r; -} - -/* - * Obtain the size of a free tiny block (in msize_t units). - */ -static MALLOC_INLINE msize_t -get_tiny_free_size_offset(const void *ptr, off_t mapped_offset) -{ - void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM); - void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr)); - - // check whether the next block is outside the tiny region or a block header - // if so, then the size of this block is one, and there is no stored size. 
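- // (Free blocks whose size is recorded keep it within the block itself, - // where TINY_FREE_SIZE() reads it back; a one-quantum free block has no - // room for that bookkeeping, hence the probe of the following block.)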
- if (next_block < region_end) { - uint32_t *next_header = (uint32_t *) - ((char *)TINY_BLOCK_HEADER_FOR_PTR(next_block) + mapped_offset); - msize_t next_index = TINY_INDEX_FOR_PTR(next_block); - - if (!BITARRAY_BIT(next_header, next_index)) { - return TINY_FREE_SIZE((uintptr_t)ptr + mapped_offset); - } - } - return 1; -} - -static MALLOC_INLINE msize_t -get_tiny_free_size(const void *ptr) -{ - return get_tiny_free_size_offset(ptr, 0); -} - -static MALLOC_INLINE msize_t -get_tiny_meta_header_offset(const void *ptr, off_t mapped_offset, - boolean_t *is_free) -{ - // returns msize and is_free - // may return 0 for the msize component (meaning 65536) - uint32_t *block_header; - msize_t index; - - block_header = (uint32_t *)((char *)TINY_BLOCK_HEADER_FOR_PTR(ptr) + mapped_offset); - index = TINY_INDEX_FOR_PTR(ptr); - - msize_t midx = (index >> 5) << 1; - uint32_t mask = 1 << (index & 31); - *is_free = 0; - if (0 == (block_header[midx] & mask)) { // if (!BITARRAY_BIT(block_header, index)) - return 0; - } - if (0 == (block_header[midx + 1] & mask)) { // if (!BITARRAY_BIT(in_use, index)) - *is_free = 1; - return get_tiny_free_size_offset(ptr, mapped_offset); - } - - // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array - // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving -#if defined(__LP64__) - // The return value, msize, is computed as the distance to the next 1 bit in block_header. - // That's guaranteed to be somewhere in the next 64 bits. And those bits could span three - // uint32_t block_header elements. Collect the bits into a single uint64_t and measure up with ffsl. - uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1); - uint32_t bitidx = index & 31; - uint64_t word_lo = addr[0]; - uint64_t word_mid = addr[2]; - uint64_t word_hi = addr[4]; - uint64_t word_lomid = (word_lo >> bitidx) | (word_mid << (32 - bitidx)); - uint64_t word = bitidx ? word_lomid | (word_hi << (64 - bitidx)) : word_lomid; - uint32_t result = __builtin_ffsl(word >> 1); -#else - // The return value, msize, is computed as the distance to the next 1 bit in block_header. - // That's guaranteed to be somewhere in the next 32 bits. And those bits could span two - // uint32_t block_header elements. Collect the bits into a single uint32_t and measure up with ffs. - uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1); - uint32_t bitidx = index & 31; - uint32_t word = bitidx ? (addr[0] >> bitidx) | (addr[2] << (32 - bitidx)) : addr[0]; - uint32_t result = __builtin_ffs(word >> 1); -#endif - return result; -} - -static MALLOC_INLINE msize_t -get_tiny_meta_header(const void *ptr, boolean_t *is_free) -{ - return get_tiny_meta_header_offset(ptr, 0, is_free); -} - -#if CONFIG_RECIRC_DEPOT -/** - * Returns true if a tiny region is below the emptiness threshold that allows it - * to be moved to the recirc depot. - */ -static MALLOC_INLINE boolean_t -tiny_region_below_recirc_threshold(region_t region) -{ - region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(region); - return trailer->bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES); -} - -/** - * Returns true if a tiny magazine has crossed the emptiness threshold that - * allows regions to be moved to the recirc depot. 
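- * - * Concretely, per the expression below: the magazine must hold more than - * one and a half regions' worth of unused bytes, and its in-use bytes must - * be below the DENSITY_THRESHOLD of its total allocation.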
- */ -static MALLOC_INLINE boolean_t -tiny_magazine_below_recirc_threshold(magazine_t *mag_ptr) -{ - size_t a = mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine - size_t u = mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine - - return a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2) - && u < DENSITY_THRESHOLD(a); -} -#endif // CONFIG_RECIRC_DEPOT - -#pragma mark small allocator - -/* - * small_region_for_ptr_no_lock - Returns the small region containing the pointer, - * or NULL if not found. - */ -static MALLOC_INLINE region_t -small_region_for_ptr_no_lock(rack_t *rack, const void *ptr) -{ - rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, rack->region_generation->num_regions_allocated_shift, - SMALL_REGION_FOR_PTR(ptr)); - return r ? *r : r; -} - -#if CONFIG_RECIRC_DEPOT -/** - * Returns true if a small region is below the emptiness threshold that allows - * it to be moved to the recirc depot. - */ -static MALLOC_INLINE boolean_t -small_region_below_recirc_threshold(region_t region) -{ - region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(region); - return trailer->bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES); -} - -/** - * Returns true if a small magazine has crossed the emptiness threshold that - * allows regions to be moved to the recirc depot. - */ -static MALLOC_INLINE boolean_t -small_magazine_below_recirc_threshold(magazine_t *mag_ptr) -{ - size_t a = mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine - size_t u = mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine - - return a - u > ((3 * SMALL_REGION_PAYLOAD_BYTES) / 2) - && u < DENSITY_THRESHOLD(a); -} -#endif // CONFIG_RECIRC_DEPOT - -#pragma mark medium allocator -/** - * Returns true if a medium region is below the emptiness threshold that allows - * it to be moved to the recirc depot. - */ -static MALLOC_INLINE boolean_t -medium_region_below_recirc_threshold(region_t region) -{ - region_trailer_t *trailer = REGION_TRAILER_FOR_MEDIUM_REGION(region); - return trailer->bytes_used < DENSITY_THRESHOLD(MEDIUM_REGION_PAYLOAD_BYTES); -} - -/* - * medium_region_for_ptr_no_lock - Returns the medium region containing the pointer, - * or NULL if not found. - */ -static MALLOC_INLINE region_t -medium_region_for_ptr_no_lock(rack_t *rack, const void *ptr) -{ - rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, rack->region_generation->num_regions_allocated_shift, - MEDIUM_REGION_FOR_PTR(ptr)); - return r ? *r : r; -} - -#endif // __MAGAZINE_INLINE_H diff --git a/src/libmalloc/src/magazine_large.c b/src/libmalloc/src/magazine_large.c deleted file mode 100644 index 039125500..000000000 --- a/src/libmalloc/src/magazine_large.c +++ /dev/null @@ -1,843 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file.
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -void -large_debug_print(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - szone_t *mapped_szone; - if (reader(task, zone_address, sizeof(szone_t), (void **)&mapped_szone)) { - printer("Failed to read szone structure\n"); - return; - } - - unsigned index; - large_entry_t *range; - _SIMPLE_STRING b = _simple_salloc(); - - if (b) { - large_entry_t *mapped_large_entries; - if (reader(task, (vm_address_t)mapped_szone->large_entries, - mapped_szone->num_large_entries * sizeof(large_entry_t), - (void **)&mapped_large_entries)) { - printer("Failed to read large entries\n"); - return; - } - - _simple_sprintf(b, "Large allocator active blocks - total %y:\n", - mapped_szone->num_bytes_in_large_objects); - for (index = 0, range = mapped_large_entries; - index < mapped_szone->num_large_entries; index++, range++) { - if (range->address) { - _simple_sprintf(b, " Slot %5d: %p, size %y", index, - (void *)range->address, range->size); - _simple_sprintf(b, "%s\n", - (range->did_madvise_reusable ? ", madvised" : "")); - } - } - -#if CONFIG_LARGE_CACHE - _simple_sprintf(b, "\nLarge allocator death row cache, %d entries\n" - "\tMax cached size:\t%y\n", - mapped_szone->large_cache_depth, - (uint64_t)mapped_szone->large_cache_entry_limit); - _simple_sprintf(b, "\tCurrent size:\t\t%y\n\tReserve size:\t\t%y\n" - "\tReserve limit:\t\t%y\n", - mapped_szone->large_entry_cache_bytes, - mapped_szone->large_entry_cache_reserve_bytes, - mapped_szone->large_entry_cache_reserve_limit); - for (index = 0, range = mapped_szone->large_entry_cache; - index < mapped_szone->large_cache_depth; index++, range++) { - _simple_sprintf(b, " Slot %5d: %p, size %y", index, - (void *)range->address, range->size); - char *age = ""; - if (index == mapped_szone->large_entry_cache_newest) { - age = "[newest]"; - } else if (index == mapped_szone->large_entry_cache_oldest) { - age = "[oldest]"; - } - _simple_sprintf(b, " %s %s\n", age, - (range->did_madvise_reusable ? " madvised" : "")); - } - _simple_sprintf(b, "\n"); -#else // CONFIG_LARGE_CACHE - _simple_sprintf(b, "Large allocator death row cache not configured\n"); -#endif // CONFIG_LARGE_CACHE - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -#if DEBUG_MALLOC -static void -large_debug_print_self(szone_t *szone, boolean_t verbose) -{ - large_debug_print(mach_task_self(), verbose ? MALLOC_VERBOSE_PRINT_LEVEL : 0, - (vm_address_t)szone, _malloc_default_reader, malloc_report_simple); -} -#endif // DEBUG_MALLOC - -/* - * Scan the hash ring looking for an entry containing a given pointer. 
- */ -static large_entry_t * -large_entry_containing_pointer_no_lock(szone_t *szone, const void *ptr) -{ - // result only valid with lock held - unsigned num_large_entries = szone->num_large_entries; - unsigned hash_index; - unsigned index; - large_entry_t *range; - - if (!num_large_entries) { - return NULL; - } - - hash_index = ((uintptr_t)ptr >> vm_page_quanta_shift) % num_large_entries; - index = hash_index; - - do { - range = szone->large_entries + index; - if (range->address == (vm_address_t)ptr) { - return range; - } else if ((vm_address_t)ptr >= range->address - && (vm_address_t)ptr < range->address + range->size) { - return range; - } - - // Since we may be looking for an inner pointer, we might not get an - // exact match on the address, so we need to scan further and to skip - // over empty entries. It will usually be faster to scan backwards. - index = index == 0 ? num_large_entries - 1 : index - 1; - } while (index != hash_index); - - return NULL; -} - -/* - * Scan the hash ring looking for an entry for the given pointer. - */ -large_entry_t * -large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) -{ - // result only valid with lock held - unsigned num_large_entries = szone->num_large_entries; - unsigned hash_index; - unsigned index; - large_entry_t *range; - - if (!num_large_entries) { - return NULL; - } - - hash_index = ((uintptr_t)ptr >> vm_page_quanta_shift) % num_large_entries; - index = hash_index; - - do { - range = szone->large_entries + index; - if (range->address == (vm_address_t)ptr) { - return range; - } - if (0 == range->address) { - return NULL; // end of chain - } - index++; - if (index == num_large_entries) { - index = 0; - } - } while (index != hash_index); - - return NULL; -} - -static void -large_entry_insert_no_lock(szone_t *szone, large_entry_t range) -{ - unsigned num_large_entries = szone->num_large_entries; - unsigned hash_index = (((uintptr_t)(range.address)) >> vm_page_quanta_shift) % num_large_entries; - unsigned index = hash_index; - large_entry_t *entry; - - // assert(szone->num_large_objects_in_use < szone->num_large_entries); /* must be called with room to spare */ - - do { - entry = szone->large_entries + index; - if (0 == entry->address) { - *entry = range; - return; // end of chain - } - index++; - if (index == num_large_entries) { - index = 0; - } - } while (index != hash_index); - - // assert(0); /* must not fallthrough! */ -} - -// FIXME: can't we simply swap the (now empty) entry with the last entry on the collision chain for this hash slot? -static MALLOC_INLINE void -large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) -{ - unsigned num_large_entries = szone->num_large_entries; - uintptr_t hash_index = entry - szone->large_entries; - uintptr_t index = hash_index; - large_entry_t range; - - // assert(entry->address == 0) /* caller must have cleared *entry */ - - do { - index++; - if (index == num_large_entries) { - index = 0; - } - range = szone->large_entries[index]; - if (0 == range.address) { - return; - } - szone->large_entries[index].address = (vm_address_t)0; - szone->large_entries[index].size = 0; - szone->large_entries[index].did_madvise_reusable = FALSE; - large_entry_insert_no_lock(szone, range); // this will reinsert in the - // proper place - } while (index != hash_index); - - // assert(0); /* since entry->address == 0, must not fallthrough! */ -} - -// FIXME: num should probably be a size_t, since you can theoretically allocate -// more than 2^32-1 large_threshold objects in 64 bit. 
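The large_entry_* routines above form an open-addressed hash ring: a pointer hashes to its page-shifted address modulo an odd table size, lookups probe forward until a match or an empty slot, and deletion clears the slot and then reinserts every entry in the cluster that follows so probe chains stay unbroken. A compact sketch of that probe/insert/delete discipline over a simplified table (the fixed size, shift, and entry layout are illustrative; the real table grows early enough that empty slots always exist):

    #include <stddef.h>
    #include <stdint.h>

    #define NENTRIES 63 /* odd, like the real table, for better hashing */

    typedef struct {
        uintptr_t address; /* 0 marks an empty slot */
        size_t size;
    } entry_t;

    static entry_t table[NENTRIES];

    static unsigned
    hash_slot(uintptr_t addr)
    {
        return (unsigned)((addr >> 12) % NENTRIES); /* page-shifted, as above */
    }

    /* Forward linear probe; an empty slot terminates the chain. */
    static entry_t *
    ring_lookup(uintptr_t addr)
    {
        unsigned start = hash_slot(addr), i = start;
        do {
            if (table[i].address == addr) {
                return &table[i];
            }
            if (table[i].address == 0) {
                return NULL; /* end of chain */
            }
            i = (i + 1) % NENTRIES;
        } while (i != start);
        return NULL;
    }

    /* Caller guarantees a free slot, as the real code asserts. */
    static void
    ring_insert(entry_t e)
    {
        unsigned i = hash_slot(e.address);
        while (table[i].address != 0) {
            i = (i + 1) % NENTRIES;
        }
        table[i] = e;
    }

    /* Clear *e, then reinsert everything in the cluster after it so a
     * later probe never stops early at the new hole (the same job as
     * large_entries_rehash_after_entry_no_lock). */
    static void
    ring_delete(entry_t *e)
    {
        unsigned i = (unsigned)(e - table);
        e->address = 0;
        e->size = 0;
        for (i = (i + 1) % NENTRIES; table[i].address != 0; i = (i + 1) % NENTRIES) {
            entry_t moved = table[i];
            table[i].address = 0;
            table[i].size = 0;
            ring_insert(moved);
        }
    }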
-static MALLOC_INLINE large_entry_t * -large_entries_alloc_no_lock(unsigned num) -{ - size_t size = num * sizeof(large_entry_t); - - // Note that we allocate memory (via a system call) under a spin lock - // That is certainly evil, however it's very rare in the lifetime of a process - // The alternative would slow down the normal case - return mvm_allocate_pages(round_page_quanta(size), 0, 0, VM_MEMORY_MALLOC_LARGE); -} - -void -large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) -{ - size_t size = num * sizeof(large_entry_t); - - range_to_deallocate->address = (vm_address_t)entries; - range_to_deallocate->size = round_page_quanta(size); -} - -static large_entry_t * -large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) -{ - // sets range_to_deallocate - unsigned old_num_entries = szone->num_large_entries; - large_entry_t *old_entries = szone->large_entries; - // always an odd number for good hashing - unsigned new_num_entries = - (old_num_entries) ? old_num_entries * 2 + 1 : (unsigned)((vm_page_quanta_size / sizeof(large_entry_t)) - 1); - large_entry_t *new_entries = large_entries_alloc_no_lock(new_num_entries); - unsigned index = old_num_entries; - large_entry_t oldRange; - - // if the allocation of new entries failed, bail - if (new_entries == NULL) { - return NULL; - } - - szone->num_large_entries = new_num_entries; - szone->large_entries = new_entries; - - /* rehash entries into the new list */ - while (index--) { - oldRange = old_entries[index]; - if (oldRange.address) { - large_entry_insert_no_lock(szone, oldRange); - } - } - - if (old_entries) { - large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate); - } else { - range_to_deallocate->address = (vm_address_t)0; - range_to_deallocate->size = 0; - } - - return new_entries; -} - -// frees the specific entry in the size table -// returns a range to truly deallocate -static vm_range_t -large_entry_free_no_lock(szone_t *szone, large_entry_t *entry) -{ - vm_range_t range; - - MALLOC_TRACE(TRACE_large_free, (uintptr_t)szone, (uintptr_t)entry->address, entry->size, 0); - - range.address = entry->address; - range.size = entry->size; - - if (szone->debug_flags & MALLOC_ADD_GUARD_PAGES) { - mvm_protect((void *)range.address, range.size, PROT_READ | PROT_WRITE, szone->debug_flags); - range.address -= vm_page_quanta_size; - range.size += 2 * vm_page_quanta_size; - } - - entry->address = 0; - entry->size = 0; - entry->did_madvise_reusable = FALSE; - large_entries_rehash_after_entry_no_lock(szone, entry); - -#if DEBUG_MALLOC - if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) { - large_debug_print_self(szone, 1); - malloc_report(ASL_LEVEL_ERR, "*** freed entry %p still in use; num_large_entries=%d\n", (void *)range.address, szone->num_large_entries); - } -#endif - return range; -} - -kern_return_t -large_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - vm_address_t large_entries_address, - unsigned num_entries, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - unsigned index = 0; - vm_range_t buffer[MAX_RECORDER_BUFFER]; - unsigned count = 0; - large_entry_t *entries; - kern_return_t err; - vm_range_t range; - large_entry_t entry; - - err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries); - if (err) { - return err; - } - - index = num_entries; - if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { - range.address = large_entries_address; - 
range.size = round_page_quanta(num_entries * sizeof(large_entry_t)); - recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1); - } - if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) { - while (index--) { - entry = entries[index]; - if (entry.address) { - range.address = entry.address; - range.size = entry.size; - buffer[count++] = range; - if (count >= MAX_RECORDER_BUFFER) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - } - if (count) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); - } - return 0; -} - -void * -large_malloc(szone_t *szone, size_t num_kernel_pages, unsigned char alignment, boolean_t cleared_requested) -{ - void *addr; - vm_range_t range_to_deallocate; - size_t size; - large_entry_t large_entry; - - MALLOC_TRACE(TRACE_large_malloc, (uintptr_t)szone, num_kernel_pages, alignment, cleared_requested); - - if (!num_kernel_pages) { - num_kernel_pages = 1; // minimal allocation size for this szone - } - size = (size_t)num_kernel_pages << vm_page_quanta_shift; - range_to_deallocate.size = 0; - range_to_deallocate.address = 0; - -#if CONFIG_LARGE_CACHE - if (size <= szone->large_cache_entry_limit) { // Look for a large_entry_t on the death-row cache? - SZONE_LOCK(szone); - - int i, best = -1, idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest; - size_t best_size = SIZE_T_MAX; - - while (1) { // Scan large_entry_cache for best fit, starting with most recent entry - size_t this_size = szone->large_entry_cache[idx].size; - addr = (void *)szone->large_entry_cache[idx].address; - - if (0 == alignment || 0 == (((uintptr_t)addr) & (((uintptr_t)1 << alignment) - 1))) { - if (size == this_size) { // size match! - best = idx; - best_size = this_size; - break; - } - - if (size <= this_size && this_size < best_size) { // improved fit? - best = idx; - best_size = this_size; - } - } - - if (idx == stop_idx) { // exhausted live ring? - break; - } - - if (idx) { - idx--; // bump idx down - } else { - idx = szone->large_cache_depth - 1; // wrap idx - } - } - - if (best > -1 && (best_size - size) < size) { // limit fragmentation to 50% - addr = (void *)szone->large_entry_cache[best].address; - boolean_t was_madvised_reusable = szone->large_entry_cache[best].did_madvise_reusable; - - // Compact live ring to fill entry now vacated at large_entry_cache[best] - // while preserving time-order - if (szone->large_entry_cache_oldest < szone->large_entry_cache_newest) { - // Ring hasn't wrapped. Fill in from right. - for (i = best; i < szone->large_entry_cache_newest; ++i) { - szone->large_entry_cache[i] = szone->large_entry_cache[i + 1]; - } - - szone->large_entry_cache_newest--; // Pull in right endpoint. - - } else if (szone->large_entry_cache_newest < szone->large_entry_cache_oldest) { - // Ring has wrapped. Arrange to fill in from the contiguous side. - if (best <= szone->large_entry_cache_newest) { - // Fill from right. - for (i = best; i < szone->large_entry_cache_newest; ++i) { - szone->large_entry_cache[i] = szone->large_entry_cache[i + 1]; - } - - if (0 < szone->large_entry_cache_newest) { - szone->large_entry_cache_newest--; - } else { - szone->large_entry_cache_newest = szone->large_cache_depth - 1; - } - } else { - // Fill from left. 
- for (i = best; i > szone->large_entry_cache_oldest; --i) { - szone->large_entry_cache[i] = szone->large_entry_cache[i - 1]; - } - - if (szone->large_entry_cache_oldest < szone->large_cache_depth - 1) { - szone->large_entry_cache_oldest++; - } else { - szone->large_entry_cache_oldest = 0; - } - } - - } else { - // By trichotomy, large_entry_cache_newest == large_entry_cache_oldest. - // That implies best == large_entry_cache_newest == large_entry_cache_oldest - // and the ring is now empty. - szone->large_entry_cache[best].address = 0; - szone->large_entry_cache[best].size = 0; - szone->large_entry_cache[best].did_madvise_reusable = FALSE; - } - - if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { - // density of hash table too high; grow table - // we do that under lock to avoid a race - large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate); - if (entries == NULL) { - SZONE_UNLOCK(szone); - return NULL; - } - } - - large_entry.address = (vm_address_t)addr; - large_entry.size = best_size; - large_entry.did_madvise_reusable = FALSE; - large_entry_insert_no_lock(szone, large_entry); - - szone->num_large_objects_in_use++; - szone->num_bytes_in_large_objects += best_size; - if (!was_madvised_reusable) { - szone->large_entry_cache_reserve_bytes -= best_size; - } - - szone->large_entry_cache_bytes -= best_size; - - if (szone->flotsam_enabled && szone->large_entry_cache_bytes < SZONE_FLOTSAM_THRESHOLD_LOW) { - szone->flotsam_enabled = FALSE; - } - - SZONE_UNLOCK(szone); - - if (range_to_deallocate.size) { - // we deallocate outside the lock - mvm_deallocate_pages((void *)range_to_deallocate.address, range_to_deallocate.size, 0); - } - - if (cleared_requested) { - memset(addr, 0, size); - } - - return addr; - } else { - SZONE_UNLOCK(szone); - } - } - - range_to_deallocate.size = 0; - range_to_deallocate.address = 0; -#endif /* CONFIG_LARGE_CACHE */ - - addr = mvm_allocate_pages(size, alignment, szone->debug_flags, VM_MEMORY_MALLOC_LARGE); - if (addr == NULL) { - return NULL; - } - - SZONE_LOCK(szone); - if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { - // density of hash table too high; grow table - // we do that under lock to avoid a race - large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate); - if (entries == NULL) { - SZONE_UNLOCK(szone); - return NULL; - } - } - - large_entry.address = (vm_address_t)addr; - large_entry.size = size; - large_entry.did_madvise_reusable = FALSE; - large_entry_insert_no_lock(szone, large_entry); - - szone->num_large_objects_in_use++; - szone->num_bytes_in_large_objects += size; - SZONE_UNLOCK(szone); - - if (range_to_deallocate.size) { - // we deallocate outside the lock - mvm_deallocate_pages((void *)range_to_deallocate.address, range_to_deallocate.size, 0); - } - return addr; -} - -void -free_large(szone_t *szone, void *ptr) -{ - // We have established ptr is page-aligned and neither tiny nor small - large_entry_t *entry; - vm_range_t vm_range_to_deallocate; - - SZONE_LOCK(szone); - entry = large_entry_for_pointer_no_lock(szone, ptr); - if (entry) { -#if CONFIG_LARGE_CACHE - if (entry->size <= szone->large_cache_entry_limit && - -1 != madvise((void *)(entry->address), entry->size, - MADV_CAN_REUSE)) { // Put the large_entry_t on the death-row cache? - int idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest; - large_entry_t this_entry = *entry; // Make a local copy, "entry" is volatile when lock is let go. 
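In the cache-hit path of large_malloc above, the death-row ring is scanned from the newest entry back toward the oldest: an exact size match wins immediately, otherwise the smallest entry that still fits is remembered, and the winner is accepted only if its slop stays under half the request, which is the (best_size - size) < size check. The same scan in isolation, with the alignment filter omitted and the entry type simplified:

    #include <stddef.h>
    #include <stdint.h>

    #define DEPTH 16 /* illustrative; the real depth is configuration-dependent */

    typedef struct {
        uintptr_t address;
        size_t size;
    } cache_entry_t;

    /* Scan from newest back to oldest (a live ring, so every visited slot
     * holds an entry). Returns the chosen index, or -1 for no usable fit. */
    static int
    best_fit_index(const cache_entry_t *cache, int newest, int oldest, size_t size)
    {
        int best = -1, idx = newest;
        size_t best_size = (size_t)-1;

        for (;;) {
            size_t this_size = cache[idx].size;
            if (this_size == size) { /* exact match wins outright */
                best = idx;
                best_size = this_size;
                break;
            }
            if (size <= this_size && this_size < best_size) { /* improved fit */
                best = idx;
                best_size = this_size;
            }
            if (idx == oldest) { /* exhausted the live ring */
                break;
            }
            idx = idx ? idx - 1 : DEPTH - 1; /* step toward older entries */
        }
        /* Cap fragmentation at 50% of the request, as above. */
        return (best > -1 && best_size - size < size) ? best : -1;
    }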
-			boolean_t reusable = TRUE;
-			boolean_t should_madvise =
-					szone->large_entry_cache_reserve_bytes + this_entry.size > szone->large_entry_cache_reserve_limit;
-
-			// Already freed?
-			// [Note that repeated entries in death-row risk vending the same entry subsequently
-			// to two different malloc() calls. By checking here the (illegal) double free
-			// is accommodated, matching the behavior of the previous implementation.]
-			while (1) { // Scan large_entry_cache starting with most recent entry
-				if (szone->large_entry_cache[idx].address == entry->address) {
-					malloc_zone_error(szone->debug_flags, true, "pointer %p being freed already on death-row\n", ptr);
-					SZONE_UNLOCK(szone);
-					return;
-				}
-
-				if (idx == stop_idx) { // exhausted live ring?
-					break;
-				}
-
-				if (idx) {
-					idx--; // bump idx down
-				} else {
-					idx = szone->large_cache_depth - 1; // wrap idx
-				}
-			}
-
-			SZONE_UNLOCK(szone);
-
-			if (szone->debug_flags & MALLOC_PURGEABLE) { // Are we a purgeable zone?
-				int state = VM_PURGABLE_NONVOLATILE; // restore to default condition
-
-				if (KERN_SUCCESS != vm_purgable_control(mach_task_self(), this_entry.address, VM_PURGABLE_SET_STATE, &state)) {
-					malloc_report(ASL_LEVEL_ERR, "*** can't vm_purgable_control(..., VM_PURGABLE_SET_STATE) for large freed block at %p\n",
-							(void *)this_entry.address);
-					reusable = FALSE;
-				}
-			}
-
-			if (szone->large_legacy_reset_mprotect) { // Linked for Leopard?
-				// Accommodate Leopard apps that (illegally) mprotect() their own guard pages on large malloc'd allocations
-				int err = mprotect((void *)(this_entry.address), this_entry.size, PROT_READ | PROT_WRITE);
-				if (err) {
-					malloc_report(ASL_LEVEL_ERR, "*** can't reset protection for large freed block at %p\n", (void *)this_entry.address);
-					reusable = FALSE;
-				}
-			}
-
-			// madvise(..., MADV_FREE_REUSABLE) death-row arrivals if hoarding would exceed large_entry_cache_reserve_limit
-			if (should_madvise) {
-				// Issue madvise to avoid paging out the dirtied free()'d pages in "entry"
-				MAGMALLOC_MADVFREEREGION((void *)szone, (void *)0, (void *)(this_entry.address), (int)this_entry.size); // DTrace USDT Probe
-
-				// Ok to do this madvise on embedded because we won't call MADV_FREE_REUSABLE on a large
-				// cache block twice without MADV_FREE_REUSE in between.
-
-				if (-1 == madvise((void *)(this_entry.address), this_entry.size, MADV_FREE_REUSABLE)) {
-					/* -1 return: VM map entry change makes this unfit for reuse. */
-#if DEBUG_MADVISE
-					malloc_zone_error(szone->debug_flags, false,
-							"free_large madvise(..., MADV_FREE_REUSABLE) failed for %p, length=%d\n",
-							(void *)this_entry.address, this_entry.size);
-#endif
-					reusable = FALSE;
-				}
-			}
-
-			SZONE_LOCK(szone);
-
-			// Re-acquire "entry" after the interval just above where we let go of the lock.
- entry = large_entry_for_pointer_no_lock(szone, ptr); - if (NULL == entry) { - malloc_zone_error(szone->debug_flags, true, "entry for pointer %p being freed from death-row vanished\n", ptr); - SZONE_UNLOCK(szone); - return; - } - - // Add "entry" to death-row ring - if (reusable) { - int idx = szone->large_entry_cache_newest; // Most recently occupied - vm_address_t addr; - size_t adjsize; - - if (szone->large_entry_cache_newest == szone->large_entry_cache_oldest && - 0 == szone->large_entry_cache[idx].address) { - // Ring is empty, idx is good as it stands - addr = 0; - adjsize = 0; - } else { - // Extend the queue to the "right" by bumping up large_entry_cache_newest - if (idx == szone->large_cache_depth - 1) { - idx = 0; // Wrap index - } else { - idx++; // Bump index - } - if (idx == szone->large_entry_cache_oldest) { // Fully occupied - // Drop this entry from the cache and deallocate the VM - addr = szone->large_entry_cache[idx].address; - adjsize = szone->large_entry_cache[idx].size; - szone->large_entry_cache_bytes -= adjsize; - if (!szone->large_entry_cache[idx].did_madvise_reusable) { - szone->large_entry_cache_reserve_bytes -= adjsize; - } - } else { - // Using an unoccupied cache slot - addr = 0; - adjsize = 0; - } - } - - if ((szone->debug_flags & MALLOC_DO_SCRIBBLE)) { - memset((void *)(entry->address), should_madvise ? SCRUBBLE_BYTE : SCRABBLE_BYTE, entry->size); - } - - entry->did_madvise_reusable = should_madvise; // Was madvise()'d above? - if (!should_madvise) { // Entered on death-row without madvise() => up the hoard total - szone->large_entry_cache_reserve_bytes += entry->size; - } - - szone->large_entry_cache_bytes += entry->size; - - if (!szone->flotsam_enabled && szone->large_entry_cache_bytes > SZONE_FLOTSAM_THRESHOLD_HIGH) { - szone->flotsam_enabled = TRUE; - } - - szone->large_entry_cache[idx] = *entry; - szone->large_entry_cache_newest = idx; - - szone->num_large_objects_in_use--; - szone->num_bytes_in_large_objects -= entry->size; - - (void)large_entry_free_no_lock(szone, entry); - - if (0 == addr) { - SZONE_UNLOCK(szone); - return; - } - - // Fall through to drop large_entry_cache_oldest from the cache, - // and then deallocate its pages. - - // Trim the queue on the "left" by bumping up large_entry_cache_oldest - if (szone->large_entry_cache_oldest == szone->large_cache_depth - 1) { - szone->large_entry_cache_oldest = 0; - } else { - szone->large_entry_cache_oldest++; - } - - // we deallocate_pages, including guard pages, outside the lock - SZONE_UNLOCK(szone); - mvm_deallocate_pages((void *)addr, (size_t)adjsize, 0); - return; - } else { - /* fall through to discard an allocation that is not reusable */ - } - } -#endif /* CONFIG_LARGE_CACHE */ - - szone->num_large_objects_in_use--; - szone->num_bytes_in_large_objects -= entry->size; - - vm_range_to_deallocate = large_entry_free_no_lock(szone, entry); - } else { -#if DEBUG_MALLOC - large_debug_print_self(szone, 1); -#endif - malloc_zone_error(szone->debug_flags, true, "pointer %p being freed was not allocated\n", ptr); - SZONE_UNLOCK(szone); - return; - } - SZONE_UNLOCK(szone); // we release the lock asap - CHECK(szone, __PRETTY_FUNCTION__); - - // we deallocate_pages, including guard pages, outside the lock - if (vm_range_to_deallocate.address) { -#if DEBUG_MALLOC - // FIXME: large_entry_for_pointer_no_lock() needs the lock held ... 
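The death-row insertion above grows the ring at its newest end; when the bumped index collides with the oldest entry the ring is fully occupied, so the oldest block falls out of the cache and its pages are returned to the VM outside the lock. A reduced sketch of that push-with-eviction step, leaving out the locking, byte accounting, and madvise state the real code maintains:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DEPTH 16 /* illustrative depth */

    typedef struct {
        uintptr_t address; /* 0 marks an empty slot */
        size_t size;
    } cache_entry_t;

    typedef struct {
        cache_entry_t slots[DEPTH];
        int newest, oldest;
    } ring_t;

    /* Push e as the newest entry. Returns true and fills *evicted when
     * the oldest entry had to be dropped, so the caller can deallocate
     * its pages after releasing the lock, as free_large does. */
    static bool
    ring_push(ring_t *r, cache_entry_t e, cache_entry_t *evicted)
    {
        bool evict = false;
        int idx = r->newest;

        if (r->newest == r->oldest && r->slots[idx].address == 0) {
            /* Ring is empty; idx is good as it stands. */
        } else {
            idx = (idx == DEPTH - 1) ? 0 : idx + 1; /* extend to the "right" */
            if (idx == r->oldest) { /* fully occupied: drop the oldest */
                *evicted = r->slots[idx];
                evict = true;
                r->oldest = (r->oldest == DEPTH - 1) ? 0 : r->oldest + 1;
            }
        }
        r->slots[idx] = e;
        r->newest = idx;
        return evict;
    }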
- if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) { - large_debug_print_self(szone, 1); - malloc_report(ASL_LEVEL_ERR, "*** invariant broken: %p still in use num_large_entries=%d\n", - (void *)vm_range_to_deallocate.address, szone->num_large_entries); - } -#endif - mvm_deallocate_pages((void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0); - } -} - -void * -large_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size) -{ - size_t shrinkage = old_size - new_good_size; - - if (shrinkage) { - SZONE_LOCK(szone); - /* contract existing large entry */ - large_entry_t *large_entry = large_entry_for_pointer_no_lock(szone, ptr); - if (!large_entry) { - malloc_zone_error(szone->debug_flags, true, "large entry %p reallocated is not properly in table\n", ptr); - SZONE_UNLOCK(szone); - return ptr; - } - - large_entry->address = (vm_address_t)ptr; - large_entry->size = new_good_size; - szone->num_bytes_in_large_objects -= shrinkage; - boolean_t guarded = szone->debug_flags & MALLOC_ADD_GUARD_PAGES; - SZONE_UNLOCK(szone); // we release the lock asap - - if (guarded) { - // Keep the page above the new end of the allocation as the - // postlude guard page. - kern_return_t err; - err = mprotect((void *)((uintptr_t)ptr + new_good_size), vm_page_quanta_size, 0); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** can't mvm_protect(0x0) region for new postlude guard page at %p\n", - ptr + new_good_size); - } - new_good_size += vm_page_quanta_size; - shrinkage -= vm_page_quanta_size; - } - - mvm_deallocate_pages((void *)((uintptr_t)ptr + new_good_size), shrinkage, 0); - } - return ptr; -} - -int -large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) -{ - vm_address_t addr = (vm_address_t)ptr + old_size; - large_entry_t *large_entry; - kern_return_t err; - - SZONE_LOCK(szone); - large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr); - SZONE_UNLOCK(szone); - - if (large_entry) { // check if "addr = ptr + old_size" is already spoken for - return 0; // large pointer already exists in table - extension is not going to work - } - - new_size = round_page_quanta(new_size); - /* - * Ask for allocation at a specific address, and mark as realloc - * to request coalescing with previous realloc'ed extensions. - */ - err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC)); - if (err != KERN_SUCCESS) { - return 0; - } - - SZONE_LOCK(szone); - /* extend existing large entry */ - large_entry = large_entry_for_pointer_no_lock(szone, ptr); - if (!large_entry) { - malloc_zone_error(szone->debug_flags, true, "large entry %p reallocated is not properly in table\n", ptr); - SZONE_UNLOCK(szone); - return 0; // Bail, leaking "addr" - } - - large_entry->address = (vm_address_t)ptr; - large_entry->size = new_size; - szone->num_bytes_in_large_objects += new_size - old_size; - SZONE_UNLOCK(szone); // we release the lock asap - - return 1; -} - -boolean_t -large_claimed_address(szone_t *szone, void *ptr) -{ - SZONE_LOCK(szone); - boolean_t result = large_entry_containing_pointer_no_lock(szone, - (void *)trunc_page((uintptr_t)ptr)) != NULL; - SZONE_UNLOCK(szone); - return result; -} diff --git a/src/libmalloc/src/magazine_malloc.c b/src/libmalloc/src/magazine_malloc.c deleted file mode 100644 index 98c24d0fc..000000000 --- a/src/libmalloc/src/magazine_malloc.c +++ /dev/null @@ -1,1760 +0,0 @@ -/* - * Copyright (c) 1999, 2006, 2008 Apple Inc. All rights reserved. 
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/* Author: Bertrand Serlet, August 1999 */
-
-/*
- * Multithread enhancements for "tiny" allocations introduced February 2008.
- * These are in the spirit of "Hoard". See:
- * Emery D. Berger, Kathryn S. McKinley, Robert D. Blumofe, and Paul R. Wilson. 2000.
- * Hoard: a scalable memory allocator for multithreaded applications.
- * In Proceedings of the ninth international conference on Architectural support for
- * programming languages and operating systems (ASPLOS IX).
- * ACM, New York, NY, USA, 117-128.
- * DOI: https://doi.org/10.1145/378993.379232
- * Retrieved on 2008-02-22.
- */
-
-#include "internal.h"
-
-#if DEBUG_MALLOC
-#define LOG(szone, ptr) (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
-#else
-#define LOG(szone, ptr) 0
-#endif
-
-// Maximum number of magazines, set from the number of logical CPUs and
-// possibly limited by the MallocMaxMagazines environment variable.
-int max_magazines;
-
-// Control whether medium is enabled at all when creating new magazine zones.
-bool magazine_medium_enabled = true;
-
-// Control the DRAM limit at which medium kicks in.
-uint64_t magazine_medium_active_threshold = MEDIUM_ACTIVATION_THRESHOLD;
-
-// Control the DRAM limit at which the expanded large cache kicks in.
-uint64_t magazine_large_expanded_cache_threshold = LARGE_CACHE_EXPANDED_THRESHOLD;
-
-// Maximum number of magazines that the medium
-// allocator will use. This addresses a 32-bit load-offset range issue found
-// in some apps when introducing medium.
-int max_medium_magazines;
-
-// Number of regions to retain in a recirc depot.
-#if CONFIG_RECIRC_DEPOT
-int recirc_retained_regions = DEFAULT_RECIRC_RETAINED_REGIONS;
-#endif // CONFIG_RECIRC_DEPOT
-
-/********************* Zone call backs ************************/
-/*
- * Mark these MALLOC_NOINLINE to avoid bloating the purgeable zone call backs
- */
-void
-szone_free(szone_t *szone, void *ptr)
-{
-	region_t tiny_region;
-	region_t small_region;
-
-#if DEBUG_MALLOC
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_INFO, "in szone_free with %p\n", ptr);
-	}
-#endif
-	if (!ptr) {
-		return;
-	}
-	/*
-	 * Try to free to a tiny region.
- */ - if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { - malloc_zone_error(szone->debug_flags, true, "Non-aligned pointer %p being freed\n", ptr); - return; - } - if ((tiny_region = tiny_region_for_ptr_no_lock(&szone->tiny_rack, ptr)) != NULL) { - if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed\n", ptr); - return; - } - free_tiny(&szone->tiny_rack, ptr, tiny_region, 0, false); - return; - } - - /* - * Try to free to a small region. - */ - if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { - malloc_zone_error(szone->debug_flags, true, "Non-aligned pointer %p being freed (2)\n", ptr); - return; - } - if ((small_region = small_region_for_ptr_no_lock(&szone->small_rack, ptr)) != NULL) { - if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed (2)\n", ptr); - return; - } - free_small(&szone->small_rack, ptr, small_region, 0); - return; - } - -#if CONFIG_MEDIUM_ALLOCATOR - region_t medium_region; - - if (szone->is_medium_engaged && - (medium_region = medium_region_for_ptr_no_lock(&szone->medium_rack, ptr)) != NULL) { - if (MEDIUM_META_INDEX_FOR_PTR(ptr) >= NUM_MEDIUM_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed (2)\n", ptr); - return; - } - free_medium(&szone->medium_rack, ptr, medium_region, 0); - return; - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - /* check that it's a legal large allocation */ - if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) { - malloc_zone_error(szone->debug_flags, true, "non-page-aligned, non-allocated pointer %p being freed\n", ptr); - return; - } - free_large(szone, ptr); -} - -void -szone_free_definite_size(szone_t *szone, void *ptr, size_t size) -{ -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in szone_free_definite_size with %p\n", ptr); - } - - if (0 == size) { - malloc_zone_error(szone->debug_flags, true, "pointer %p of size zero being freed\n", ptr); - return; - } - -#endif - if (!ptr) { - return; - } - - /* - * Try to free to a tiny region. - */ - if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { - malloc_zone_error(szone->debug_flags, true, "Non-aligned pointer %p being freed\n", ptr); - return; - } - if (size <= TINY_LIMIT_THRESHOLD) { - if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed\n", ptr); - return; - } - free_tiny(&szone->tiny_rack, ptr, TINY_REGION_FOR_PTR(ptr), size, false); - return; - } - - /* - * Try to free to a small region. - */ - if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { - malloc_zone_error(szone->debug_flags, true, "Non-aligned pointer %p being freed (2)\n", ptr); - return; - } - if (size <= SMALL_LIMIT_THRESHOLD) { - if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed (2)\n", ptr); - return; - } - free_small(&szone->small_rack, ptr, SMALL_REGION_FOR_PTR(ptr), size); - return; - } - -#if CONFIG_MEDIUM_ALLOCATOR - /* - * Try to free to a medium region. 
- */ - if (szone->is_medium_engaged && size <= MEDIUM_LIMIT_THRESHOLD) { - if (MEDIUM_META_INDEX_FOR_PTR(ptr) >= NUM_MEDIUM_BLOCKS) { - malloc_zone_error(szone->debug_flags, true, "Pointer %p to metadata being freed (2)\n", ptr); - return; - } - free_medium(&szone->medium_rack, ptr, MEDIUM_REGION_FOR_PTR(ptr), size); - return; - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - /* check that it's a legal large allocation */ - if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) { - malloc_zone_error(szone->debug_flags, true, "non-page-aligned, non-allocated pointer %p being freed\n", ptr); - return; - } - free_large(szone, ptr); -} - -MALLOC_NOINLINE void * -szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) -{ - void *ptr; - msize_t msize; - - if (size <= TINY_LIMIT_THRESHOLD) { - msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); - if (!msize) { - msize = 1; - } - ptr = tiny_malloc_should_clear(&szone->tiny_rack, msize, cleared_requested); - } else if (size <= SMALL_LIMIT_THRESHOLD) { - msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); - if (!msize) { - msize = 1; - } - ptr = small_malloc_should_clear(&szone->small_rack, msize, cleared_requested); -#if CONFIG_MEDIUM_ALLOCATOR - } else if (szone->is_medium_engaged && size <= MEDIUM_LIMIT_THRESHOLD) { - msize = MEDIUM_MSIZE_FOR_BYTES(size + MEDIUM_QUANTUM - 1); - if (!msize) { - msize = 1; - } - ptr = medium_malloc_should_clear(&szone->medium_rack, msize, cleared_requested); -#endif - } else { - size_t num_kernel_pages = round_page_quanta(size) >> vm_page_quanta_shift; - if (num_kernel_pages == 0) { /* Overflowed */ - ptr = 0; - } else { - ptr = large_malloc(szone, num_kernel_pages, 0, cleared_requested); - } - } -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "szone_malloc returned %p\n", ptr); - } -#endif - /* - * If requested, scribble on allocated memory. - */ - if ((szone->debug_flags & MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) { - memset(ptr, SCRIBBLE_BYTE, szone_size(szone, ptr)); - } - - return ptr; -} - -void * -szone_malloc(szone_t *szone, size_t size) -{ - return szone_malloc_should_clear(szone, size, 0); -} - -void * -szone_calloc(szone_t *szone, size_t num_items, size_t size) -{ - size_t total_bytes; - if (calloc_get_size(num_items, size, 0, &total_bytes)) { - return NULL; - } - return szone_malloc_should_clear(szone, total_bytes, 1); -} - -void * -szone_valloc(szone_t *szone, size_t size) -{ - void *ptr; - - if (size <= MEDIUM_LIMIT_THRESHOLD) { - ptr = szone_memalign(szone, vm_page_quanta_size, size); - } else { - size_t num_kernel_pages; - - num_kernel_pages = round_page_quanta(size) >> vm_page_quanta_shift; - ptr = large_malloc(szone, num_kernel_pages, 0, 0); - } - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "szone_valloc returned %p\n", ptr); - } -#endif - return ptr; -} - -/* Isolate PIC-base load here. 
- */
-size_t
-szone_size_try_large(szone_t *szone, const void *ptr)
-{
-	size_t size = 0;
-	large_entry_t *entry;
-
-	SZONE_LOCK(szone);
-	entry = large_entry_for_pointer_no_lock(szone, ptr);
-	if (entry) {
-		size = entry->size;
-	}
-	SZONE_UNLOCK(szone);
-#if DEBUG_MALLOC
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_INFO, "szone_size for %p returned %d\n", ptr, (unsigned)size);
-	}
-#endif
-	return size;
-}
-
-size_t
-szone_size(szone_t *szone, const void *ptr)
-{
-	size_t sz = 0;
-
-	if (!ptr) {
-		return 0;
-	}
-#if DEBUG_MALLOC
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_INFO, "in szone_size for %p (szone=%p)\n", ptr, szone);
-	}
-#endif
-
-	/*
-	 * Look for it in a tiny region.
-	 */
-	if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
-		return 0;
-	}
-
-	sz = tiny_size(&szone->tiny_rack, ptr);
-	if (sz) {
-		return sz;
-	}
-
-	/*
-	 * Look for it in a small region.
-	 */
-	if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
-		return 0;
-	}
-
-	sz = small_size(&szone->small_rack, ptr);
-	if (sz) {
-		return sz;
-	}
-
-#if CONFIG_MEDIUM_ALLOCATOR
-	/*
-	 * Look for it in a medium region.
-	 */
-	if (szone->is_medium_engaged) {
-		sz = medium_size(&szone->medium_rack, ptr);
-		if (sz) {
-			return sz;
-		}
-	}
-#endif // CONFIG_MEDIUM_ALLOCATOR
-
-	/*
-	 * If not page-aligned, it cannot have come from a large allocation.
-	 */
-	if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) {
-		return 0;
-	}
-
-	/*
-	 * Look for it in a large entry.
-	 */
-	return szone_size_try_large(szone, ptr);
-}
-
-void *
-szone_realloc(szone_t *szone, void *ptr, size_t new_size)
-{
-	size_t old_size, new_good_size, valid_size;
-	void *new_ptr;
-
-#if DEBUG_MALLOC
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_INFO, "in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
-	}
-#endif
-	if (NULL == ptr) {
-		// If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size.
-		return szone_malloc(szone, new_size);
-	} else if (0 == new_size) {
-		// If size is 0 and ptr is not a null pointer, the object pointed to is freed.
-		szone_free(szone, ptr);
-		// If size is 0, either a null pointer or a unique pointer that can be successfully passed
-		// to free() shall be returned.
-		return szone_malloc(szone, 1);
-	}
-
-	old_size = szone_size(szone, ptr);
-	if (!old_size) {
-		malloc_zone_error(szone->debug_flags, true, "pointer %p being reallocated was not allocated\n", ptr);
-		return NULL;
-	}
-
-	new_good_size = szone_good_size(szone, new_size);
-	if (new_good_size == old_size) { // Existing allocation is already the best fit ever?
-		return ptr;
-	}
-
-	/*
-	 * If the new size suits the tiny allocator and the pointer being resized
-	 * belongs to a tiny region, try to reallocate in-place.
-	 */
-	if (new_good_size <= TINY_LIMIT_THRESHOLD) {
-		if (old_size <= TINY_LIMIT_THRESHOLD) {
-			if (new_good_size <= (old_size >> 1)) {
-				/*
-				 * Serious shrinkage (more than half). free() the excess.
-				 */
-				return tiny_try_shrink_in_place(&szone->tiny_rack, ptr, old_size, new_good_size);
-			} else if (new_good_size <= old_size) {
-				/*
-				 * new_good_size smaller than old_size but not by much (less than half).
-				 * Avoid thrashing at the expense of some wasted storage.
- */ - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - return ptr; - } else if (tiny_try_realloc_in_place(&szone->tiny_rack, ptr, old_size, new_good_size)) { // try to grow the allocation - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size); - } - return ptr; - } - } - - /* - * Else if the new size suits the small allocator and the pointer being resized - * belongs to a small region, and we're not protecting the small allocations - * try to reallocate in-place. - */ - } else if (new_good_size <= SMALL_LIMIT_THRESHOLD) { - if (TINY_LIMIT_THRESHOLD < old_size && old_size <= SMALL_LIMIT_THRESHOLD) { - if (new_good_size <= (old_size >> 1)) { - return small_try_shrink_in_place(&szone->small_rack, ptr, old_size, new_good_size); - } else if (new_good_size <= old_size) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - return ptr; - } else if (small_try_realloc_in_place(&szone->small_rack, ptr, old_size, new_good_size)) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size); - } - return ptr; - } - } - -#if CONFIG_MEDIUM_ALLOCATOR - } else if (szone->is_medium_engaged && new_good_size <= MEDIUM_LIMIT_THRESHOLD) { - if (SMALL_LIMIT_THRESHOLD < old_size && old_size <= MEDIUM_LIMIT_THRESHOLD) { - if (new_good_size <= (old_size >> 1)) { - return medium_try_shrink_in_place(&szone->medium_rack, ptr, old_size, new_good_size); - } else if (new_good_size <= old_size) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - } else if (medium_try_realloc_in_place(&szone->medium_rack, ptr, old_size, new_good_size)) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size); - } - return ptr; - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - /* - * Else if the allocation's a large allocation, try to reallocate in-place there. - */ - } else if (!(szone->debug_flags & MALLOC_PURGEABLE) && // purgeable needs fresh allocation - (old_size > LARGE_THRESHOLD(szone)) && (new_good_size > LARGE_THRESHOLD(szone))) { - if (new_good_size <= (old_size >> 1)) { - return large_try_shrink_in_place(szone, ptr, old_size, new_good_size); - } else if (new_good_size <= old_size) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - return ptr; - } else if (large_try_realloc_in_place(szone, ptr, old_size, new_good_size)) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size); - } - return ptr; - } - } - - /* - * Can't reallocate in place for whatever reason; allocate a new buffer and copy. - */ - if (new_good_size <= (old_size >> 1)) { - /* Serious shrinkage (more than half). FALL THROUGH to alloc/copy/free. */ - } else if (new_good_size <= old_size) { - if (szone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - return ptr; - } - - new_ptr = szone_malloc(szone, new_size); - if (new_ptr == NULL) { - return NULL; - } - - /* - * If the allocation's large enough, try to copy using VM. If that fails, or - * if it's too small, just copy by hand. 
- */ - valid_size = MIN(old_size, new_size); -#if CONFIG_REALLOC_CAN_USE_VMCOPY - if ((valid_size <= VM_COPY_THRESHOLD) || - vm_copy(mach_task_self(), (vm_address_t)ptr, valid_size, (vm_address_t)new_ptr)) -#endif - { - memcpy(new_ptr, ptr, valid_size); - } - szone_free(szone, ptr); - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size); - } -#endif - return new_ptr; -} - -void * -szone_memalign(szone_t *szone, size_t alignment, size_t size) -{ - if (size == 0) { - size = 1; // Ensures we'll return an aligned free()-able pointer - } - if ((size + alignment) < size) { // size_t arithmetic wrapped! - return NULL; - } - - // alignment is a power of 2 at least as large as sizeof(void *), hence - // non-zero. Since size + alignment didn't wrap, 0 <= size + alignment - 1 - // < size + alignment - size_t span = size + alignment - 1; - - if (alignment <= TINY_QUANTUM) { - // Trivially satisfied by tiny, small, medium, or large. - return szone_malloc(szone, size); - } - if (span <= TINY_LIMIT_THRESHOLD) { - return tiny_memalign(szone, alignment, size, span); - } - if (TINY_LIMIT_THRESHOLD < size && alignment <= SMALL_QUANTUM) { - // Trivially satisfied by small, medium or large. - return szone_malloc(szone, size); - } - if (size <= TINY_LIMIT_THRESHOLD) { - // The allocation asked for a size that TINY would normally fulfill - // but it cannot guarantee the alignment. So bump it up to fit inside - // SMALL and try again. - size = TINY_LIMIT_THRESHOLD + TINY_QUANTUM; - span = size + alignment - 1; - } - if (span <= SMALL_LIMIT_THRESHOLD) { - return small_memalign(szone, alignment, size, span); - } -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - if (size <= SMALL_LIMIT_THRESHOLD) { - size = SMALL_LIMIT_THRESHOLD + SMALL_QUANTUM; - span = size + alignment - 1; - } - if (szone->is_medium_engaged && span <= MEDIUM_LIMIT_THRESHOLD) { - return medium_memalign(szone, alignment, size, span); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - if (LARGE_THRESHOLD(szone) < size && alignment <= vm_page_quanta_size) { - // Trivially satisfied by large (which rounds to a whole page). - return szone_malloc(szone, size); - } - // ensure block allocated by large does not have a small-possible size - size_t num_kernel_pages = round_page_quanta(MAX(LARGE_THRESHOLD(szone) + 1, - size)) >> vm_page_quanta_shift; - if (num_kernel_pages == 0) { /* Overflowed */ - return NULL; - } else { - return large_malloc(szone, num_kernel_pages, - MAX(vm_page_quanta_shift, __builtin_ctz((unsigned)alignment)), 0); - } - /* NOTREACHED */ - __builtin_unreachable(); -} - -// Given a size, returns the number of pointers allocated capable of holding -// that size, up to the limit specified by the 'count' argument. These pointers -// are stored in the 'results' array, which must be allocated by the caller. -// May return zero, since this function is only a best attempt at allocating -// the pointers. Clients should be prepared to call malloc for any additional -// blocks they need. 
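szone_memalign above sizes its request as span = size + alignment - 1, after first rejecting a wrapped size + alignment, because a span that long is guaranteed to contain an alignment-aligned block of size bytes. That overflow-checked pattern in isolation (the helper names here are hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    /* Worst-case bytes needed to carve `size` bytes aligned to `alignment`
     * (a power of two) out of an arbitrarily aligned block. Returns 0 if
     * the size_t arithmetic would wrap, which the caller must check. */
    static size_t
    aligned_span(size_t size, size_t alignment)
    {
        if (size + alignment < size) { /* size_t arithmetic wrapped */
            return 0;
        }
        return size + alignment - 1;
    }

    /* The aligned start within such a span. */
    static void *
    align_up(void *base, size_t alignment)
    {
        return (void *)(((uintptr_t)base + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }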
-unsigned -szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) -{ - // only bother implementing this for tiny - if (size <= TINY_LIMIT_THRESHOLD) { - return tiny_batch_malloc(szone, size, results, count); - } - return 0; -} - -void -szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) -{ - // frees all the pointers in to_be_freed - // note that to_be_freed may be overwritten during the process - if (!count) { - return; - } - - CHECK(szone, __PRETTY_FUNCTION__); - - // We only support batch malloc in tiny. Let it free all of the pointers - // that belong to it, then let the standard free deal with the rest. - tiny_batch_free(szone, to_be_freed, count); - - CHECK(szone, __PRETTY_FUNCTION__); - while (count--) { - void *ptr = to_be_freed[count]; - if (ptr) { - szone_free(szone, ptr); - } - } -} - -// FIXME: Suppose one of the locks is held? -static void -szone_destroy(szone_t *szone) -{ - size_t index; - large_entry_t *large; - vm_range_t range_to_deallocate; - -#if CONFIG_LARGE_CACHE - SZONE_LOCK(szone); - - /* disable any memory pressure responder */ - szone->flotsam_enabled = FALSE; - - // stack allocated copy of the death-row cache - int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest; - large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE_HIGH]; - - memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache)); - - szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0; - szone->large_entry_cache[0].address = 0x0; - szone->large_entry_cache[0].size = 0; - szone->large_entry_cache_bytes = 0; - szone->large_entry_cache_reserve_bytes = 0; - - SZONE_UNLOCK(szone); - - // deallocate the death-row cache outside the zone lock - while (idx != idx_max) { - mvm_deallocate_pages((void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0); - if (++idx == szone->large_cache_depth) { - idx = 0; - } - } - if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) { - mvm_deallocate_pages((void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0); - } -#endif - - /* destroy large entries */ - index = szone->num_large_entries; - while (index--) { - large = szone->large_entries + index; - if (large->address) { - // we deallocate_pages, including guard pages - mvm_deallocate_pages((void *)(large->address), large->size, szone->debug_flags); - } - } - large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); - if (range_to_deallocate.size) { - mvm_deallocate_pages((void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); - } - - /* destroy allocator regions */ - rack_destroy_regions(&szone->tiny_rack, TINY_REGION_SIZE); - rack_destroy_regions(&szone->small_rack, SMALL_REGION_SIZE); - - /* destroy rack region hash rings and racks themselves */ - rack_destroy(&szone->tiny_rack); - rack_destroy(&szone->small_rack); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - rack_destroy_regions(&szone->medium_rack, MEDIUM_REGION_SIZE); - rack_destroy(&szone->medium_rack); - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - mvm_deallocate_pages((void *)szone, SZONE_PAGED_SIZE, 0); -} - -size_t -szone_good_size(szone_t *szone, size_t size) -{ - msize_t msize; - - // Find a good size for this tiny allocation. 
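The szone_good_size body continuing below rounds a request up to a whole number of quanta by converting it to an msize, a count of quantum-sized units, with a floor of one unit; the malloc paths above perform the same conversion per size class. A sketch under an assumed 16-byte quantum (the real TINY_QUANTUM and friends come from the deleted platform headers):

    #include <stddef.h>

    #define QUANTUM 16 /* assumption; the real quantum varies per size class */

    typedef unsigned short msize_t; /* count of quantum units, as above */

    /* Round bytes up to whole quanta, never returning zero units. */
    static msize_t
    msize_for_bytes(size_t bytes)
    {
        msize_t m = (msize_t)((bytes + QUANTUM - 1) / QUANTUM);
        return m ? m : 1;
    }

    static size_t
    good_size(size_t bytes)
    {
        return (size_t)msize_for_bytes(bytes) * QUANTUM;
    }

    /* e.g. good_size(0) == 16, good_size(17) == 32. */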
-	if (size <= TINY_LIMIT_THRESHOLD) {
-		msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
-		if (!msize) {
-			msize = 1;
-		}
-		return TINY_BYTES_FOR_MSIZE(msize);
-	}
-
-	// Find a good size for this small allocation.
-	if (size <= SMALL_LIMIT_THRESHOLD) {
-		msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
-		if (!msize) {
-			msize = 1;
-		}
-		return SMALL_BYTES_FOR_MSIZE(msize);
-	}
-
-#if CONFIG_MEDIUM_ALLOCATOR
-	if (szone->is_medium_engaged && size <= MEDIUM_LIMIT_THRESHOLD) {
-		msize = MEDIUM_MSIZE_FOR_BYTES(size + MEDIUM_QUANTUM - 1);
-		if (!msize) {
-			msize = 1;
-		}
-		return MEDIUM_BYTES_FOR_MSIZE(msize);
-	}
-#endif // CONFIG_MEDIUM_ALLOCATOR
-
-	// Check for integer overflow on the size, since unlike the two cases above,
-	// there is no upper bound on allocation size at this point.
-	if (size > round_page_quanta(size)) {
-		return (size_t)(-1LL);
-	}
-
-#if DEBUG_MALLOC
-	// It is not acceptable to see a size of zero here, since that means we
-	// failed to catch a request for zero bytes in the tiny check, or the size
-	// overflowed to zero during some arithmetic.
-	if (size == 0) {
-		malloc_report(ASL_LEVEL_INFO, "szone_good_size() invariant broken %y\n", size);
-	}
-#endif
-	return round_page_quanta(size);
-}
-
-boolean_t
-szone_claimed_address(szone_t *szone, void *ptr)
-{
-	return tiny_claimed_address(&szone->tiny_rack, ptr)
-			|| small_claimed_address(&szone->small_rack, ptr)
-#if CONFIG_MEDIUM_ALLOCATOR
-			|| (szone->is_medium_engaged &&
-				medium_claimed_address(&szone->medium_rack, ptr))
-#endif // CONFIG_MEDIUM_ALLOCATOR
-			|| large_claimed_address(szone, ptr);
-}
-
-unsigned szone_check_counter = 0;
-unsigned szone_check_start = 0;
-unsigned szone_check_modulo = 1;
-
-static MALLOC_NOINLINE boolean_t
-szone_check_all(szone_t *szone, const char *function)
-{
-	size_t index;
-
-	/* check tiny regions - could check region count */
-	for (index = 0; index < szone->tiny_rack.region_generation->num_regions_allocated; ++index) {
-		region_t tiny = szone->tiny_rack.region_generation->hashed_regions[index];
-
-		if (HASHRING_REGION_DEALLOCATED == tiny) {
-			continue;
-		}
-
-		if (tiny) {
-			magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines,
-					REGION_TRAILER_FOR_TINY_REGION(tiny),
-					MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
-
-			if (!tiny_check_region(&szone->tiny_rack, tiny, index, szone_check_counter)) {
-				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
-				szone->debug_flags &= ~CHECK_REGIONS;
-				return 0;
-			}
-			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
-		}
-	}
-	/* check tiny free lists */
-	for (index = 0; index < NUM_TINY_SLOTS; ++index) {
-		if (!tiny_free_list_check(&szone->tiny_rack, (grain_t)index, szone_check_counter)) {
-			szone->debug_flags &= ~CHECK_REGIONS;
-			return 0;
-		}
-	}
-
-	/* check small regions - could check region count */
-	for (index = 0; index < szone->small_rack.region_generation->num_regions_allocated; ++index) {
-		region_t small = szone->small_rack.region_generation->hashed_regions[index];
-
-		if (HASHRING_REGION_DEALLOCATED == small) {
-			continue;
-		}
-
-		if (small) {
-			magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone->small_rack.magazines,
-					REGION_TRAILER_FOR_SMALL_REGION(small),
-					MAGAZINE_INDEX_FOR_SMALL_REGION(small));
-
-			if (!small_check_region(&szone->small_rack, small, index, szone_check_counter)) {
-				SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr);
-				szone->debug_flags &= ~CHECK_REGIONS;
-				return 0;
-			}
-			SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr);
-		}
-	}
-	/* check small free lists */
-	for (index = 0; index
< SMALL_FREE_SLOT_COUNT(&szone->small_rack); ++index) { - if (!small_free_list_check(&szone->small_rack, (grain_t)index, szone_check_counter)) { - szone->debug_flags &= ~CHECK_REGIONS; - return 0; - } - } - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - /* check medium regions - could check region count */ - for (index = 0; index < szone->medium_rack.region_generation->num_regions_allocated; ++index) { - region_t medium = szone->medium_rack.region_generation->hashed_regions[index]; - - if (HASHRING_REGION_DEALLOCATED == medium) { - continue; - } - - if (medium) { - magazine_t *medium_mag_ptr = mag_lock_zine_for_region_trailer(szone->medium_rack.magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(medium), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium)); - - if (!medium_check_region(&szone->medium_rack, medium, index, szone_check_counter)) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - szone->debug_flags &= ~CHECK_REGIONS; - return 0; - } - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - } - } - /* check medium free lists */ - for (index = 0; index < MEDIUM_FREE_SLOT_COUNT(&szone->medium_rack); ++index) { - if (!medium_free_list_check(&szone->medium_rack, (grain_t)index, szone_check_counter)) { - szone->debug_flags &= ~CHECK_REGIONS; - return 0; - } - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - return 1; -} - -static boolean_t -szone_check(szone_t *szone) -{ - if ((++szone_check_counter % 10000) == 0) { - malloc_report(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter); - } - - if (szone_check_counter < szone_check_start) { - return 1; - } - - if (szone_check_counter % szone_check_modulo) { - return 1; - } - - return szone_check_all(szone, ""); -} - -static kern_return_t -szone_ptr_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - vm_address_t zone_address, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - szone_t *szone; - kern_return_t err; - - if (!reader) { - reader = _malloc_default_reader; - } - - err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); - if (err) { - return err; - } - - err = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder); - if (err) { - return err; - } - - err = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder); - if (err) { - return err; - } - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - err = medium_in_use_enumerator(task, context, type_mask, szone, reader, recorder); - if (err) { - return err; - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - err = large_in_use_enumerator( - task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder); - return err; -} - -static boolean_t -scalable_zone_info_task(task_t task, memory_reader_t reader, - malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) -{ - szone_t *szone = (void *)zone; - unsigned info[13]; - - // We do not lock to facilitate debug - - size_t s = 0; - unsigned t = 0; - size_t u = 0; - mag_index_t mag_index; - - magazine_t *mapped_magazines; - if (reader(task, (vm_address_t)szone->tiny_rack.magazines, - sizeof(magazine_t), (void **)&mapped_magazines)) { - return false; - } - for (mag_index = -1; mag_index < szone->tiny_rack.num_magazines; mag_index++) { - s += mapped_magazines[mag_index].mag_bytes_free_at_start; - s += mapped_magazines[mag_index].mag_bytes_free_at_end; - t += mapped_magazines[mag_index].mag_num_objects; - u += mapped_magazines[mag_index].mag_num_bytes_in_objects; - } - - info[4] = (unsigned)t; - info[5] = 
(unsigned)u; - - if (reader(task, (vm_address_t)szone->small_rack.magazines, - sizeof(magazine_t), (void **)&mapped_magazines)) { - return false; - } - for (t = 0, u = 0, mag_index = -1; mag_index < szone->small_rack.num_magazines; mag_index++) { - s += mapped_magazines[mag_index].mag_bytes_free_at_start; - s += mapped_magazines[mag_index].mag_bytes_free_at_end; - t += mapped_magazines[mag_index].mag_num_objects; - u += mapped_magazines[mag_index].mag_num_bytes_in_objects; - } - - info[6] = (unsigned)t; - info[7] = (unsigned)u; - - info[8] = (unsigned)szone->num_large_objects_in_use; - info[9] = (unsigned)szone->num_bytes_in_large_objects; - - info[10] = 0; // DEPRECATED szone->num_huge_entries; - info[11] = 0; // DEPRECATED szone->num_bytes_in_huge_objects; - - info[12] = szone->debug_flags; - - info[0] = info[4] + info[6] + info[8] + info[10]; - info[1] = info[5] + info[7] + info[9] + info[11]; - - info[3] = (unsigned)(szone->tiny_rack.num_regions - szone->tiny_rack.num_regions_dealloc) * TINY_REGION_SIZE + - (unsigned)(szone->small_rack.num_regions - szone->small_rack.num_regions_dealloc) * SMALL_REGION_SIZE + info[9] + info[11]; - - info[2] = info[3] - (unsigned)s; - memcpy(info_to_fill, info, sizeof(unsigned) * count); - - return true; -} - -// Following method is deprecated: use scalable_zone_statistics instead -// Required for backward compatibility. -void -scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) { - scalable_zone_info_task(mach_task_self(), _malloc_default_reader, zone, - info_to_fill, count); -} - -// FIXME: consistent picture requires locking! -static MALLOC_NOINLINE void -szone_print(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - unsigned info[13]; - size_t index; - region_t region; - region_t mapped_region; - - szone_t *szone = (szone_t *)zone_address; - szone_t *mapped_szone; - if (reader(task, zone_address, sizeof(szone_t), (void **)&mapped_szone)) { - printer("Failed to read szone structure\n"); - return; - } - - if (!scalable_zone_info_task(task, reader, (void *)mapped_szone, info, 13)) { - printer("Failed to get scalable zone info\n"); - return; - } - printer("Scalable zone %p: inUse=%u(%u) touched=%u allocated=%u flags=0x%x\n", - zone_address, info[0], info[1], info[2], info[3], info[12]); - printer("\ttiny=%u(%u) small=%u(%u) large=%u(%u)\n", info[4], - info[5], info[6], info[7], info[8], info[9]); - // tiny - printer("%lu tiny regions:\n", mapped_szone->tiny_rack.num_regions); - if (mapped_szone->tiny_rack.num_regions_dealloc) { - printer("[%lu tiny regions have been vm_deallocate'd]\n", - mapped_szone->tiny_rack.num_regions_dealloc); - } - - region_hash_generation_t *mapped_region_generation; - region_t *mapped_hashed_regions; - magazine_t *mapped_magazines; - if (reader(task, (vm_address_t)mapped_szone->tiny_rack.region_generation, - sizeof(region_hash_generation_t), (void **)&mapped_region_generation)) { - printer("Failed to map tiny rack region_generation\n"); - return; - } - if (reader(task, (vm_address_t)mapped_region_generation->hashed_regions, - sizeof(region_t), (void **)&mapped_hashed_regions)) { - printer("Failed to map tiny rack hashed_regions\n"); - return; - } - if (reader(task, (vm_address_t)mapped_szone->tiny_rack.magazines, - mapped_szone->tiny_rack.num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map tiny rack magazines\n"); - return; - } - - int recirc_regions = 0; - for (index = 0; index < 
mapped_region_generation->num_regions_allocated; ++index) { - region = mapped_hashed_regions[index]; - if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { - if (reader(task, (vm_address_t)region, sizeof(struct tiny_region), - (void **)&mapped_region)) { - printer("Failed to map region %p\n", region); - return; - } - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - recirc_regions++; - } - print_tiny_region(task, reader, printer, level, region, - (region == mapped_magazines[mag_index].mag_last_region) - ? mapped_magazines[mag_index].mag_bytes_free_at_start - : 0, - (region == mapped_magazines[mag_index].mag_last_region) - ? mapped_magazines[mag_index].mag_bytes_free_at_end - : 0); - } - } - -#if CONFIG_RECIRC_DEPOT - magazine_t *mapped_recirc_depot = &mapped_magazines[DEPOT_MAGAZINE_INDEX]; - if (mapped_recirc_depot->mag_num_bytes_in_objects) { - printer("Tiny recirc depot: total bytes: %llu, in-use bytes: %llu, " - "allocations: %llu, regions: %d (min # retained regions: %d)\n", - mapped_recirc_depot->num_bytes_in_magazine, - mapped_recirc_depot->mag_num_bytes_in_objects, - mapped_recirc_depot->mag_num_objects, recirc_regions, - recirc_retained_regions); - } else { - printer("Tiny recirc depot is empty\n"); - } -#else // CONFIG_RECIRC_DEPOT - printer("Tiny recirc depot not configured\n"); -#endif // CONFIG_RECIRC_DEPOT - - if (level > 0) { - print_tiny_free_list(task, reader, printer, &szone->tiny_rack); - } - - // small - printer("%lu small regions:\n", mapped_szone->small_rack.num_regions); - if (mapped_szone->small_rack.num_regions_dealloc) { - printer("[%lu small regions have been vm_deallocate'd]\n", - mapped_szone->small_rack.num_regions_dealloc); - } - if (reader(task, (vm_address_t)mapped_szone->small_rack.region_generation, - sizeof(region_hash_generation_t), (void **)&mapped_region_generation)) { - printer("Failed to map small rack region_generation\n"); - return; - } - if (reader(task, (vm_address_t)mapped_region_generation->hashed_regions, - sizeof(region_t), (void **)&mapped_hashed_regions)) { - printer("Failed to map small rack hashed_regions\n"); - return; - } - if (reader(task, (vm_address_t)mapped_szone->small_rack.magazines, - mapped_szone->small_rack.num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map small rack magazines\n"); - return; - } - - recirc_regions = 0; - for (index = 0; index < mapped_region_generation->num_regions_allocated; ++index) { - region = mapped_hashed_regions[index]; - if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { - if (reader(task, (vm_address_t)region, sizeof(struct small_region), - (void **)&mapped_region)) { - printer("Failed to map region %p\n", region); - return; - } - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - recirc_regions++; - } - print_small_region(task, reader, printer, mapped_szone, level, region, - (region == mapped_magazines[mag_index].mag_last_region) - ? mapped_magazines[mag_index].mag_bytes_free_at_start - : 0, - (region == mapped_magazines[mag_index].mag_last_region) - ? 
mapped_magazines[mag_index].mag_bytes_free_at_end - : 0); - } - } - -#if CONFIG_RECIRC_DEPOT - mapped_recirc_depot = &mapped_magazines[DEPOT_MAGAZINE_INDEX]; - if (mapped_recirc_depot->mag_num_bytes_in_objects) { - printer("Small recirc depot: total bytes: %llu, in-use bytes: %llu, " - "allocations: %llu, regions: %d (min # retained regions: %d)\n", - mapped_recirc_depot->num_bytes_in_magazine, - mapped_recirc_depot->mag_num_bytes_in_objects, - mapped_recirc_depot->mag_num_objects, recirc_regions, - recirc_retained_regions); - } else { - printer("Small recirc depot is empty\n"); - } -#else // CONFIG_RECIRC_DEPOT - printer("Small recirc depot not configured\n"); -#endif // CONFIG_RECIRC_DEPOT - - if (level > 0) { - print_small_free_list(task, reader, printer, &szone->small_rack); - } - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - // medium - printer("%lu medium regions:\n", mapped_szone->medium_rack.num_regions); - if (mapped_szone->medium_rack.num_regions_dealloc) { - printer("[%lu medium regions have been vm_deallocate'd]\n", - mapped_szone->medium_rack.num_regions_dealloc); - } - if (reader(task, (vm_address_t)mapped_szone->medium_rack.region_generation, - sizeof(region_hash_generation_t), (void **)&mapped_region_generation)) { - printer("Failed to map medium rack region_generation\n"); - return; - } - if (reader(task, (vm_address_t)mapped_region_generation->hashed_regions, - sizeof(region_t), (void **)&mapped_hashed_regions)) { - printer("Failed to map medium rack hashed_regions\n"); - return; - } - if (reader(task, (vm_address_t)mapped_szone->medium_rack.magazines, - mapped_szone->medium_rack.num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map medium rack magazines\n"); - return; - } - - recirc_regions = 0; - for (index = 0; index < mapped_region_generation->num_regions_allocated; ++index) { - region = mapped_hashed_regions[index]; - if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { - if (reader(task, (vm_address_t)region, sizeof(struct medium_region), - (void **)&mapped_region)) { - printer("Failed to map region %p\n", region); - return; - } - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(mapped_region); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - recirc_regions++; - } - print_medium_region(task, reader, printer, mapped_szone, level, - region, - (region == mapped_magazines[mag_index].mag_last_region) - ? mapped_magazines[mag_index].mag_bytes_free_at_start - : 0, - (region == mapped_magazines[mag_index].mag_last_region) - ? 
mapped_magazines[mag_index].mag_bytes_free_at_end - : 0); - } - } - -#if CONFIG_RECIRC_DEPOT - mapped_recirc_depot = &mapped_magazines[DEPOT_MAGAZINE_INDEX]; - if (mapped_recirc_depot->mag_num_bytes_in_objects) { - printer("Medium recirc depot: total bytes: %llu, in-use bytes: %llu, " - "allocations: %llu, regions: %d (min # retained regions: %d)\n", - mapped_recirc_depot->num_bytes_in_magazine, - mapped_recirc_depot->mag_num_bytes_in_objects, - mapped_recirc_depot->mag_num_objects, recirc_regions, - recirc_retained_regions); - } else { - printer("Medium recirc depot is empty\n"); - } -#else // CONFIG_RECIRC_DEPOT - printer("Medium recirc depot not configured\n"); -#endif // CONFIG_RECIRC_DEPOT - - if (level > 0) { - print_medium_free_list(task, reader, printer, &szone->medium_rack); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - // Large - large_debug_print(task, level, zone_address, reader, printer); -} - -static void -szone_print_self(szone_t *szone, boolean_t verbose) -{ - szone_print(mach_task_self(), verbose ? MALLOC_VERBOSE_PRINT_LEVEL : 0, - (vm_address_t)szone, _malloc_default_reader, malloc_report_simple); -} - -static void -szone_print_task(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - szone_print(task, level, zone_address, reader, printer); -} - -static void -szone_log(malloc_zone_t *zone, void *log_address) -{ - szone_t *szone = (szone_t *)zone; - - szone->log_address = log_address; -} - -// -// When forcing the lock on the entire zone, make sure we are out of the critical section in each magazine -static MALLOC_INLINE void -szone_force_lock_magazine(szone_t *szone, magazine_t *mag) -{ - while (1) { - SZONE_MAGAZINE_PTR_LOCK(mag); - if (!mag->alloc_underway) { - return; - } - - SZONE_MAGAZINE_PTR_UNLOCK(mag); - yield(); - } -} - -static void -szone_force_lock(szone_t *szone) -{ - mag_index_t i; - - for (i = 0; i < szone->tiny_rack.num_magazines; ++i) { - szone_force_lock_magazine(szone, &szone->tiny_rack.magazines[i]); - } - szone_force_lock_magazine(szone, &szone->tiny_rack.magazines[DEPOT_MAGAZINE_INDEX]); - - for (i = 0; i < szone->small_rack.num_magazines; ++i) { - szone_force_lock_magazine(szone, &szone->small_rack.magazines[i]); - } - szone_force_lock_magazine(szone, &szone->small_rack.magazines[DEPOT_MAGAZINE_INDEX]); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - for (i = 0; i < szone->medium_rack.num_magazines; ++i) { - szone_force_lock_magazine(szone, &szone->medium_rack.magazines[i]); - } - szone_force_lock_magazine(szone, &szone->medium_rack.magazines[DEPOT_MAGAZINE_INDEX]); - } -#endif - - SZONE_LOCK(szone); -} - -static void -szone_force_unlock(szone_t *szone) -{ - mag_index_t i; - - SZONE_UNLOCK(szone); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - for (i = -1; i < szone->medium_rack.num_magazines; ++i) { - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->medium_rack.magazines[i]))); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - for (i = -1; i < szone->small_rack.num_magazines; ++i) { - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->small_rack.magazines[i]))); - } - - for (i = -1; i < szone->tiny_rack.num_magazines; ++i) { - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->tiny_rack.magazines[i]))); - } -} - -static void -szone_reinit_lock(szone_t *szone) -{ - mag_index_t i; - - SZONE_REINIT_LOCK(szone); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - for (i = -1; i < szone->medium_rack.num_magazines; ++i) { - 
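The ordering in szone_force_lock above is deliberate: every magazine of every rack is taken first, spinning until no allocation is mid-flight (alloc_underway), and only then the zone-level lock; szone_force_unlock releases in the reverse order. The point is to quiesce the zone around fork(). A sketch of the intended wiring, with a hypothetical the_szone handle; the child side presumably uses szone_reinit_lock (defined here) because locks held by parent threads that no longer exist in the child must be reinitialized rather than unlocked:

    #include <pthread.h>

    static szone_t *the_szone;   /* hypothetical: some scalable zone */

    static void fork_prepare(void) { szone_force_lock(the_szone); }
    static void fork_parent(void)  { szone_force_unlock(the_szone); }
    static void fork_child(void)   { szone_reinit_lock(the_szone); }

    static void
    install_fork_hooks(void)
    {
        pthread_atfork(fork_prepare, fork_parent, fork_child);
    }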
SZONE_MAGAZINE_PTR_REINIT_LOCK((&(szone->medium_rack.magazines[i]))); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - for (i = -1; i < szone->small_rack.num_magazines; ++i) { - SZONE_MAGAZINE_PTR_REINIT_LOCK((&(szone->small_rack.magazines[i]))); - } - - for (i = -1; i < szone->tiny_rack.num_magazines; ++i) { - SZONE_MAGAZINE_PTR_REINIT_LOCK((&(szone->tiny_rack.magazines[i]))); - } -} - -static boolean_t -szone_locked(szone_t *szone) -{ - mag_index_t i; - int tookLock; - - tookLock = SZONE_TRY_LOCK(szone); - if (tookLock == 0) { - return 1; - } - SZONE_UNLOCK(szone); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - for (i = -1; i < szone->medium_rack.num_magazines; ++i) { - tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK((&(szone->medium_rack.magazines[i]))); - if (tookLock == 0) { - return 1; - } - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->medium_rack.magazines[i]))); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - for (i = -1; i < szone->small_rack.num_magazines; ++i) { - tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK((&(szone->small_rack.magazines[i]))); - if (tookLock == 0) { - return 1; - } - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->small_rack.magazines[i]))); - } - - for (i = -1; i < szone->tiny_rack.num_magazines; ++i) { - tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK((&(szone->tiny_rack.magazines[i]))); - if (tookLock == 0) { - return 1; - } - SZONE_MAGAZINE_PTR_UNLOCK((&(szone->tiny_rack.magazines[i]))); - } - return 0; -} - -size_t -szone_pressure_relief(szone_t *szone, size_t goal) -{ - size_t total = 0; - - MAGMALLOC_PRESSURERELIEFBEGIN((void *)szone, szone->basic_zone.zone_name, (int)goal); // DTrace USDT Probe - MALLOC_TRACE(TRACE_malloc_memory_pressure | DBG_FUNC_START, (uint64_t)szone, goal, 0, 0); - -#if CONFIG_MADVISE_PRESSURE_RELIEF - tiny_madvise_pressure_relief(&szone->tiny_rack); - small_madvise_pressure_relief(&szone->small_rack); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - medium_madvise_pressure_relief(&szone->medium_rack); - } -#endif // CONFIG_MEDIUM_ALLOCATOR -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -#if CONFIG_LARGE_CACHE - if (szone->flotsam_enabled) { - SZONE_LOCK(szone); - - // stack allocated copy of the death-row cache - int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest; - large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE_HIGH]; - - memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache)); - - szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0; - szone->large_entry_cache[0].address = 0x0; - szone->large_entry_cache[0].size = 0; - szone->large_entry_cache_bytes = 0; - szone->large_entry_cache_reserve_bytes = 0; - - szone->flotsam_enabled = FALSE; - - SZONE_UNLOCK(szone); - - // deallocate the death-row cache outside the zone lock - while (idx != idx_max) { - mvm_deallocate_pages((void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0); - total += local_entry_cache[idx].size; - if (++idx == szone->large_cache_depth) { - idx = 0; - } - } - if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) { - mvm_deallocate_pages((void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0); - total += local_entry_cache[idx].size; - } - } -#endif - - MAGMALLOC_PRESSURERELIEFEND((void *)szone, szone->basic_zone.zone_name, (int)goal, (int)total); // DTrace USDT Probe - MALLOC_TRACE(TRACE_malloc_memory_pressure | DBG_FUNC_END, (uint64_t)szone, goal, total, 0); - - return total; -} - -boolean_t 
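szone_pressure_relief above is normally reached through the public wrapper rather than called directly; a goal of zero is understood as "give back as much as possible". A sketch:

    #include <malloc/malloc.h>

    static size_t
    trim_default_zone(void)
    {
        /* Returns the number of bytes handed back: madvised free space plus
         * the drained large-entry death-row cache shown above. */
        return malloc_zone_pressure_relief(malloc_default_zone(), 0);
    }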
-scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) -{ - szone_t *szone = (szone_t *)zone; - - switch (subzone) { - case 0: { - size_t s = 0; - unsigned t = 0; - size_t u = 0; - mag_index_t mag_index; - - for (mag_index = -1; mag_index < szone->tiny_rack.num_magazines; mag_index++) { - s += szone->tiny_rack.magazines[mag_index].mag_bytes_free_at_start; - s += szone->tiny_rack.magazines[mag_index].mag_bytes_free_at_end; - t += szone->tiny_rack.magazines[mag_index].mag_num_objects; - u += szone->tiny_rack.magazines[mag_index].mag_num_bytes_in_objects; - } - - stats->blocks_in_use = t; - stats->size_in_use = u; - stats->size_allocated = (szone->tiny_rack.num_regions - szone->tiny_rack.num_regions_dealloc) * TINY_REGION_SIZE; - stats->max_size_in_use = stats->size_allocated - s; - return 1; - } - case 1: { - size_t s = 0; - unsigned t = 0; - size_t u = 0; - mag_index_t mag_index; - - for (mag_index = -1; mag_index < szone->small_rack.num_magazines; mag_index++) { - s += szone->small_rack.magazines[mag_index].mag_bytes_free_at_start; - s += szone->small_rack.magazines[mag_index].mag_bytes_free_at_end; - t += szone->small_rack.magazines[mag_index].mag_num_objects; - u += szone->small_rack.magazines[mag_index].mag_num_bytes_in_objects; - } - - stats->blocks_in_use = t; - stats->size_in_use = u; - stats->size_allocated = (szone->small_rack.num_regions - szone->small_rack.num_regions_dealloc) * SMALL_REGION_SIZE; - stats->max_size_in_use = stats->size_allocated - s; - return 1; - } - case 2: - stats->blocks_in_use = szone->num_large_objects_in_use; - stats->size_in_use = szone->num_bytes_in_large_objects; - stats->max_size_in_use = stats->size_allocated = stats->size_in_use; - return 1; - case 3: - stats->blocks_in_use = 0; // DEPRECATED szone->num_huge_entries; - stats->size_in_use = 0; // DEPRECATED szone->num_bytes_in_huge_objects; - stats->max_size_in_use = stats->size_allocated = 0; - return 1; - case 4: { - size_t s = 0; - unsigned t = 0; - size_t u = 0; - size_t sa = 0; - -#if CONFIG_MEDIUM_ALLOCATOR - mag_index_t mag_index; - if (szone->is_medium_engaged) { - for (mag_index = -1; mag_index < szone->medium_rack.num_magazines; mag_index++) { - s += szone->medium_rack.magazines[mag_index].mag_bytes_free_at_start; - s += szone->medium_rack.magazines[mag_index].mag_bytes_free_at_end; - t += szone->medium_rack.magazines[mag_index].mag_num_objects; - u += szone->medium_rack.magazines[mag_index].mag_num_bytes_in_objects; - } - } - - sa = (szone->medium_rack.num_regions - szone->medium_rack.num_regions_dealloc) * MEDIUM_REGION_SIZE; -#endif // CONFIG_MEDIUM_ALLOCATOR - - stats->blocks_in_use = t; - stats->size_in_use = u; - stats->size_allocated = sa; - stats->max_size_in_use = stats->size_allocated - s; - return 1; - }} - return 0; -} - -static kern_return_t -szone_statistics_task(task_t task, vm_address_t zone_address, - memory_reader_t reader, malloc_statistics_t *stats) -{ - reader = !reader && task == mach_task_self() ? 
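scalable_zone_statistics reports one subzone per call, and the index meanings read straight off the switch above: 0 tiny, 1 small, 2 large, 3 the deprecated huge bucket (always zero now), 4 medium. A sketch of a probe loop, assuming the exported prototype is in scope:

    #include <stdio.h>
    #include <malloc/malloc.h>

    static void
    dump_subzones(malloc_zone_t *zone)
    {
        static const char *names[] = { "tiny", "small", "large", "huge", "medium" };
        malloc_statistics_t stats;
        for (unsigned sub = 0; sub < 5; sub++) {
            if (scalable_zone_statistics(zone, &stats, sub)) {
                printf("%-6s: %u blocks, %zu bytes in use, %zu allocated\n",
                        names[sub], stats.blocks_in_use, stats.size_in_use,
                        stats.size_allocated);
            }
        }
    }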
_malloc_default_reader : reader; - - szone_t *szone; - kern_return_t err; - - err = reader(task, zone_address, sizeof(szone_t), (void**)&szone); - if (err) return err; - - size_t large; - size_t s = 0; - unsigned t = 0; - size_t u = 0; - mag_index_t mag_index; - - magazine_t *mags; - err = reader(task, (vm_address_t)szone->tiny_rack.magazines, sizeof(magazine_t) * szone->tiny_rack.num_magazines, (void**)&mags); - if (err) return err; - - for (mag_index = -1; mag_index < szone->tiny_rack.num_magazines; mag_index++) { - s += mags[mag_index].mag_bytes_free_at_start; - s += mags[mag_index].mag_bytes_free_at_end; - t += mags[mag_index].mag_num_objects; - u += mags[mag_index].mag_num_bytes_in_objects; - } - - err = reader(task, (vm_address_t)szone->small_rack.magazines, sizeof(magazine_t) * szone->small_rack.num_magazines, (void**)&mags); - if (err) return err; - - for (mag_index = -1; mag_index < szone->small_rack.num_magazines; mag_index++) { - s += mags[mag_index].mag_bytes_free_at_start; - s += mags[mag_index].mag_bytes_free_at_end; - t += mags[mag_index].mag_num_objects; - u += mags[mag_index].mag_num_bytes_in_objects; - } - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - for (mag_index = -1; mag_index < szone->medium_rack.num_magazines; mag_index++) { - s += szone->medium_rack.magazines[mag_index].mag_bytes_free_at_start; - s += szone->medium_rack.magazines[mag_index].mag_bytes_free_at_end; - t += szone->medium_rack.magazines[mag_index].mag_num_objects; - u += szone->medium_rack.magazines[mag_index].mag_num_bytes_in_objects; - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - - large = szone->num_bytes_in_large_objects; - - stats->blocks_in_use = t + szone->num_large_objects_in_use; - stats->size_in_use = u + large; - stats->max_size_in_use = stats->size_allocated = - (szone->tiny_rack.num_regions - szone->tiny_rack.num_regions_dealloc) * TINY_REGION_SIZE + - (szone->small_rack.num_regions - szone->small_rack.num_regions_dealloc) * SMALL_REGION_SIZE + large; - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - stats->max_size_in_use += (szone->medium_rack.num_regions - - szone->medium_rack.num_regions_dealloc) * MEDIUM_REGION_SIZE; - } -#endif - // Now we account for the untouched areas - stats->max_size_in_use -= s; - - return KERN_SUCCESS; -} - -static void -szone_statistics(szone_t *szone, malloc_statistics_t *stats) -{ - szone_statistics_task(mach_task_self(), (vm_address_t)szone, NULL, stats); -} - -const struct malloc_introspection_t szone_introspect = { - (void *)szone_ptr_in_use_enumerator, (void *)szone_good_size, (void *)szone_check, (void *)szone_print_self, szone_log, - (void *)szone_force_lock, (void *)szone_force_unlock, (void *)szone_statistics, (void *)szone_locked, NULL, NULL, NULL, - NULL, /* Zone enumeration version 7 and forward. */ - (void *)szone_reinit_lock, // reinit_lock version 9 and forward - (void *)szone_print_task, // print task, version 11 and forward - (void *)szone_statistics_task // stats for task, version 12 and forward -}; // marked as const to spare the DATA section - -szone_t * -create_scalable_szone(size_t initial_size, unsigned debug_flags) -{ - szone_t *szone; - -#if defined(__i386__) || defined(__x86_64__) - if (_COMM_PAGE_VERSION_REQD > (*((uint16_t *)_COMM_PAGE_VERSION))) { - MALLOC_REPORT_FATAL_ERROR((*((uint16_t *)_COMM_PAGE_VERSION)), "comm page version mismatch"); - } -#endif - - /* get memory for the zone. 
*/ - szone = mvm_allocate_pages(SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC); - if (!szone) { - return NULL; - } - -/* set up the szone structure */ -#if 0 -#warning CHECK_REGIONS enabled - debug_flags |= CHECK_REGIONS; -#endif -#if 0 -#warning LOG enabled - szone->log_address = ~0; -#endif - - if (mvm_aslr_enabled()) { - debug_flags &= ~DISABLE_ASLR; - } else { - debug_flags |= DISABLE_ASLR; - } - -#if CONFIG_MEDIUM_ALLOCATOR || CONFIG_LARGE_CACHE - uint64_t memsize = platform_hw_memsize(); -#endif // CONFIG_MEDIUM_ALLOCATOR || CONFIG_LARGE_CACHE - -#if CONFIG_MEDIUM_ALLOCATOR - szone->is_medium_engaged = (magazine_medium_enabled && - (memsize >= magazine_medium_active_threshold)); -#endif // CONFIG_MEDIUM_ALLOCATOR - - // Query the number of configured processors. - // Uniprocessor case gets just one tiny and one small magazine (whose index is zero). This gives - // the same behavior as the original scalable malloc. MP gets per-CPU magazines - // that scale (way) better. - unsigned int max_mags = mag_max_magazines(); - uint32_t num_magazines = (max_mags > 1) ? MIN(max_mags, TINY_MAX_MAGAZINES) : 1; - rack_init(&szone->tiny_rack, RACK_TYPE_TINY, num_magazines, debug_flags); - rack_init(&szone->small_rack, RACK_TYPE_SMALL, num_magazines, debug_flags); - -#if CONFIG_MEDIUM_ALLOCATOR - if (szone->is_medium_engaged) { - unsigned max_medium_mags = mag_max_medium_magazines(); - uint32_t num_medium_mags = (max_medium_mags > 1) ? - MIN(max_medium_mags, TINY_MAX_MAGAZINES) : 1; - rack_init(&szone->medium_rack, RACK_TYPE_MEDIUM, num_medium_mags, - debug_flags); - } -#endif // CONFIG_MEDIUM_ALLOCATOR - -#if CONFIG_LARGE_CACHE - // madvise(..., MADV_REUSABLE) death-row arrivals above this threshold [~0.1%] - szone->large_entry_cache_reserve_limit = (size_t)(memsize >> 10); - if (memsize >= magazine_large_expanded_cache_threshold) { - szone->large_cache_depth = LARGE_ENTRY_CACHE_SIZE_HIGH; - szone->large_cache_entry_limit = LARGE_ENTRY_SIZE_ENTRY_LIMIT_HIGH; - } else { - szone->large_cache_depth = LARGE_ENTRY_CACHE_SIZE_LOW; - szone->large_cache_entry_limit = LARGE_ENTRY_SIZE_ENTRY_LIMIT_LOW; - } - - /* Reset protection when returning a previous large allocation? */ - int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System"); - if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */) { - szone->large_legacy_reset_mprotect = TRUE; - } else { - szone->large_legacy_reset_mprotect = FALSE; - } -#endif - - // Initialize the security token. - szone->cookie = (uintptr_t)malloc_entropy[0]; - - szone->basic_zone.version = 12; - szone->basic_zone.size = (void *)szone_size; - szone->basic_zone.malloc = (void *)szone_malloc; - szone->basic_zone.calloc = (void *)szone_calloc; - szone->basic_zone.valloc = (void *)szone_valloc; - szone->basic_zone.free = (void *)szone_free; - szone->basic_zone.realloc = (void *)szone_realloc; - szone->basic_zone.destroy = (void *)szone_destroy; - szone->basic_zone.batch_malloc = (void *)szone_batch_malloc; - szone->basic_zone.batch_free = (void *)szone_batch_free; - szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; - szone->basic_zone.memalign = (void *)szone_memalign; - szone->basic_zone.free_definite_size = (void *)szone_free_definite_size; - szone->basic_zone.pressure_relief = (void *)szone_pressure_relief; - szone->basic_zone.claimed_address = (void *)szone_claimed_address; - - /* Set to zero once and for all as required by CFAllocator. 
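Client code never calls create_scalable_szone directly; in this library it sits behind the public malloc_create_zone (and, as far as the code here goes, the initial_size argument is accepted but never consulted). For contrast, the typical lifecycle of such a zone through the public API:

    #include <malloc/malloc.h>

    static void
    scratch_zone_demo(void)
    {
        malloc_zone_t *zone = malloc_create_zone(0 /* start_size */, 0 /* flags */);
        malloc_set_zone_name(zone, "scratch");
        void *p = malloc_zone_malloc(zone, 128);
        malloc_zone_free(zone, p);
        malloc_zone_destroy(zone);   /* tears down every region at once */
    }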
*/ - szone->basic_zone.reserved1 = 0; - /* Set to zero once and for all as required by CFAllocator. */ - szone->basic_zone.reserved2 = 0; - - /* Prevent overwriting the function pointers in basic_zone. */ - mprotect(szone, sizeof(szone->basic_zone), PROT_READ); - - szone->debug_flags = debug_flags; - _malloc_lock_init(&szone->large_szone_lock); - - szone->cpu_id_key = -1UL; // Unused. - - CHECK(szone, __PRETTY_FUNCTION__); - return szone; -} - -malloc_zone_t * -create_scalable_zone(size_t initial_size, unsigned debug_flags) { - return (malloc_zone_t *) create_scalable_szone(initial_size, debug_flags); -} - -/* vim: set noet:ts=4:sw=4:cindent: */ diff --git a/src/libmalloc/src/magazine_malloc.h b/src/libmalloc/src/magazine_malloc.h deleted file mode 100644 index 7a15fbff2..000000000 --- a/src/libmalloc/src/magazine_malloc.h +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __MAGAZINE_MALLOC_H -#define __MAGAZINE_MALLOC_H - -// MARK: magazine_malloc - -MALLOC_NOEXPORT -malloc_zone_t * -create_scalable_zone(size_t initial_size, unsigned debug_flags); - -MALLOC_NOEXPORT -szone_t * -create_scalable_szone(size_t initial_size, unsigned debug_flags); - -// Allegedly exported for performance/debugging tools - -MALLOC_EXPORT -boolean_t -scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone); - -MALLOC_NOEXPORT -extern int max_magazines; - -MALLOC_NOEXPORT -extern int max_medium_magazines; - -MALLOC_NOEXPORT -extern int recirc_retained_regions; - -MALLOC_NOEXPORT -extern bool magazine_medium_enabled; - -MALLOC_NOEXPORT -extern uint64_t magazine_medium_active_threshold; - -MALLOC_NOEXPORT -extern uint64_t magazine_large_expanded_cache_threshold; - -// MARK: magazine_malloc utility functions - -MALLOC_NOEXPORT -extern const -struct malloc_introspection_t szone_introspect; - -MALLOC_NOEXPORT -void -szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count); - -MALLOC_NOEXPORT -unsigned -szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count); - -MALLOC_NOEXPORT -void * -szone_calloc(szone_t *szone, size_t num_items, size_t size); - -MALLOC_NOEXPORT -void -szone_free(szone_t *szone, void *ptr); - -MALLOC_NOEXPORT -void -szone_free_definite_size(szone_t *szone, void *ptr, size_t size); - -MALLOC_NOEXPORT -size_t -szone_good_size(szone_t *szone, size_t size); - -MALLOC_NOEXPORT -void * -szone_malloc(szone_t *szone, size_t size); - -MALLOC_NOEXPORT -void * -szone_memalign(szone_t *szone, size_t alignment, size_t size); - -MALLOC_NOEXPORT -size_t -szone_pressure_relief(szone_t *szone, size_t goal); - -MALLOC_NOEXPORT -boolean_t -szone_claimed_address(szone_t *szone, void *ptr); - -MALLOC_NOEXPORT -void * -szone_realloc(szone_t *szone, void *ptr, size_t new_size); - -MALLOC_NOEXPORT -size_t -szone_size(szone_t *szone, const void *ptr); - -MALLOC_NOEXPORT -size_t -szone_size_try_large(szone_t *szone, const void *ptr); - -MALLOC_NOEXPORT -void * -szone_valloc(szone_t *szone, size_t size); - -// MARK: tiny region allocator functions - -MALLOC_NOEXPORT -boolean_t -tiny_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter); - -MALLOC_NOEXPORT -void -tiny_finalize_region(rack_t *rack, magazine_t *tiny_mag_ptr); - -MALLOC_NOEXPORT -int -tiny_free_detach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r); - -MALLOC_NOEXPORT -boolean_t -tiny_free_list_check(rack_t *rack, grain_t slot, unsigned counter); - -MALLOC_NOEXPORT -boolean_t -tiny_free_no_lock(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize, boolean_t partial_free); - -MALLOC_NOEXPORT -size_t -tiny_free_reattach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r); - -MALLOC_NOEXPORT -void -tiny_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r); - -MALLOC_NOEXPORT -kern_return_t -tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, - vm_range_recorder_t recorder); - -MALLOC_NOEXPORT -void * -tiny_malloc_from_free_list(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize); - -MALLOC_NOEXPORT -void * -tiny_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested); - -MALLOC_NOEXPORT -void * -tiny_memalign(szone_t *szone, size_t alignment, size_t size, size_t span); - -MALLOC_NOEXPORT 
-boolean_t -tiny_claimed_address(rack_t *rack, void *ptr); - -MALLOC_NOEXPORT -void * -tiny_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size); - -MALLOC_NOEXPORT -boolean_t -tiny_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size); - -MALLOC_NOEXPORT -void -free_tiny(rack_t *rack, void *ptr, region_t tiny_region, size_t known_size, - boolean_t partial_free); - -MALLOC_NOEXPORT -size_t -tiny_size(rack_t *rack, const void *ptr); - -MALLOC_NOEXPORT -unsigned -tiny_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count); - -MALLOC_NOEXPORT -void -tiny_batch_free(szone_t *szone, void **to_be_freed, unsigned count); - -MALLOC_NOEXPORT -void -print_tiny_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack); - -MALLOC_NOEXPORT -void -print_tiny_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, int level, region_t region, - size_t bytes_at_start, size_t bytes_at_end); - -#if CONFIG_MADVISE_PRESSURE_RELIEF -MALLOC_NOEXPORT -void -tiny_madvise_pressure_relief(rack_t *rack); -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -// MARK: small region allocation functions - -MALLOC_NOEXPORT -boolean_t -small_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter); - -MALLOC_NOEXPORT -void -small_finalize_region(rack_t *rack, magazine_t *small_mag_ptr); - -MALLOC_NOEXPORT -int -small_free_detach_region(rack_t *rack, magazine_t *small_mag_ptr, region_t r); - -MALLOC_NOEXPORT -boolean_t -small_free_list_check(rack_t *rack, grain_t slot, unsigned counter); - -MALLOC_NOEXPORT -size_t -small_free_reattach_region(rack_t *rack, magazine_t *small_mag_ptr, region_t r); - -MALLOC_NOEXPORT -void -small_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r); - -MALLOC_NOEXPORT -kern_return_t -small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, - vm_range_recorder_t recorder); - -MALLOC_NOEXPORT -void * -small_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested); - -MALLOC_NOEXPORT -void * -small_memalign(szone_t *szone, size_t alignment, size_t size, size_t span); - -MALLOC_NOEXPORT -boolean_t -small_claimed_address(rack_t *rack, void *ptr); - -MALLOC_NOEXPORT -void * -small_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size); - -MALLOC_NOEXPORT -boolean_t -small_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size); - -MALLOC_NOEXPORT -void -free_small(rack_t *rack, void *ptr, region_t small_region, size_t known_size); - -MALLOC_NOEXPORT -size_t -small_size(rack_t *rack, const void *ptr); - -MALLOC_NOEXPORT -void -print_small_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack); - -MALLOC_NOEXPORT -void -print_small_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, szone_t *szone, int level, - region_t region, size_t bytes_at_start, size_t bytes_at_end); - -#if CONFIG_MADVISE_PRESSURE_RELIEF -MALLOC_NOEXPORT -void -small_madvise_pressure_relief(rack_t *rack); -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -// MARK: medium region allocation functions - -MALLOC_NOEXPORT -boolean_t -medium_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter); - -MALLOC_NOEXPORT -void -medium_finalize_region(rack_t *rack, magazine_t *medium_mag_ptr); - -MALLOC_NOEXPORT -int -medium_free_detach_region(rack_t *rack, 
magazine_t *medium_mag_ptr, region_t r); - -MALLOC_NOEXPORT -boolean_t -medium_free_list_check(rack_t *rack, grain_t slot, unsigned counter); - -MALLOC_NOEXPORT -size_t -medium_free_reattach_region(rack_t *rack, magazine_t *medium_mag_ptr, region_t r); - -MALLOC_NOEXPORT -void -medium_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r); - -MALLOC_NOEXPORT -kern_return_t -medium_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, - vm_range_recorder_t recorder); - -MALLOC_NOEXPORT -void * -medium_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested); - -MALLOC_NOEXPORT -void * -medium_memalign(szone_t *szone, size_t alignment, size_t size, size_t span); - -MALLOC_NOEXPORT -boolean_t -medium_claimed_address(rack_t *rack, void *ptr); - -MALLOC_NOEXPORT -void * -medium_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size); - -MALLOC_NOEXPORT -boolean_t -medium_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size); - -MALLOC_NOEXPORT -void -free_medium(rack_t *rack, void *ptr, region_t medium_region, size_t known_size); - -MALLOC_NOEXPORT -size_t -medium_size(rack_t *rack, const void *ptr); - -MALLOC_NOEXPORT -void -print_medium_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack); - -MALLOC_NOEXPORT -void -print_medium_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, szone_t *szone, int level, - region_t region, size_t bytes_at_start, size_t bytes_at_end); - -MALLOC_NOEXPORT -void -print_medium_region_vis(szone_t *szone, region_t region); - -#if CONFIG_MADVISE_PRESSURE_RELIEF -MALLOC_NOEXPORT -void -medium_madvise_pressure_relief(rack_t *rack); -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -// MARK: large region allocator functions - -MALLOC_NOEXPORT -void -free_large(szone_t *szone, void *ptr); - -MALLOC_NOEXPORT -void -large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate); - -MALLOC_NOEXPORT -large_entry_t * -large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr); - -MALLOC_NOEXPORT -kern_return_t -large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, - memory_reader_t reader, vm_range_recorder_t recorder); - -MALLOC_NOEXPORT -int -large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size); - -MALLOC_NOEXPORT -void * -large_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size); - -MALLOC_NOEXPORT -void * -large_malloc(szone_t *szone, size_t num_kernel_pages, unsigned char alignment, boolean_t cleared_requested); - -MALLOC_NOEXPORT -boolean_t -large_claimed_address(szone_t *szone, void *ptr); - -MALLOC_NOEXPORT -void -large_debug_print(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer); - -MALLOC_NOEXPORT -void * -szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested); - -#endif // __MAGAZINE_MALLOC_H diff --git a/src/libmalloc/src/magazine_medium.c b/src/libmalloc/src/magazine_medium.c deleted file mode 100644 index fa5d57c54..000000000 --- a/src/libmalloc/src/magazine_medium.c +++ /dev/null @@ -1,2892 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. 
- * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -#if CONFIG_MEDIUM_ALLOCATOR - -/********************* MEDIUM FREE LIST UTILITIES ************************/ - -#pragma mark meta header helpers - -static MALLOC_INLINE uint64_t -medium_sliding_madvise_granularity(magazine_t *magazine) -{ - // Use a sliding madvise granularity based on how many bytes the region - // currently has allocated. This way we will advise at a finer granularity - // as the region becomes more and more empty. - // region_trailer_t *t = REGION_TRAILER_FOR_MEDIUM_REGION(region); - if (magazine->mag_num_bytes_in_objects == 0) { - return MEDIUM_MADVISE_MIN; - } - return MAX(MEDIUM_MADVISE_MIN, 1 << (64 - - __builtin_clzl(magazine->mag_num_bytes_in_objects >> MEDIUM_MADVISE_SHIFT))); -} - -static MALLOC_INLINE void -medium_madvise_header_mark_dirty(msize_t *headers, msize_t index, - msize_t msize) -{ - // MALLOC_ASSERT(index < NUM_MEDIUM_BLOCKS); - // MALLOC_ASSERT(index + msize <= NUM_MEDIUM_BLOCKS); - headers[index] = msize; - headers[index + msize - 1] = msize; -} - -static MALLOC_INLINE void -medium_madvise_header_mark_clean(msize_t *headers, msize_t index, - msize_t msize) -{ - // MALLOC_ASSERT(index < NUM_MEDIUM_BLOCKS); - // MALLOC_ASSERT(index + msize <= NUM_MEDIUM_BLOCKS); - headers[index] = msize | MEDIUM_IS_ADVISED; - headers[index + msize - 1] = msize | MEDIUM_IS_ADVISED; -} - -static MALLOC_INLINE void -medium_madvise_header_mark_middle(msize_t *headers, msize_t index) -{ - // MALLOC_ASSERT(index < NUM_MEDIUM_BLOCKS); - headers[index] = 0; -} - -static MALLOC_INLINE msize_t -medium_madvise_header_dirty_len(msize_t *headers, msize_t index) -{ - // MALLOC_ASSERT(index < NUM_MEDIUM_BLOCKS); - if (headers[index] & MEDIUM_IS_ADVISED) { - return 0; - } - return headers[index] & ~MEDIUM_IS_ADVISED; -} - -/* - * Mark a block as free. Only the first quantum of a block is marked thusly, - * the remainder are marked "middle". - */ -static MALLOC_INLINE void -medium_meta_header_set_is_free(msize_t *meta_headers, msize_t index, msize_t msize) -{ - meta_headers[index] = msize | MEDIUM_IS_FREE; -} - -/* - * Mark a block as not free, preserving its size. - */ -static MALLOC_INLINE void -medium_meta_header_set_not_free(msize_t *meta_headers, msize_t index) -{ - meta_headers[index] &= ~MEDIUM_IS_FREE; -} - -/* - * Mark a block as in use. Only the first quantum of a block is marked thusly, - * the remainder are marked "middle". 
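One design note on the dirty/clean helpers above: recording the quantum count in both the first and the last header slot of a run is essentially a boundary tag, so the length of the run that ends immediately before a given block is an O(1) lookup when coalescing backwards. Sketched, with a hypothetical name:

    /* The slot just before `index` is the tail tag of the preceding run;
     * masking off MEDIUM_IS_ADVISED recovers its length in quanta. */
    static msize_t
    preceding_run_quanta(const msize_t *madv_headers, msize_t index)
    {
        return madv_headers[index - 1] & ~MEDIUM_IS_ADVISED;
    }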
- */ -static MALLOC_INLINE void -medium_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) -{ - meta_headers[index] = msize; -} - -/* - * Mark a quantum as being the second or later in a block. - */ -static MALLOC_INLINE void -medium_meta_header_set_middle(msize_t *meta_headers, msize_t index) -{ - meta_headers[index] = 0; -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -mag_index_t -medium_mag_get_thread_index(void) -{ -#if CONFIG_MEDIUM_USES_HYPER_SHIFT - if (os_likely(_os_cpu_number_override == -1)) { - return _os_cpu_number() >> hyper_shift; - } else { - return _os_cpu_number_override >> hyper_shift; - } -#else // CONFIG_MEDIUM_USES_HYPER_SHIFT - if (os_likely(_os_cpu_number_override == -1)) { - return _os_cpu_number(); - } else { - return _os_cpu_number_override; - } -#endif // CONFIG_MEDIUM_USES_HYPER_SHIFT -} - -#pragma mark in-place free list - -static MALLOC_INLINE void -medium_inplace_checksum_ptr(rack_t *rack, inplace_linkage_s *linkage, void *ptr) -{ - uintptr_t checksum = free_list_gen_checksum((uintptr_t)ptr ^ rack->cookie ^ (uintptr_t)rack); - linkage->checksum = checksum; - linkage->ptr = ptr; -} - -static MALLOC_INLINE free_list_t -medium_inplace_unchecksum_ptr(rack_t *rack, inplace_linkage_s *linkage) -{ - if (linkage->checksum != (uint8_t)free_list_gen_checksum((uintptr_t)linkage->ptr ^ rack->cookie ^ (uintptr_t)rack)) { - free_list_checksum_botch(rack, linkage, linkage->ptr); - __builtin_trap(); - } - - return (free_list_t){ .p = linkage->ptr }; -} - -static MALLOC_INLINE free_list_t -medium_inplace_unchecksum_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, inplace_linkage_s *linkage) -{ - inplace_linkage_s *mapped_linkage; - rack_t *mapped_rack; - if (reader(task, (vm_address_t)linkage, sizeof(*linkage), - (void **)&mapped_linkage)) { - printer("Unable to map medium linkage pointer %p\n", linkage); - return (free_list_t){ .p = NULL }; - } - - if (reader(task, (vm_address_t)rack, - sizeof(struct rack_s), (void **)&mapped_rack)) { - printer("Failed to map medium rack\n"); - return (free_list_t){ .p = NULL }; - } - - if (mapped_linkage->checksum != (uint8_t)free_list_gen_checksum( - (uintptr_t)mapped_linkage->ptr ^ mapped_rack->cookie ^ (uintptr_t)rack)) { - free_list_checksum_botch(rack, linkage, mapped_linkage->ptr); - __builtin_trap(); - } - - return (free_list_t){ .p = mapped_linkage->ptr }; -} - -static MALLOC_INLINE free_list_t -medium_inplace_free_entry_get_previous(rack_t *rack, medium_inplace_free_entry_t ptr) -{ - return medium_inplace_unchecksum_ptr(rack, &ptr->previous); -} - -static MALLOC_INLINE void -medium_inplace_free_entry_set_previous(rack_t *rack, medium_inplace_free_entry_t entry, free_list_t previous) -{ - medium_inplace_checksum_ptr(rack, &entry->previous, previous.p); -} - -static MALLOC_INLINE free_list_t -medium_inplace_free_entry_get_next(rack_t *rack, medium_inplace_free_entry_t ptr) -{ - return medium_inplace_unchecksum_ptr(rack, &ptr->next); -} - -static MALLOC_INLINE free_list_t -medium_inplace_free_entry_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, - medium_inplace_free_entry_t ptr) -{ - return medium_inplace_unchecksum_ptr_task(task, reader, printer, rack, - &ptr->next); -} - -static MALLOC_INLINE void -medium_inplace_free_entry_set_next(rack_t *rack, medium_inplace_free_entry_t entry, free_list_t next) -{ - medium_inplace_checksum_ptr(rack, &entry->next, next.p); -} - -#pragma mark OOB free list - -// Returns true if 
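Before the out-of-band machinery below, a remark on the in-place linkage above: every stored prev/next pointer carries a checksum folded from the pointer, the rack's boot-time cookie, and the rack address, and a mismatch on load routes into free_list_checksum_botch plus a trap. The cookie is secret per process, so an overflow that scribbles a freed block's linkage cannot forge a pointer the allocator will follow. A self-contained toy model of the idea (the real free_list_gen_checksum differs):

    #include <stdint.h>

    typedef struct { void *ptr; uint8_t checksum; } toy_linkage_t;

    static uint8_t
    toy_checksum(uintptr_t v)          /* stand-in, not the real function */
    {
        uint8_t sum = 0;
        while (v) { sum += (uint8_t)v; v >>= 8; }
        return sum;
    }

    static void
    toy_store(toy_linkage_t *l, void *p, uintptr_t cookie)
    {
        l->ptr = p;
        l->checksum = toy_checksum((uintptr_t)p ^ cookie);
    }

    static void *
    toy_load(const toy_linkage_t *l, uintptr_t cookie)
    {
        if (l->checksum != toy_checksum((uintptr_t)l->ptr ^ cookie)) {
            __builtin_trap();          /* freed-block linkage was corrupted */
        }
        return l->ptr;
    }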
the address and size of the free list entry would result -// in the free entry being the only data on a given page. -static MALLOC_INLINE boolean_t -medium_needs_oob_free_entry(void *ptr, msize_t msize) -{ - return ((trunc_page_quanta((uintptr_t)ptr) == (uintptr_t)ptr) && - (MEDIUM_BYTES_FOR_MSIZE(msize) >= vm_kernel_page_size)); -} - -// Returns true if the address given lies within the region's OOB free -// list entries, rather than a free_list_t in the region's heap space. -static MALLOC_INLINE boolean_t -medium_is_oob_free_entry(free_list_t ptr) -{ - medium_region_t region = MEDIUM_REGION_FOR_PTR(ptr.p); - return (((uintptr_t)ptr.p >= (uintptr_t)&region->medium_oob_free_entries[0]) && - ((uintptr_t)ptr.p < (uintptr_t)&region->medium_oob_free_entries[MEDIUM_OOB_COUNT])); -} - -static MALLOC_INLINE void -medium_oob_free_entry_set_previous(oob_free_entry_t oobe, free_list_t previous) -{ - // MALLOC_ASSERT(medium_is_oob_free_entry(previous) || !((uintptr_t)previous.p & 0x1fffffff)); - oobe->prev = (uintptr_t)previous.p; -} - -static MALLOC_INLINE free_list_t -medium_oob_free_entry_get_previous(oob_free_entry_t oobe) -{ - return (free_list_t){ .p = (void *)oobe->prev }; -} - -static MALLOC_INLINE void -medium_oob_free_entry_set_next(oob_free_entry_t oobe, free_list_t next) -{ - // MALLOC_ASSERT(medium_is_oob_free_entry(next) || !((uintptr_t)next.p & 0x1fffffff)); - oobe->next = (uintptr_t)next.p; -} - -static MALLOC_INLINE free_list_t -medium_oob_free_entry_get_next(oob_free_entry_t oobe) -{ - return (free_list_t){ .p = (void *)oobe->next }; -} - -static MALLOC_INLINE free_list_t -medium_oob_free_entry_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, oob_free_entry_t oobe) -{ - oob_free_entry_t mapped_oobe; - if (reader(task, (vm_address_t)oobe, sizeof(*oobe), - (void **)&mapped_oobe)) { - printer("Failed to map medium oobe pointer\n"); - return (free_list_t){ .p = NULL }; - } - return (free_list_t){ .p = (void *)mapped_oobe->next }; -} - -static MALLOC_INLINE void * -medium_oob_free_entry_get_ptr(oob_free_entry_t oobe) -{ - medium_region_t region = MEDIUM_REGION_FOR_PTR(oobe); - uint16_t block = oobe->ptr & ~MEDIUM_IS_OOB; - return (void *)((uintptr_t)region + (block << SHIFT_MEDIUM_QUANTUM)); -} - -static MALLOC_INLINE void * -medium_oob_free_entry_get_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, oob_free_entry_t oobe) -{ - // We need to map the oob_free_entry_t to read the pointer value. - oob_free_entry_t mapped_oobe; - if (reader(task, (vm_address_t)oobe, sizeof(*oobe), - (void **)&mapped_oobe)) { - printer("Failed to map medium oobe pointer\n"); - return NULL; - } - - if (!(mapped_oobe->ptr & MEDIUM_IS_OOB)) { - return NULL; - } - - // The rest of this code works with target process addresses and returns an - // address in the target process. - medium_region_t region = MEDIUM_REGION_FOR_PTR(oobe); - uint16_t block = mapped_oobe->ptr & ~MEDIUM_IS_OOB; - return (void *)((uintptr_t)region + (block << SHIFT_MEDIUM_QUANTUM)); -} - -static MALLOC_INLINE void -medium_oob_free_entry_set_ptr(oob_free_entry_t oobe, void *ptr) -{ - oobe->ptr = MEDIUM_IS_OOB | (MEDIUM_OFFSET_FOR_PTR(ptr) >> SHIFT_MEDIUM_QUANTUM); -} - -static MALLOC_INLINE void -medium_oob_free_entry_set_free(oob_free_entry_t oobe) -{ - oobe->prev = ~0; - oobe->next = ~0; - oobe->ptr = 0; -} - -// Finds the first unused OOB free list entry in the pointer's region. -// Returns NULL if all of the OOB entries are used. 
-static MALLOC_INLINE oob_free_entry_t -medium_oob_free_find_empty(void *ptr, msize_t msize) -{ - medium_region_t region = MEDIUM_REGION_FOR_PTR(ptr); - - // There are 61 of these entries at the end of a medium region. - // If this changes, then a linear search through the list may - // become an unsuitable choice. - for (int i=0; i < MEDIUM_OOB_COUNT; i++) { - if (region->medium_oob_free_entries[i].ptr == 0) { - return &region->medium_oob_free_entries[i]; - } - } - -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_INFO, "used all slots of OOB entries\n"); -#endif - return NULL; -} - -static MALLOC_INLINE oob_free_entry_t -medium_oob_free_find_ptr(void *ptr, msize_t msize) -{ - medium_region_t region = MEDIUM_REGION_FOR_PTR(ptr); - - // There are 61 of these entries at the end of a medium region. - // If this changes, then a linear search through the list may - // become an unsuitable choice. - for (int i=0; i < MEDIUM_OOB_COUNT; i++) { - oob_free_entry_t oob = &region->medium_oob_free_entries[i]; - if (medium_oob_free_entry_get_ptr(oob) == ptr && - oob->ptr & MEDIUM_IS_OOB) { - return &region->medium_oob_free_entries[i]; - } - } - - return NULL; -} - -#pragma mark generic free list - -static MALLOC_INLINE void -medium_free_list_set_previous(rack_t *rack, free_list_t entry, free_list_t previous) -{ - if (medium_is_oob_free_entry(entry)) { - medium_oob_free_entry_set_previous(entry.oob, previous); - } else { - medium_inplace_free_entry_set_previous(rack, entry.medium_inplace, previous); - } -} - -static MALLOC_INLINE free_list_t -medium_free_list_get_previous(rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (medium_is_oob_free_entry(ptr)) { - return medium_oob_free_entry_get_previous(ptr.oob); - } else { - return medium_inplace_free_entry_get_previous(rack, ptr.medium_inplace); - } -} - -static MALLOC_INLINE void -medium_free_list_set_next(rack_t *rack, free_list_t entry, free_list_t next) -{ - if (medium_is_oob_free_entry(entry)) { - medium_oob_free_entry_set_next(entry.oob, next); - } else { - medium_inplace_free_entry_set_next(rack, entry.medium_inplace, next); - } -} - -static MALLOC_INLINE free_list_t -medium_free_list_get_next(rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (medium_is_oob_free_entry(ptr)) { - return medium_oob_free_entry_get_next(ptr.oob); - } else { - return medium_inplace_free_entry_get_next(rack, ptr.medium_inplace); - } -} - -static MALLOC_INLINE free_list_t -medium_free_list_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (medium_is_oob_free_entry(ptr)) { - return medium_oob_free_entry_get_next_task(task, reader, printer, ptr.oob); - } else { - return medium_inplace_free_entry_get_next_task(task, reader, printer, - rack, ptr.medium_inplace); - } -} - -static MALLOC_INLINE void * -medium_free_list_get_ptr(rack_t *rack, free_list_t ptr) -{ - if (!ptr.p) { - return NULL; - } else if (medium_is_oob_free_entry(ptr)) { - return medium_oob_free_entry_get_ptr(ptr.oob); - } else { - return (void *)ptr.p; - } -} - -static MALLOC_INLINE void * -medium_free_list_get_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, free_list_t ptr) -{ - if (!ptr.p) { - return NULL; - } else if (medium_is_oob_free_entry(ptr)) { - return medium_oob_free_entry_get_ptr_task(task, reader, printer, ptr.oob); - } else { - return (void *)ptr.p; - } -} - -// Returns a free_list_t that is either inline or not based on the -// pointer and msize. 
-static MALLOC_INLINE free_list_t -medium_free_list_from_ptr(rack_t *rack, void *ptr, msize_t msize) -{ - MALLOC_ASSERT(msize); - - // The default is to put the free_list_t in the memory that - // the pointer leads to. - free_list_t entry; - entry.p = ptr; - - // If the pointer is page aligned, and the msize is greater - // than a whole page, then we try and put the entry in - // the out-of-band area instead. - if (medium_needs_oob_free_entry(ptr, msize)) { - oob_free_entry_t oobe = medium_oob_free_find_empty(ptr, msize); - if (oobe) { - medium_oob_free_entry_set_ptr(oobe, ptr); - entry.oob = oobe; - } - } - - return entry; -} - -static MALLOC_INLINE void -medium_free_mark_free(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as free. - void *ptr = medium_free_list_get_ptr(rack, entry); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = MEDIUM_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = MEDIUM_META_INDEX_FOR_PTR(ptr + MEDIUM_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - - medium_meta_header_set_is_free(meta_headers, start_index, msize); - medium_meta_header_set_is_free(meta_headers, end_index, msize); -} - -static MALLOC_INLINE void -medium_free_mark_middle(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as "middle" (unfree). - void *ptr = medium_free_list_get_ptr(rack, entry); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = MEDIUM_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = MEDIUM_META_INDEX_FOR_PTR(ptr + MEDIUM_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - MALLOC_ASSERT((meta_headers[start_index] & ~MEDIUM_IS_FREE) == msize); - - medium_meta_header_set_middle(meta_headers, start_index); - medium_meta_header_set_middle(meta_headers, end_index); -} - -static MALLOC_INLINE void -medium_free_mark_unfree(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as not free. - void *ptr = medium_free_list_get_ptr(rack, entry); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = MEDIUM_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = MEDIUM_META_INDEX_FOR_PTR(ptr + - MEDIUM_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - - medium_meta_header_set_not_free(meta_headers, start_index); - medium_meta_header_set_not_free(meta_headers, end_index); -} - -static MALLOC_INLINE unsigned int -medium_free_list_count(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, free_list_t ptr) -{ - unsigned int count = 0; - while (ptr.p) { - count++; - ptr = medium_free_list_get_next_task(task, reader, printer, rack, ptr); - } - return count; -} - -/* - * Adds an item to the proper free list, and also marks the meta-header of the - * block properly. - * Assumes szone has been locked - */ -static free_list_t -medium_free_list_add_ptr(rack_t *rack, magazine_t *medium_mag_ptr, void *ptr, msize_t msize) -{ - grain_t slot = MEDIUM_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t free_head = medium_mag_ptr->mag_free_list[slot]; - - // This will either return the free_list_t for the current pointer, or attempt - // to reserve an OOB entry for us. 
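The reason medium_free_list_from_ptr above prefers an out-of-band entry when one is available: threading prev/next links through the freed block itself dirties its first page, which would defeat madvise for a block that is otherwise a clean run of whole pages. The predicate condenses to the following sketch (using the generic mach page globals rather than this file's wrappers):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <mach/vm_page_size.h>

    static bool
    inline_linkage_would_dirty_a_clean_page(void *block, size_t block_bytes)
    {
        bool page_aligned = ((uintptr_t)block & (vm_kernel_page_size - 1)) == 0;
        return page_aligned && block_bytes >= vm_kernel_page_size;
    }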
- free_list_t free_ptr = medium_free_list_from_ptr(rack, ptr, msize); - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } - if (((uintptr_t)ptr) & (MEDIUM_QUANTUM - 1)) { - malloc_zone_error(rack->debug_flags, true, "medium_free_list_add_ptr: Unaligned ptr %p\n", ptr); - } - if (!msize) { - malloc_zone_error(rack->debug_flags, true, "medium_free_list_add_ptr: msize=0, %p\n", ptr); - } -#endif - - medium_free_list_set_previous(rack, free_ptr, (free_list_t){ .p = NULL }); - medium_free_list_set_next(rack, free_ptr, free_head); - - // Set the start and end blocks of the meta header as "free". Marking the last block - // allows coalescing the regions when we free adjacent regions. - medium_free_mark_free(rack, free_ptr, msize); - - if (medium_free_list_get_ptr(rack, free_head)) { -#if DEBUG_MALLOC - if (medium_free_list_get_previous(szone, free_head)) { - malloc_zone_error(rack->debug_flags, true, "medium_free_list_add_ptr: Internal invariant broken (free_head->previous != NULL)\n" - "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p); - } - if (!MEDIUM_PTR_IS_FREE(medium_free_list_get_ptr(szone, free_head))) { - malloc_zone_error(rack->debug_flags, true, "medium_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)\n" - "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)medium_free_list_get_ptr(szone, free_head)); - } -#endif - medium_free_list_set_previous(rack, free_head, free_ptr); - } else { - BITMAPN_SET(medium_mag_ptr->mag_bitmap, slot); - } - - medium_mag_ptr->mag_free_list[slot] = free_ptr; - return free_ptr; -} - -/* - * Removes the item pointed to by ptr in the proper free list. - * Assumes szone has been locked - */ -static void -medium_free_list_remove_ptr_no_clear(rack_t *rack, magazine_t *medium_mag_ptr, free_list_t entry, msize_t msize) -{ - grain_t slot = MEDIUM_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t next, previous; - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } -#endif - - previous = medium_free_list_get_previous(rack, entry); - next = medium_free_list_get_next(rack, entry); - - if (!medium_free_list_get_ptr(rack, previous)) { - // The block to remove is the head of the free list -#if DEBUG_MALLOC - if (medium_mag_ptr->mag_free_list[slot] != ptr) { - malloc_zone_error(rack->debug_flags, true, - "medium_free_list_remove_ptr_no_clear: Internal invariant broken (medium_mag_ptr->mag_free_list[slot])\n" - "ptr=%p slot=%d msize=%d medium_mag_ptr->mag_free_list[slot]=%p\n", ptr, slot, msize, - (void *)medium_mag_ptr->mag_free_list[slot]); - return; - } -#endif - medium_mag_ptr->mag_free_list[slot] = next; - if (!medium_free_list_get_ptr(rack, next)) { - BITMAPN_CLR(medium_mag_ptr->mag_bitmap, slot); - } - } else { - // Check that the next pointer of "previous" points to "entry". - free_list_t prev_next = medium_free_list_get_next(rack, previous); - if (medium_free_list_get_ptr(rack, prev_next) != medium_free_list_get_ptr(rack, entry)) { - malloc_zone_error(rack->debug_flags, true, - "medium_free_list_remove_ptr_no_clear: Internal invariant broken (next ptr of prev) for %p, prev_next=%p\n", - medium_free_list_get_ptr(rack, entry), medium_free_list_get_ptr(rack, prev_next)); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). 
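The neighbour cross-checks in the removal path here are the classic safe-unlinking defence: before an entry is spliced out, verify that prev->next and next->prev actually point back at it, so corrupted link words cannot be turned into an arbitrary memory write. The generic shape, with a hypothetical node type:

    typedef struct node { struct node *prev, *next; } node_t;

    static void
    safe_unlink(node_t *entry)
    {
        if ((entry->prev && entry->prev->next != entry) ||
            (entry->next && entry->next->prev != entry)) {
            __builtin_trap();   /* list corruption: abort, never unlink */
        }
        if (entry->prev) { entry->prev->next = entry->next; }
        if (entry->next) { entry->next->prev = entry->prev; }
    }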
} - medium_free_list_set_next(rack, previous, next); - } - - if (medium_free_list_get_ptr(rack, next)) { - // Check that the previous pointer of "next" points to "entry". - free_list_t next_prev = medium_free_list_get_previous(rack, next); - if (medium_free_list_get_ptr(rack, next_prev) != medium_free_list_get_ptr(rack, entry)) { - malloc_zone_error(rack->debug_flags, true, - "medium_free_list_remove_ptr_no_clear: Internal invariant broken (prev ptr of next) for %p, next_prev=%p\n", - medium_free_list_get_ptr(rack, entry), medium_free_list_get_ptr(rack, next_prev)); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). - } - medium_free_list_set_previous(rack, next, previous); - } - - if (medium_is_oob_free_entry(entry)) { - medium_oob_free_entry_set_free(entry.oob); - } -} - -static void -medium_free_list_remove_ptr(rack_t *rack, magazine_t *medium_mag_ptr, free_list_t entry, msize_t msize) -{ - // In the general case we want to ensure we marked these entries as "middle" - // while we are in this function. However, when we're moving free list entries - // from/to the recirc depot we rely on the metadata bits being intact to - // reconstruct the free list. In that case we have to be able to skip this - // metadata manipulation. - medium_free_mark_middle(rack, entry, msize); - medium_free_list_remove_ptr_no_clear(rack, medium_mag_ptr, entry, msize); -} - -// Find a free list entry by its pointer address. This should only really be used -// by medium_finalize_region, or similar, where the free_list_t entry of a known -// pointer is desired. Otherwise it is cheaper to always pull off the free lists. -static free_list_t -medium_free_list_find_by_ptr(rack_t *rack, magazine_t *medium_mag_ptr, void *ptr, msize_t msize) -{ - if (*MEDIUM_METADATA_FOR_PTR(ptr) == (MEDIUM_IS_FREE | msize)) { - // If the block is marked free, and of size `msize`, then we first must check - // if the alignment+size is such that we could have used an OOB-entry. - if (medium_needs_oob_free_entry(ptr, msize)) { - // Scan the OOB entries looking for this address. - medium_region_t region = MEDIUM_REGION_FOR_PTR(ptr); - for (int i=0; i < MEDIUM_OOB_COUNT; i++) { - if (!region->medium_oob_free_entries[i].ptr) { - continue; - } - if (medium_oob_free_entry_get_ptr(&region->medium_oob_free_entries[i]) == ptr) { - return (free_list_t){ .oob = &region->medium_oob_free_entries[i] }; - } - } - } - - // Otherwise, the freed pointer will be in place. - return (free_list_t){ .p = ptr }; - } - - malloc_zone_error(rack->debug_flags, true, - "medium_free_list_find_by_ptr: ptr is not free (ptr metadata !MEDIUM_IS_FREE), " - "ptr=%p msize=%d metadata=0x%x\n", ptr, msize, *MEDIUM_METADATA_FOR_PTR(ptr)); - __builtin_trap(); -} - -void -medium_finalize_region(rack_t *rack, magazine_t *medium_mag_ptr) -{ - void *last_block, *previous_block; - msize_t last_msize, previous_msize, last_index; - free_list_t previous; - - // It is possible that the block prior to the last block in the region has - // been free'd, but was not coalesced with the free bytes at the end of the - // block, since we treat the bytes at the end of the region as "in use" in - // the meta headers. Attempt to coalesce the last block with the previous - // block, so we don't violate the "no consecutive free blocks" invariant. - // - // FIXME: If we could calculate the previous medium free size in the same - // manner as tiny_previous_preceding_free, it would eliminate the - // index & previous msize checks, which are a guard against reading - // bogus data out of in-use or written-on-freed memory. 
- // - // FIXME: Need to investigate how much work would be required to increase - // 'mag_bytes_free_at_end' when freeing the preceding block, rather - // than performing this workaround. - // - if (medium_mag_ptr->mag_bytes_free_at_end) { - last_block = MEDIUM_REGION_END(medium_mag_ptr->mag_last_region) - medium_mag_ptr->mag_bytes_free_at_end; - last_msize = MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end); - - last_index = MEDIUM_META_INDEX_FOR_PTR(last_block); - previous_msize = MEDIUM_PREVIOUS_MSIZE(last_block); - - if (last_index && (previous_msize <= last_index)) { - previous_block = (void *)((uintptr_t)last_block - MEDIUM_BYTES_FOR_MSIZE(previous_msize)); - - if (MEDIUM_PTR_IS_FREE(previous_block)) { - previous = medium_free_list_find_by_ptr(rack, medium_mag_ptr, previous_block, previous_msize); - medium_free_list_remove_ptr(rack, medium_mag_ptr, previous, previous_msize); - last_block = previous_block; - last_msize += previous_msize; - } - } - - // splice last_block into the free list - medium_free_list_add_ptr(rack, medium_mag_ptr, last_block, last_msize); - medium_mag_ptr->mag_bytes_free_at_end = 0; - } - -#if CONFIG_ASLR_INTERNAL - free_list_t next; - - if (medium_mag_ptr->mag_bytes_free_at_start) { - last_block = MEDIUM_REGION_ADDRESS(medium_mag_ptr->mag_last_region); - last_msize = MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_start); - - void *next_block = (void *)((uintptr_t)last_block + medium_mag_ptr->mag_bytes_free_at_start); - if (MEDIUM_PTR_IS_FREE(next_block)) { - msize_t next_msize = MEDIUM_PTR_SIZE(next_block); - next = medium_free_list_find_by_ptr(rack, medium_mag_ptr, next_block, next_msize); - medium_free_list_remove_ptr(rack, medium_mag_ptr, next, next_msize); - last_msize += next_msize; - } - - // splice last_block into the free list - medium_free_list_add_ptr(rack, medium_mag_ptr, last_block, last_msize); - medium_mag_ptr->mag_bytes_free_at_start = 0; - } -#endif - - // TODO: Will we ever need to coalesce the blocks at the beginning and end when we finalize? 
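The MEDIUM_PREVIOUS_MSIZE lookup used in the coalescing above is boundary tagging: a free block stores its own size in its final quantum, so the block that follows can locate the start of its left neighbor in O(1) without walking the heap. A minimal sketch under assumed names (QUANTUM and the uint16_t size cell are illustrative, not the real layout):

    #include <stdint.h>
    #include <stddef.h>

    #define QUANTUM 512 /* stand-in for the medium quantum size */

    /* A free block writes its size in quantum units into its last two
     * bytes; the following block reads that cell to find its neighbor. */
    static inline uint16_t
    prev_msize_of(const void *block)
    {
        return ((const uint16_t *)block)[-1];
    }

    static inline void *
    left_neighbor(void *block)
    {
        return (uint8_t *)block - (size_t)prev_msize_of(block) * QUANTUM;
    }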
- medium_mag_ptr->mag_last_region = NULL; -} - -int -medium_free_detach_region(rack_t *rack, magazine_t *medium_mag_ptr, region_t r) -{ - unsigned char *ptr = MEDIUM_REGION_ADDRESS(r); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - uintptr_t start = (uintptr_t)MEDIUM_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)MEDIUM_REGION_END(r); - int total_alloc = 0; - - while (current < limit) { - unsigned index = MEDIUM_META_INDEX_FOR_PTR(current); - msize_t msize_and_free = meta_headers[index]; - boolean_t is_free = msize_and_free & MEDIUM_IS_FREE; - msize_t msize = msize_and_free & ~MEDIUM_IS_FREE; - - if (!msize) { -#if DEBUG_MALLOC - boolean_t is_free = msize_and_free & MEDIUM_IS_FREE; - malloc_report(ASL_LEVEL_ERR, "*** medium_free_detach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - - if (is_free) { - free_list_t entry = medium_free_list_find_by_ptr(rack, medium_mag_ptr, (void *)current, msize); - medium_free_list_remove_ptr_no_clear(rack, medium_mag_ptr, entry, msize); - } else { - total_alloc++; - } - current += MEDIUM_BYTES_FOR_MSIZE(msize); - } - return total_alloc; -} - -size_t -medium_free_reattach_region(rack_t *rack, magazine_t *medium_mag_ptr, region_t r) -{ - unsigned char *ptr = MEDIUM_REGION_ADDRESS(r); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - uintptr_t start = (uintptr_t)MEDIUM_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)MEDIUM_REGION_END(r); - size_t total_alloc = 0; - - while (current < limit) { - unsigned index = MEDIUM_META_INDEX_FOR_PTR(current); - msize_t msize_and_free = meta_headers[index]; - boolean_t is_free = msize_and_free & MEDIUM_IS_FREE; - msize_t msize = msize_and_free & ~MEDIUM_IS_FREE; - - if (!msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** medium_free_reattach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - if (is_free) { - medium_free_list_add_ptr(rack, medium_mag_ptr, (void *)current, msize); - } else { - total_alloc += MEDIUM_BYTES_FOR_MSIZE(msize); - } - current += MEDIUM_BYTES_FOR_MSIZE(msize); - } - return total_alloc; -} - -typedef struct _medium_advisory_s { - struct _medium_advisory_s *next; - size_t size; -} medium_advisory_s, *medium_advisory_t; - -void -medium_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r) -{ - uintptr_t start = (uintptr_t)MEDIUM_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)MEDIUM_REGION_END(r); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(start); - msize_t *madv_headers = MEDIUM_MADVISE_HEADER_FOR_PTR(start); - - medium_advisory_t advisories = NULL; - - // Scan the metadata identifying blocks which span one or more pages. Mark - // the pages MADV_FREE taking care to preserve free list management data. - while (current < limit) { - unsigned index = MEDIUM_META_INDEX_FOR_PTR(current); - - msize_t alloc_msize = meta_headers[index] & ~MEDIUM_IS_FREE; - bool alloc_is_free = meta_headers[index] & MEDIUM_IS_FREE; - - if (alloc_is_free && alloc_msize == MEDIUM_MAX_MSIZE) { - // The first allocation is both free and covers the entirety of the - // zone. -#if DEBUG_MALLOC - // first block is all free - malloc_report(ASL_LEVEL_ERR, "*** medium_free_scan_madvise_free " - "first block is all free!
%p: msize=%d is_free=%d\n", - (void *)current, alloc_msize, alloc_is_free); -#endif - uintptr_t pgLo = round_page_kernel(start + sizeof(medium_inplace_free_entry_s) + - sizeof(msize_t)); - uintptr_t pgHi = trunc_page_kernel(start - sizeof(msize_t) + - (NUM_MEDIUM_BLOCKS << SHIFT_MEDIUM_QUANTUM)); - - if (pgLo < pgHi) { - medium_advisory_t mat = (medium_advisory_t)pgLo; - mat->next = advisories; - mat->size = pgHi - pgLo; - advisories = mat; - } - break; - } - if (!alloc_msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** medium_free_scan_madvise_free " - "error with %p: msize=%d is_free=%d\n", (void *)current, - alloc_msize, alloc_is_free); -#endif - break; - } - if (alloc_is_free) { - msize_t advise_msize = madv_headers[index] - & ~MEDIUM_IS_ADVISED; - bool run_is_advised = madv_headers[index] - & MEDIUM_IS_ADVISED; - - if (advise_msize != alloc_msize || !run_is_advised) { - uintptr_t pgLo = round_page_kernel(current + - sizeof(medium_inplace_free_entry_s) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_kernel(current - sizeof(msize_t) + - MEDIUM_BYTES_FOR_MSIZE(alloc_msize)); - - if (pgLo < pgHi) { - medium_advisory_t mat = (medium_advisory_t)pgLo; - mat->next = advisories; - mat->size = pgHi - pgLo; - advisories = mat; - } - - memset(&madv_headers[index], 0, sizeof(uint16_t) * alloc_msize); - medium_madvise_header_mark_clean(madv_headers, index, - alloc_msize); - } - } - current += MEDIUM_BYTES_FOR_MSIZE(alloc_msize); - } - - if (advisories) { - OSAtomicIncrement32Barrier( - &(REGION_TRAILER_FOR_MEDIUM_REGION(r)->pinned_to_depot)); - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - while (advisories) { - uintptr_t addr = (uintptr_t)advisories; - size_t size = advisories->size; - advisories = advisories->next; - - mvm_madvise_free(rack, r, addr, addr + size, NULL, - rack->debug_flags & MALLOC_DO_SCRIBBLE); - } - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - OSAtomicDecrement32Barrier( - &(REGION_TRAILER_FOR_MEDIUM_REGION(r)->pinned_to_depot)); - } -} - -static region_t -medium_find_msize_region(rack_t *rack, magazine_t *medium_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - void *ptr; - grain_t slot = MEDIUM_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t *free_list = medium_mag_ptr->mag_free_list; - free_list_t *the_slot = free_list + slot; - free_list_t *limit; - unsigned bitmap; - - // Assumes we've locked the magazine - CHECK_MAGAZINE_PTR_LOCKED(szone, medium_mag_ptr, __PRETTY_FUNCTION__); - - // Look for an exact match by checking the freelist for this msize. - ptr = medium_free_list_get_ptr(rack, *the_slot); - if (ptr) { - return MEDIUM_REGION_FOR_PTR(ptr); - } - - // Mask off the bits representing slots holding free blocks smaller than - // the size we need. - // - // BITMAPN_CTZ implementation - unsigned idx = slot >> 5; - bitmap = 0; - unsigned mask = ~((1 << (slot & 31)) - 1); - for (; idx < MEDIUM_FREELIST_BITMAP_WORDS(rack); ++idx) { - bitmap = medium_mag_ptr->mag_bitmap[idx] & mask; - if (bitmap != 0) { - break; - } - mask = ~0U; - } - // Check for fallthrough: No bits set in bitmap - if ((bitmap == 0) && (idx == MEDIUM_FREELIST_BITMAP_WORDS(rack))) { - return NULL; - } - - // Start looking at the first set bit, plus 32 bits for every word of - // zeroes or entries that were too small. - slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); - - limit = free_list + MEDIUM_FREE_SLOT_COUNT(rack) - 1; - free_list += slot; - - if (free_list < limit) { - ptr = medium_free_list_get_ptr(rack, *free_list); - if (ptr) { - return MEDIUM_REGION_FOR_PTR(ptr); - } else { - /* Shouldn't happen. Fall through to look at last slot.
*/ -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in medium_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - } - } - - // We are now looking at the last slot, which contains blocks equal to, or - // due to coalescing of free blocks, larger than (num_medium_slots - 1) * (medium quantum size). - ptr = medium_free_list_get_ptr(rack, *limit); - if (ptr) { - return MEDIUM_REGION_FOR_PTR(ptr); - } - - return NULL; -} - -static boolean_t -medium_get_region_from_depot(rack_t *rack, magazine_t *medium_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ - if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary - return 0; - } - -#if DEBUG_MALLOC - if (DEPOT_MAGAZINE_INDEX == mag_index) { - malloc_zone_error(rack->debug_flags, true, "medium_get_region_from_depot called for magazine index -1\n", NULL, NULL); - return 0; - } -#endif - - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - - // Appropriate a Depot'd region that can satisfy requested msize. - region_trailer_t *node; - region_t sparse_region; - - while (1) { - sparse_region = medium_find_msize_region(rack, depot_ptr, DEPOT_MAGAZINE_INDEX, msize); - if (NULL == sparse_region) { // Depot empty? - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - return 0; - } - - node = REGION_TRAILER_FOR_MEDIUM_REGION(sparse_region); - if (0 >= node->pinned_to_depot) { - break; - } - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - } - - // disconnect node from Depot - recirc_list_extract(rack, depot_ptr, node); - - // Iterate the region pulling its free entries off the (locked) Depot's free list - int objects_in_use = medium_free_detach_region(rack, depot_ptr, sparse_region); - - // Transfer ownership of the region - MAGAZINE_INDEX_FOR_MEDIUM_REGION(sparse_region) = mag_index; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on its new (locked) magazine's free list - size_t bytes_inplay = medium_free_reattach_region(rack, medium_mag_ptr, sparse_region); - - depot_ptr->mag_num_bytes_in_objects -= bytes_inplay; - depot_ptr->num_bytes_in_magazine -= MEDIUM_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects -= objects_in_use; - - medium_mag_ptr->mag_num_bytes_in_objects += bytes_inplay; - medium_mag_ptr->num_bytes_in_magazine += MEDIUM_REGION_PAYLOAD_BYTES; - medium_mag_ptr->mag_num_objects += objects_in_use; - - // connect to magazine as first node - recirc_list_splice_first(rack, medium_mag_ptr, node); - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - - MAGMALLOC_DEPOTREGION(MEDIUM_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, MEDIUM_REGION_SIZE, - (int)BYTES_USED_FOR_MEDIUM_REGION(sparse_region)); // DTrace USDT Probe - - return 1; -} - -#if CONFIG_MADVISE_PRESSURE_RELIEF -void -medium_madvise_pressure_relief(rack_t *rack) -{ - mag_index_t mag_index; - magazine_t *medium_depot_ptr = &rack->magazines[DEPOT_MAGAZINE_INDEX]; - - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - size_t index; - for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) { - SZONE_LOCK(MEDIUM_SZONE_FROM_RACK(rack)); - - region_t medium = rack->region_generation->hashed_regions[index]; - if (!medium || medium == HASHRING_REGION_DEALLOCATED) { - SZONE_UNLOCK(MEDIUM_SZONE_FROM_RACK(rack)); - continue; - } - - magazine_t *mag_ptr = 
mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(medium), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium)); - - SZONE_UNLOCK(MEDIUM_SZONE_FROM_RACK(rack)); - - /* Ordering is important here, the magazine of a region may potentially change - * during mag_lock_zine_for_region_trailer, so src_mag_index must be taken - * after we've obtained the lock. - */ - mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium); - - /* We can (and must) ignore magazines that are already in the recirc depot. */ - if (src_mag_index == DEPOT_MAGAZINE_INDEX) { - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - continue; - } - - if (REGION_TRAILER_FOR_MEDIUM_REGION(medium)->pinned_to_depot > 0) { - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - continue; - } - - if (medium == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) { - medium_finalize_region(rack, mag_ptr); - } - - /* Because this region is currently in use, we can't safely madvise it while - * it's attached to the magazine. For this operation we have to remove it from - * the current mag, attach it to the depot and then madvise. - */ - - recirc_list_extract(rack, mag_ptr, REGION_TRAILER_FOR_MEDIUM_REGION(medium)); - int objects_in_use = medium_free_detach_region(rack, mag_ptr, medium); - - SZONE_MAGAZINE_PTR_LOCK(medium_depot_ptr); - MAGAZINE_INDEX_FOR_MEDIUM_REGION(medium) = DEPOT_MAGAZINE_INDEX; - REGION_TRAILER_FOR_MEDIUM_REGION(medium)->pinned_to_depot = 0; - - size_t bytes_inplay = medium_free_reattach_region(rack, medium_depot_ptr, medium); - - /* Fix up the metadata of the target magazine while the region is in the depot. */ - mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - mag_ptr->num_bytes_in_magazine -= MEDIUM_REGION_PAYLOAD_BYTES; - mag_ptr->mag_num_objects -= objects_in_use; - - /* Now we can drop the magazine lock of the source mag. */ - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - - medium_depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - medium_depot_ptr->num_bytes_in_magazine += MEDIUM_REGION_PAYLOAD_BYTES; - medium_depot_ptr->mag_num_objects += objects_in_use; - - recirc_list_splice_last(rack, medium_depot_ptr, REGION_TRAILER_FOR_MEDIUM_REGION(medium)); - - /* Actually do the scan, done holding the depot lock, the call will drop the lock - * around the actual madvise syscalls. - */ - medium_free_scan_madvise_free(rack, medium_depot_ptr, medium); - - /* Now the region is in the recirc depot, the next allocations to require more - * blocks will come along and take one of these regions back out of the depot. - * As OS X madvises reuse on a per-region basis, we leave as many of these - * regions in the depot as possible after memory pressure. - */ - SZONE_MAGAZINE_PTR_UNLOCK(medium_depot_ptr); - } - } -} -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -static MALLOC_INLINE void -medium_madvise_free_range_conditional_no_lock(rack_t *rack, magazine_t *mag_ptr, - size_t trigger_level, region_t region, free_list_t *fl, msize_t flmsz, - void *ptr, size_t sz) -{ - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(region); - msize_t *madvh = MEDIUM_MADVISE_HEADER_FOR_PTR(ptr); - - msize_t trigger_msize = trigger_level >> SHIFT_MEDIUM_QUANTUM; - - size_t free_header_size = sizeof(medium_inplace_free_entry_s) + sizeof(msize_t); - size_t free_trailer_size = sizeof(msize_t); - // We have to avoid the free_list header at the start of the allocation - // unless the entry is out in the OOB area, in which case, we can madvise - // the whole region.
- if (medium_is_oob_free_entry(*fl)) { - free_header_size = 0; - free_trailer_size = 0; - } - - uintptr_t rangep = (uintptr_t)medium_free_list_get_ptr(rack, *fl); - msize_t range_idx = MEDIUM_META_INDEX_FOR_PTR(rangep); - msize_t range_msz = flmsz; - - msize_t src_idx = MEDIUM_META_INDEX_FOR_PTR(ptr); - msize_t src_msz = MEDIUM_MSIZE_FOR_BYTES(sz); - msize_t src_end_idx = src_idx + src_msz - 1; - medium_madvise_header_mark_middle(madvh, src_idx); - medium_madvise_header_mark_middle(madvh, src_idx + src_msz - 1); - - msize_t left_end_idx = src_idx - 1; - msize_t left_msz = src_idx - range_idx; - msize_t right_start_idx = src_idx + src_msz; - msize_t right_end_idx = range_idx + range_msz - 1; - msize_t right_msz = right_end_idx - right_start_idx + 1; - - msize_t dirty_msz = src_msz; - - size_t vote_force = 0; - msize_t left_dirty_msz = 0; - if (range_idx < src_idx) { - // Peek back one block and see if the range directly in front of this - // one had any blocks that had not been madvised. - left_dirty_msz = medium_madvise_header_dirty_len(madvh, left_end_idx); - if (left_dirty_msz) { - dirty_msz += left_dirty_msz; - } else if (src_idx - range_idx > src_msz) { - // The left-hand region was actually clean, so only choose to - // madvise if the center region is enclosed by clean data. - vote_force++; - } - medium_madvise_header_mark_middle(madvh, range_idx); - medium_madvise_header_mark_middle(madvh, left_end_idx); - } - - msize_t right_dirty_msz = 0; - if (src_end_idx < right_end_idx) { - // Same as above, if we had trailing data coalesced with this entry - // and that was not madvised, consider it, too. - right_dirty_msz = medium_madvise_header_dirty_len(madvh, right_start_idx); - if (right_dirty_msz) { - dirty_msz += right_dirty_msz; - } else if (right_end_idx - right_start_idx > src_msz) { - vote_force++; - } - medium_madvise_header_mark_middle(madvh, right_start_idx); - medium_madvise_header_mark_middle(madvh, right_end_idx); - } - - // We absolutely can't madvise lower than the free-list entry pointer plus - // the header size. When the entry is OOB, there's no header or footer to - // store in memory. - uintptr_t safe_start_ptr = round_page_kernel(rangep + free_header_size); - uintptr_t safe_end_ptr = trunc_page_kernel(rangep + - MEDIUM_BYTES_FOR_MSIZE(range_msz) - free_trailer_size); - - // If the target region is madvisable, then madvise whatever we can but - // bound it by the safe_start/end pointers to make sure we don't clobber - // the free-list. - if ((vote_force == 2) || (dirty_msz >= trigger_msize)) { - uintptr_t lo = MAX(MEDIUM_PTR_FOR_META_INDEX(region, range_idx), - safe_start_ptr); - uintptr_t hi = MIN(MEDIUM_PTR_FOR_META_INDEX(region, range_idx) + - MEDIUM_BYTES_FOR_MSIZE(range_msz), safe_end_ptr); - - // The page that contains the freelist entry needs to be marked as not - // having been madvised. - if (range_idx < MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr)) { - medium_madvise_header_mark_dirty(madvh, range_idx, - MEDIUM_META_INDEX_FOR_PTR(safe_start_ptr) - range_idx); - } - if (range_idx + range_msz > MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr)) { - medium_madvise_header_mark_dirty(madvh, - MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr) + 1, range_idx + - range_msz - MEDIUM_META_INDEX_FOR_PTR(safe_end_ptr)); - } - - medium_madvise_header_mark_clean(madvh, - MEDIUM_META_INDEX_FOR_PTR(lo), - MEDIUM_META_INDEX_FOR_PTR(hi) - MEDIUM_META_INDEX_FOR_PTR(lo)); - - // Mark the whole region as off-limits for the allocator while we drop - // the lock and go to the kernel.
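The safe_start_ptr/safe_end_ptr clamping above is worth spelling out: only whole pages strictly inside the free block, excluding the in-band list header at the front and the size trailer at the back, may be handed to madvise. A self-contained sketch of the same arithmetic (the helper name and parameters are invented for illustration):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical helper: compute the page-aligned subrange [*lo, *hi) of
     * a free block that can be MADV_FREE'd without clobbering the in-band
     * free-list header or trailing size cell. page must be a power of 2. */
    static bool
    madvisable_range(uintptr_t block, size_t len, size_t hdr, size_t trl,
            size_t page, uintptr_t *lo, uintptr_t *hi)
    {
        *lo = (block + hdr + page - 1) & ~(page - 1); /* round up past header */
        *hi = (block + len - trl) & ~(page - 1);      /* round down before trailer */
        return *lo < *hi; /* false: no whole safe page inside the block */
    }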
- medium_free_mark_unfree(rack, *fl, flmsz); - medium_free_list_remove_ptr_no_clear(rack, mag_ptr, *fl, flmsz); - OSAtomicIncrement32Barrier(&node->pinned_to_depot); - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - - mvm_madvise_free(rack, region, lo, hi, - &rack->last_madvise, rack->debug_flags & MALLOC_DO_SCRIBBLE); - - SZONE_MAGAZINE_PTR_LOCK(mag_ptr); - OSAtomicDecrement32Barrier(&node->pinned_to_depot); - *fl = medium_free_list_add_ptr(rack, mag_ptr, (void *)rangep, flmsz); - } else { - // We chose not to madvise, we need to re-mark the region as dirty - // for when we come back to it later. - if (left_dirty_msz < left_msz) { - medium_madvise_header_mark_clean(madvh, range_idx, - left_msz - left_dirty_msz); - } - if (right_dirty_msz < right_msz) { - medium_madvise_header_mark_clean(madvh, right_start_idx + - right_dirty_msz, right_msz - right_dirty_msz); - } - medium_madvise_header_mark_dirty(madvh, src_idx - left_dirty_msz, - src_msz + right_dirty_msz + left_dirty_msz); - } -} - -#if CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT -static MALLOC_INLINE void -medium_madvise_free_range_no_lock(rack_t *rack, - magazine_t *medium_mag_ptr, - size_t trigger_level, - region_t region, - free_list_t freee, - msize_t fmsize, - void *headptr, - size_t headsize) -{ - void *ptr = medium_free_list_get_ptr(rack, freee); - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(region); - - // Lock on medium_magazines[mag_index] is already held here. - // Calculate the first page in the coalesced block that would be safe to mark MADV_FREE - size_t free_header_size = sizeof(medium_inplace_free_entry_s) + sizeof(msize_t); - - // If the free_list_t entry is out-of-line then we don't need to reserve any space - // at the start of the region. - if (medium_is_oob_free_entry(freee)) { - free_header_size = 0; - } - - uintptr_t safe_ptr = (uintptr_t)ptr + free_header_size; - uintptr_t round_safe = round_page_kernel(safe_ptr); - - // Calculate the last page in the coalesced block that would be safe to mark MADV_FREE - uintptr_t safe_extent = (uintptr_t)ptr + MEDIUM_BYTES_FOR_MSIZE(fmsize); - uintptr_t trunc_extent = trunc_page_kernel(safe_extent); - - // If the region spans more than the madvise trigger level, then mark the - // pages as advise-freed. - if (round_safe + trigger_level < trunc_extent) { - // Extend the freed block by the free region header and tail sizes to include pages - // we may have coalesced that no longer host free region tails and headers. - // This may extend over in-use ranges, but the MIN/MAX clamping below will fix that up. - uintptr_t lo = trunc_page_kernel((uintptr_t)headptr); - uintptr_t hi = round_page_kernel((uintptr_t)headptr + headsize + free_header_size); - - uintptr_t free_lo = MAX(round_safe, lo); - uintptr_t free_hi = MIN(trunc_extent, hi); - - if (free_lo < free_hi) { - // Before unlocking, ensure that the metadata for the freed region - // makes it look not free but includes the length. This ensures that - // any code that inspects the metadata while we are unlocked sees - // a valid state and will not try to use or coalesce freed memory - // into it. 
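The sequence that follows (mark unfree, unlink, pin, unlock, madvise, relock, unpin, reinsert) is the standard shape for calling into the kernel without holding the magazine lock; pinned_to_depot keeps the region from being unmapped in the window where the lock is dropped. Schematically, with pthread/C11 stand-ins for the real locking and atomics:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <sys/mman.h>

    /* Schematic only: the real code also unlinks the free-list entry
     * before unlocking and reinserts it after relocking. */
    static void
    madvise_unlocked(pthread_mutex_t *mag_lock, _Atomic int *pinned,
            void *lo, size_t len)
    {
        atomic_fetch_add(pinned, 1);    /* forbid depot unmap while unlocked */
        pthread_mutex_unlock(mag_lock); /* never enter the kernel under the lock */
        madvise(lo, len, MADV_FREE);    /* potentially slow syscall */
        pthread_mutex_lock(mag_lock);
        atomic_fetch_sub(pinned, 1);
    }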
- medium_free_mark_unfree(rack, freee, fmsize); - medium_free_list_remove_ptr_no_clear(rack, medium_mag_ptr, freee, fmsize); - OSAtomicIncrement32Barrier(&(node->pinned_to_depot)); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - mvm_madvise_free(rack, region, free_lo, free_hi, &rack->last_madvise, rack->debug_flags & MALLOC_DO_SCRIBBLE); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - OSAtomicDecrement32Barrier(&(node->pinned_to_depot)); - medium_free_list_add_ptr(rack, medium_mag_ptr, ptr, fmsize); - } - } -} -#endif // CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT - -#if CONFIG_RECIRC_DEPOT -static region_t -medium_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_trailer_t *node) -{ - if (0 < node->bytes_used || 0 < node->pinned_to_depot || depot_ptr->recirculation_entries < recirc_retained_regions) { - return NULL; - } - - // disconnect first node from Depot - recirc_list_extract(rack, depot_ptr, node); - - // Iterate the region pulling its free entries off the (locked) Depot's free list - region_t sparse_region = MEDIUM_REGION_FOR_PTR(node); - int objects_in_use = medium_free_detach_region(rack, depot_ptr, sparse_region); - - if (0 == objects_in_use) { - // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED. - // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not. - rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, - rack->region_generation->num_regions_allocated_shift, - sparse_region); - if (NULL == pSlot) { - malloc_zone_error(rack->debug_flags, true, "medium_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region); - return NULL; - } - *pSlot = HASHRING_REGION_DEALLOCATED; - depot_ptr->num_bytes_in_magazine -= MEDIUM_REGION_PAYLOAD_BYTES; - // Atomically increment num_regions_dealloc -#ifdef __LP64__ - OSAtomicIncrement64(&rack->num_regions_dealloc); -#else - OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc); -#endif - - // Caller will transfer ownership of the region back to the OS with no locks held - MAGMALLOC_DEALLOCREGION(MEDIUM_SZONE_FROM_RACK(rack), (void *)sparse_region, (int)MEDIUM_REGION_SIZE); // DTrace USDT Probe - return sparse_region; - - } else { - malloc_zone_error(rack->debug_flags, true, "medium_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use); - return NULL; - } -} - -static boolean_t -medium_free_do_recirc_to_depot(rack_t *rack, magazine_t *medium_mag_ptr, mag_index_t mag_index) -{ - // The entire magazine crossed the "emptiness threshold". Transfer a region - // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e. - // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
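The "fraction f empty" test shows up concretely a little further down, in medium_free_try_recirc_to_depot: a magazine recirculates a region once it holds more than about a region and a half of idle bytes and its in-use density drops below DENSITY_THRESHOLD. Roughly, with an assumed 75% threshold (the real macro may use a different fraction):

    #include <stdbool.h>
    #include <stddef.h>

    /* Sketch of the recirculation trigger; the threshold fraction is an
     * assumption for illustration, not the actual DENSITY_THRESHOLD value. */
    static bool
    magazine_is_sparse(size_t a /* bytes in magazine */,
            size_t u /* bytes in use */, size_t region_payload)
    {
        size_t idle = a - u;                   /* free bytes the magazine holds */
        return idle > (3 * region_payload) / 2 /* more than 1.5 regions of slack */
                && u < (a * 3) / 4;            /* under ~75% dense (assumed) */
    }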
- region_trailer_t *node = medium_mag_ptr->firstNode; - - while (node && (!node->recirc_suitable || node->pinned_to_depot)) { - node = node->next; - } - - if (NULL == node) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** medium_free_do_recirc_to_depot end of list\n"); -#endif - return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - } - - region_t sparse_region = MEDIUM_REGION_FOR_PTR(node); - - // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start - if (sparse_region == medium_mag_ptr->mag_last_region && - (medium_mag_ptr->mag_bytes_free_at_end || medium_mag_ptr->mag_bytes_free_at_start)) { - medium_finalize_region(rack, medium_mag_ptr); - } - - // disconnect "suitable" node from magazine - recirc_list_extract(rack, medium_mag_ptr, node); - - // Iterate the region pulling its free entries off its (locked) magazine's free list - int objects_in_use = medium_free_detach_region(rack, medium_mag_ptr, sparse_region); - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - // hand over the region to the (locked) Depot - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - // this will cause medium_free_list_add_ptr called by medium_free_reattach_region to use - // the depot as its target magazine, rather than the magazine formerly associated with sparse_region - MAGAZINE_INDEX_FOR_MEDIUM_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on Depot's free list - size_t bytes_inplay = medium_free_reattach_region(rack, depot_ptr, sparse_region); - - medium_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - medium_mag_ptr->num_bytes_in_magazine -= MEDIUM_REGION_PAYLOAD_BYTES; - medium_mag_ptr->mag_num_objects -= objects_in_use; - - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); // Unlock the originating magazine - - depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - depot_ptr->num_bytes_in_magazine += MEDIUM_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects += objects_in_use; - - // connect to Depot as last node - recirc_list_splice_last(rack, depot_ptr, node); - - MAGMALLOC_RECIRCREGION(MEDIUM_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, MEDIUM_REGION_SIZE, - (int)BYTES_USED_FOR_MEDIUM_REGION(sparse_region)); // DTrace USDT Probe - -#if !CONFIG_AGGRESSIVE_MADVISE - // Mark free'd dirty pages with MADV_FREE to reduce memory pressure - medium_free_scan_madvise_free(rack, depot_ptr, sparse_region); -#endif - - // If the region is entirely empty vm_deallocate() it outside the depot lock - region_t r_dealloc = medium_free_try_depot_unmap_no_lock(rack, depot_ptr, node); - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - if (r_dealloc) { - mvm_deallocate_pages(r_dealloc, MEDIUM_REGION_SIZE, 0); - } - return FALSE; // Caller need not unlock the originating magazine -} - -static MALLOC_INLINE boolean_t -medium_free_try_recirc_to_depot(rack_t *rack, - magazine_t *medium_mag_ptr, - mag_index_t mag_index, - region_t region, - free_list_t freee, - msize_t msize, - void *headptr, - size_t headsize) -{ - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(region); - size_t bytes_used = node->bytes_used; - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE?
*/ - if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary - /* NOTHING */ - return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr) - } else if (DEPOT_MAGAZINE_INDEX != mag_index) { - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(MEDIUM_REGION_PAYLOAD_BYTES)) { - /* Region has crossed threshold from density to sparsity. Mark it "suitable" on the - * recirculation candidates list. */ - node->recirc_suitable = TRUE; - } else { - /* After this free, we've found the region is still dense, so it must have been even more so before - * the free. That implies the region is already correctly marked. Do nothing. */ - } - - // Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region - // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e. - // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list. - - size_t a = medium_mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine - size_t u = medium_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine - - if (a - u > ((3 * MEDIUM_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) { - return medium_free_do_recirc_to_depot(rack, medium_mag_ptr, mag_index); - } - - } else { -#if !CONFIG_AGGRESSIVE_MADVISE - // We are free'ing into the depot, so madvise as we do so unless we were madvising every incoming - // allocation anyway. - medium_madvise_free_range_no_lock(rack, medium_mag_ptr, - vm_kernel_page_size, region, freee, msize, headptr, headsize); -#endif - - if (0 < bytes_used || 0 < node->pinned_to_depot) { - /* Depot'd region is still live. Leave it in place on the Depot's recirculation list - * so as to avoid thrashing between the Depot's free list and a magazine's free list - * with detach_region/reattach_region */ - } else { - /* Depot'd region is just now empty. Consider return to OS.
*/ - region_t r_dealloc = medium_free_try_depot_unmap_no_lock(rack, medium_mag_ptr, node); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - if (r_dealloc) { - mvm_deallocate_pages(r_dealloc, MEDIUM_REGION_SIZE, 0); - } - return FALSE; // Caller need not unlock - } - } - return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr) -} -#endif // CONFIG_RECIRC_DEPOT - -static MALLOC_INLINE boolean_t -medium_free_no_lock(rack_t *rack, magazine_t *medium_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize) -{ - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - unsigned index = MEDIUM_META_INDEX_FOR_PTR(ptr); - size_t original_size = MEDIUM_BYTES_FOR_MSIZE(msize); - unsigned char *next_block = ((unsigned char *)ptr + original_size); - msize_t next_index = index + msize; - - MALLOC_TRACE(TRACE_medium_free, (uintptr_t)rack, (uintptr_t)medium_mag_ptr, (uintptr_t)ptr, MEDIUM_BYTES_FOR_MSIZE(msize)); - -#if CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT - void *original_ptr = ptr; -#endif - -#if DEBUG_MALLOC - if (!msize) { - malloc_zone_error(rack->debug_flags, true, "trying to free medium block " - "that is too small in medium_free_no_lock(), ptr=%p, msize=%d\n", - ptr, msize); - } - if (medium_madvise_header_dirty_len(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), index) == 0) { - malloc_zone_error(rack->debug_flags, true, "incoming freed block is not " - "marked as dirty in madvise metadata, ptr=%p, msize=%d\n", - ptr, msize); - } -#endif - - // We try to coalesce this block with the preceding one - if (index > 0 && (meta_headers[index - 1] & MEDIUM_IS_FREE)) { - msize_t previous_msize = meta_headers[index - 1] & ~MEDIUM_IS_FREE; - grain_t previous_index = index - previous_msize; - - // Check if the metadata for the start of the block is also free. - if (meta_headers[previous_index] == (previous_msize | MEDIUM_IS_FREE)) { - void *previous_ptr = (void *)((uintptr_t)ptr - MEDIUM_BYTES_FOR_MSIZE(previous_msize)); - free_list_t previous = medium_free_list_find_by_ptr(rack, medium_mag_ptr, previous_ptr, previous_msize); - medium_free_list_remove_ptr(rack, medium_mag_ptr, previous, previous_msize); - ptr = previous_ptr; - medium_meta_header_set_middle(meta_headers, index); // This block is now a middle block.
- msize += previous_msize; - index -= previous_msize; - } else { - _os_set_crash_log_message("medium free list metadata inconsistency (headers[previous] != previous size)"); - __builtin_trap(); - } - } - - // Try to coalesce this block with the next block - if ((next_block < MEDIUM_REGION_END(region)) && (meta_headers[next_index] & MEDIUM_IS_FREE)) { - msize_t next_msize = meta_headers[next_index] & ~MEDIUM_IS_FREE; - free_list_t next = medium_free_list_find_by_ptr(rack, medium_mag_ptr, next_block, next_msize); - medium_free_list_remove_ptr(rack, medium_mag_ptr, next, next_msize); - msize += next_msize; - } - - if (rack->debug_flags & MALLOC_DO_SCRIBBLE) { - if (!msize) { - malloc_zone_error(rack->debug_flags, true, "incorrect size information for %p - block header was damaged\n", ptr); - } else { - memset(ptr, SCRABBLE_BYTE, MEDIUM_BYTES_FOR_MSIZE(msize)); - } - } - - free_list_t freee = medium_free_list_add_ptr(rack, medium_mag_ptr, ptr, msize); - - // use original_size and not msize to avoid double counting the coalesced blocks - medium_mag_ptr->mag_num_bytes_in_objects -= original_size; - medium_mag_ptr->mag_num_objects--; - - // Update this region's bytes in use count - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(region); - size_t bytes_used = node->bytes_used - original_size; - node->bytes_used = (unsigned int)bytes_used; - - // Always attempt to madvise free regions that exceed the conditional - // madvise limit size. - medium_madvise_free_range_conditional_no_lock(rack, medium_mag_ptr, - medium_sliding_madvise_granularity(medium_mag_ptr), region, &freee, - msize, original_ptr, original_size); - - // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr) if this function - // returns TRUE. - boolean_t needs_unlock = TRUE; - -#if CONFIG_RECIRC_DEPOT - needs_unlock = medium_free_try_recirc_to_depot(rack, medium_mag_ptr, mag_index, region, freee, msize, original_ptr, original_size); -#endif - return needs_unlock; -} - -// Allocates from the last region or a freshly allocated region -static void * -medium_malloc_from_region_no_lock(rack_t *rack, - magazine_t *medium_mag_ptr, - mag_index_t mag_index, - msize_t msize, - void *aligned_address) -{ - void *ptr; - - // Before anything we transform the mag_bytes_free_at_end or mag_bytes_free_at_start - if any - to a regular free block - /* FIXME: last_block needs to be coalesced with previous entry if free, */ - if (medium_mag_ptr->mag_bytes_free_at_end || medium_mag_ptr->mag_bytes_free_at_start) { - medium_finalize_region(rack, medium_mag_ptr); - } - - // Tag the region at "aligned_address" as belonging to us, - // and so put it under the protection of the magazine lock we are holding. - // Do this before advertising "aligned_address" on the hash ring(!)
- MAGAZINE_INDEX_FOR_MEDIUM_REGION(aligned_address) = mag_index; - - // Insert the new region into the hash ring - rack_region_insert(rack, (region_t)aligned_address); - - medium_mag_ptr->mag_last_region = aligned_address; - BYTES_USED_FOR_MEDIUM_REGION(aligned_address) = MEDIUM_BYTES_FOR_MSIZE(msize); - -#if CONFIG_ASLR_INTERNAL - int offset_msize = malloc_entropy[1] & MEDIUM_ENTROPY_MASK; -#if DEBUG_MALLOC - if (getenv("MallocASLRForce")) { - offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & MEDIUM_ENTROPY_MASK; - } - if (getenv("MallocASLRPrint")) { - malloc_report(ASL_LEVEL_INFO, "Region: %p offset: %d\n", aligned_address, offset_msize); - } -#endif -#else - int offset_msize = 0; -#endif - ptr = (void *)((uintptr_t)aligned_address + - MEDIUM_BYTES_FOR_MSIZE(offset_msize)); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), - offset_msize, msize); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), - offset_msize, msize); - medium_mag_ptr->mag_num_objects++; - medium_mag_ptr->mag_num_bytes_in_objects += MEDIUM_BYTES_FOR_MSIZE(msize); - medium_mag_ptr->num_bytes_in_magazine += MEDIUM_REGION_PAYLOAD_BYTES; - - // add a big free block at the end, mark as already advised clean - // because we haven't touched it. - msize_t trailing_offset = offset_msize + msize; - msize_t trailing_msize = NUM_MEDIUM_BLOCKS - msize - offset_msize; - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), - trailing_offset, trailing_msize); - medium_madvise_header_mark_clean(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), - trailing_offset, trailing_msize); - medium_mag_ptr->mag_bytes_free_at_end = - MEDIUM_BYTES_FOR_MSIZE(NUM_MEDIUM_BLOCKS - msize - offset_msize); - -#if CONFIG_ASLR_INTERNAL - // add a big free block at the start - medium_mag_ptr->mag_bytes_free_at_start = MEDIUM_BYTES_FOR_MSIZE(offset_msize); - if (offset_msize) { - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), 0, - offset_msize); - medium_madvise_header_mark_clean(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), 0, - offset_msize); - } -#else - medium_mag_ptr->mag_bytes_free_at_start = 0; -#endif - - // connect to magazine as last node - recirc_list_splice_last(rack, medium_mag_ptr, - REGION_TRAILER_FOR_MEDIUM_REGION(aligned_address)); - - return ptr; -} - -void * -medium_memalign(szone_t *szone, size_t alignment, size_t size, size_t span) -{ - msize_t mspan = MEDIUM_MSIZE_FOR_BYTES(span + MEDIUM_QUANTUM - 1); - void *p = medium_malloc_should_clear(&szone->medium_rack, mspan, 0); - - if (NULL == p) { - return NULL; - } - - size_t offset = ((uintptr_t)p) & (alignment - 1); // p % alignment - size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment - - msize_t msize = MEDIUM_MSIZE_FOR_BYTES(size + MEDIUM_QUANTUM - 1); - msize_t mpad = MEDIUM_MSIZE_FOR_BYTES(pad + MEDIUM_QUANTUM - 1); - msize_t mwaste = mspan - msize - mpad; // excess blocks - - if (mpad > 0) { - void *q = (void *)(((uintptr_t)p) + pad); - - // Mark q as block header and in-use, thus creating two blocks. 
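The offset/pad computation in medium_memalign above is plain power-of-two modular arithmetic: for a pointer ending in 0x...1200 and alignment 0x1000, offset is 0x200 and pad is 0xe00, so p + pad is aligned. As a standalone sketch:

    #include <stdint.h>

    /* How many bytes to skip so that (p + pad) % alignment == 0.
     * alignment must be a power of two for the mask trick to hold. */
    static inline uintptr_t
    pad_for_alignment(uintptr_t p, uintptr_t alignment)
    {
        uintptr_t offset = p & (alignment - 1); /* p % alignment */
        return offset ? alignment - offset : 0; /* 0 if already aligned */
    }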
- magazine_t *medium_mag_ptr = mag_lock_zine_for_region_trailer( - szone->medium_rack.magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(p))); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(p), MEDIUM_META_INDEX_FOR_PTR(p), mpad); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(p), MEDIUM_META_INDEX_FOR_PTR(p), mpad); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), msize + mwaste); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), msize + mwaste); - medium_mag_ptr->mag_num_objects++; - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - - // Give up mpad blocks beginning at p to the medium free list - free_medium(&szone->medium_rack, p, MEDIUM_REGION_FOR_PTR(p), MEDIUM_BYTES_FOR_MSIZE(mpad)); - - p = q; // advance p to the desired alignment - } - if (mwaste > 0) { - void *q = (void *)(((uintptr_t)p) + MEDIUM_BYTES_FOR_MSIZE(msize)); - // Mark q as block header and in-use, thus creating two blocks. - magazine_t *medium_mag_ptr = mag_lock_zine_for_region_trailer(szone->medium_rack.magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(p))); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(p), MEDIUM_META_INDEX_FOR_PTR(p), msize); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(p), MEDIUM_META_INDEX_FOR_PTR(p), msize); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), mwaste); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), mwaste); - medium_mag_ptr->mag_num_objects++; - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - - // Give up mwaste blocks beginning at q to the medium free list - free_medium(&szone->medium_rack, q, MEDIUM_REGION_FOR_PTR(q), MEDIUM_BYTES_FOR_MSIZE(mwaste)); - } - - return p; // p has the desired size and alignment, and can later be free()'d -} - -boolean_t -medium_claimed_address(rack_t *rack, void *ptr) -{ - region_t r = medium_region_for_ptr_no_lock(rack, ptr); - return r && ptr < (void *)MEDIUM_REGION_END(r); -} - -void * -medium_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size) -{ - msize_t new_msize = MEDIUM_MSIZE_FOR_BYTES(new_good_size); - msize_t mshrinkage = MEDIUM_MSIZE_FOR_BYTES(old_size) - new_msize; - - if (mshrinkage) { - void *q = (void *)((uintptr_t)ptr + MEDIUM_BYTES_FOR_MSIZE(new_msize)); - magazine_t *medium_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr))); - - // Mark q as block header and in-use, thus creating two blocks. 
- medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), MEDIUM_META_INDEX_FOR_PTR(ptr), new_msize); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), MEDIUM_META_INDEX_FOR_PTR(ptr), new_msize); - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), mshrinkage); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(q), MEDIUM_META_INDEX_FOR_PTR(q), mshrinkage); - medium_mag_ptr->mag_num_objects++; - - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - free_medium(rack, q, MEDIUM_REGION_FOR_PTR(q), 0); - } - - return ptr; -} - -boolean_t -medium_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size) -{ - // returns 1 on success - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - msize_t *madv_headers = MEDIUM_MADVISE_HEADER_FOR_PTR(ptr); - unsigned index; - msize_t old_msize, new_msize; - unsigned next_index; - void *next_block; - msize_t next_msize_and_free; - boolean_t is_free; - msize_t next_msize, leftover_msize; - void *leftover; - - index = MEDIUM_META_INDEX_FOR_PTR(ptr); - old_msize = MEDIUM_MSIZE_FOR_BYTES(old_size); - new_msize = MEDIUM_MSIZE_FOR_BYTES(new_size + MEDIUM_QUANTUM - 1); - next_index = index + old_msize; - - if (next_index >= NUM_MEDIUM_BLOCKS) { - return 0; - } - next_block = (char *)ptr + old_size; - -#if DEBUG_MALLOC - if ((uintptr_t)next_block & (MEDIUM_QUANTUM - 1)) { - malloc_zone_error(rack->debug_flags, true, "internal invariant broken in realloc(next_block) for %p\n", next_block); - } - if (meta_headers[index] != old_msize) { - malloc_report(ASL_LEVEL_ERR, "*** medium_try_realloc_in_place incorrect old %d %d\n", meta_headers[index], old_msize); - } -#endif - - magazine_t *medium_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)), - MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr))); - if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr))) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; - } - - msize_t coalesced_msize = new_msize - old_msize; -#if CONFIG_MEDIUM_CACHE - void *last_free_ptr = medium_mag_ptr->mag_last_free; - msize_t last_free_msize = medium_mag_ptr->mag_last_free_msize; - if (last_free_ptr == next_block && old_msize + last_free_msize >= new_msize) { - /* - * There is a block in mag_last_free and it's immediately after - * this block and it's large enough. We can use some or all of it. - */ - leftover_msize = last_free_msize - coalesced_msize; - if (leftover_msize) { - medium_mag_ptr->mag_last_free_msize -= coalesced_msize; - medium_mag_ptr->mag_last_free += new_size - old_size; - // The block in mag_last_free is still marked as header and in-use, so copy that - // state to the block that remains. The state for the block that we're going to - // use is adjusted by the medium_meta_header_set_middle() call below. - medium_meta_header_set_in_use(meta_headers, index + new_msize, leftover_msize); - - if (madv_headers[index] & MEDIUM_IS_ADVISED) { - medium_madvise_header_mark_clean(madv_headers, index + new_msize, leftover_msize); - } else { - medium_madvise_header_mark_dirty(madv_headers, index + new_msize, leftover_msize); - } - } else { - // Using the whole block. 
- medium_mag_ptr->mag_last_free = NULL; - medium_mag_ptr->mag_last_free_msize = 0; - medium_mag_ptr->mag_last_free_rgn = NULL; - } - medium_meta_header_set_in_use(meta_headers, index, new_msize); - medium_madvise_header_mark_dirty(madv_headers, index, new_msize); - medium_meta_header_set_middle(meta_headers, next_index); - medium_madvise_header_mark_middle(madv_headers, next_index); - } else { -#endif // CONFIG_MEDIUM_CACHE - /* - * Try to expand into unused space immediately after this block. - */ - msize_t unused_msize = MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end); - void *unused_start = MEDIUM_REGION_END(MEDIUM_REGION_FOR_PTR(ptr)) - medium_mag_ptr->mag_bytes_free_at_end; - if (medium_mag_ptr->mag_last_region == MEDIUM_REGION_FOR_PTR(ptr) - && coalesced_msize < unused_msize && unused_start == ptr + old_size) { - // Extend the in-use for this block to the new size - medium_meta_header_set_in_use(meta_headers, index, new_msize); - medium_madvise_header_mark_dirty(madv_headers, index, new_msize); - - // Clear the in-use size for the start of the area we extended into - medium_meta_header_set_middle(meta_headers, next_index); - medium_madvise_header_mark_middle(madv_headers, next_index); - - // Reduce mag_bytes_free_at_end and update its in-use size. - medium_mag_ptr->mag_bytes_free_at_end -= MEDIUM_BYTES_FOR_MSIZE(coalesced_msize); - medium_meta_header_set_in_use(meta_headers, index + new_msize, - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end)); - medium_madvise_header_mark_clean(madv_headers, index + new_msize, - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end)); - } else { - /* - * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) - * it. - */ - next_msize_and_free = meta_headers[next_index]; - is_free = next_msize_and_free & MEDIUM_IS_FREE; - if (!is_free) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; // next_block is in use; - } - - next_msize = next_msize_and_free & ~MEDIUM_IS_FREE; - if (old_msize + next_msize < new_msize) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; // even with next block, not enough - } - - // The following block is big enough; pull it from its freelist and chop off enough to satisfy - // our needs. 
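In msize units, the decision that follows is simple arithmetic: the block can grow in place only if the immediately following block is free and old + next covers the request; any remainder is split off and returned to the free list. A condensed model of that check (names invented, and the msize_t width is an assumption for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint16_t msize_t; /* size in quantum units; width assumed */

    static bool
    can_grow_in_place(msize_t old_msize, msize_t next_msize, bool next_is_free,
            msize_t new_msize, msize_t *leftover)
    {
        if (!next_is_free || old_msize + next_msize < new_msize) {
            return false; /* neighbor in use, or too small even combined */
        }
        *leftover = old_msize + next_msize - new_msize; /* back to the free list */
        return true;
    }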
- free_list_t freee = medium_free_list_find_by_ptr(rack, medium_mag_ptr, next_block, next_msize); - medium_free_list_remove_ptr(rack, medium_mag_ptr, freee, next_msize); - medium_meta_header_set_middle(meta_headers, next_index); - medium_madvise_header_mark_middle(madv_headers, next_index); - leftover_msize = old_msize + next_msize - new_msize; - if (leftover_msize) { - /* there's some left, so put the remainder back */ - leftover = (unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(new_msize); - medium_free_list_add_ptr(rack, medium_mag_ptr, leftover, leftover_msize); - } - medium_meta_header_set_in_use(meta_headers, index, new_msize); - medium_madvise_header_mark_dirty(madv_headers, index, new_msize); - } -#if CONFIG_MEDIUM_CACHE - } -#endif // CONFIG_MEDIUM_CACHE -#if DEBUG_MALLOC - if (MEDIUM_BYTES_FOR_MSIZE(new_msize) > szone->large_threshold) { - malloc_report(ASL_LEVEL_ERR, "*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize); - } - - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in medium_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, *MEDIUM_METADATA_FOR_PTR(ptr)); - } -#endif - medium_mag_ptr->mag_num_bytes_in_objects += MEDIUM_BYTES_FOR_MSIZE(new_msize - old_msize); - - // Update this region's bytes in use count - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - size_t bytes_used = node->bytes_used + MEDIUM_BYTES_FOR_MSIZE(new_msize - old_msize); - node->bytes_used = (unsigned int)bytes_used; - - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(MEDIUM_REGION_PAYLOAD_BYTES)) { - /* After this reallocation the region is still sparse, so it must have been even more so before - * the reallocation. That implies the region is already correctly marked. Do nothing. */ - } else { - /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the - * recirculation candidates list. */ - node->recirc_suitable = FALSE; - } - - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return 1; -} - -static char *medium_check_fail_msg = "check: incorrect medium region "; - -#define MEDIUM_CHECK_FAIL(fmt, ...)
\ - malloc_zone_check_fail(medium_check_fail_msg, \ - "%ld, counter=%d\n" fmt, region_index, counter, __VA_ARGS__); - -boolean_t -medium_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter) -{ - unsigned char *ptr = MEDIUM_REGION_ADDRESS(region); - msize_t *meta_headers = MEDIUM_META_HEADER_FOR_PTR(ptr); - unsigned char *region_end = MEDIUM_REGION_END(region); - msize_t prev_free = 0; - unsigned index; - msize_t msize_and_free; - msize_t msize; - free_list_t free_head, previous, next; - msize_t *follower; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - - // Assumes locked - CHECK_MAGAZINE_PTR_LOCKED(szone, medium_mag_ptr, __PRETTY_FUNCTION__); - - if (region == medium_mag_ptr->mag_last_region) { - ptr += medium_mag_ptr->mag_bytes_free_at_start; - region_end -= medium_mag_ptr->mag_bytes_free_at_end; - } - - while (ptr < region_end) { - index = MEDIUM_META_INDEX_FOR_PTR(ptr); - msize_and_free = meta_headers[index]; - if (!(msize_and_free & MEDIUM_IS_FREE)) { - // block is in use - msize = msize_and_free; - if (!msize) { - MEDIUM_CHECK_FAIL("*** invariant broken: null msize ptr=%p num_medium_regions=%d end=%p\n", ptr, - (int)rack->num_regions, region_end); - return 0; - } -#if !CONFIG_RELAXED_INVARIANT_CHECKS - if (MEDIUM_BYTES_FOR_MSIZE(msize) > szone->large_threshold) { - MEDIUM_CHECK_FAIL("*** invariant broken for %p this medium msize=%d - size is too large\n", ptr, msize_and_free); - return 0; - } -#endif // CONFIG_RELAXED_INVARIANT_CHECKS - ptr += MEDIUM_BYTES_FOR_MSIZE(msize); - prev_free = 0; - } else { - // free pointer - msize = msize_and_free & ~MEDIUM_IS_FREE; - free_head = (free_list_t){ .p = ptr }; - follower = (msize_t *)FOLLOWING_MEDIUM_PTR(ptr, msize); - if (!msize) { - MEDIUM_CHECK_FAIL("*** invariant broken for free block %p this msize=%d\n", ptr, msize); - return 0; - } -#if !CONFIG_RELAXED_INVARIANT_CHECKS - if (prev_free) { - MEDIUM_CHECK_FAIL("*** invariant broken for %p (2 free in a row)\n", ptr); - return 0; - } -#endif - - // check for possible OOB entry if needed - if (medium_needs_oob_free_entry(ptr, msize)) { - oob_free_entry_t oob = medium_oob_free_find_ptr(ptr, msize); - if (oob) { - free_head.oob = oob; - } - } - - previous = medium_free_list_get_previous(rack, free_head); - next = medium_free_list_get_next(rack, free_head); - if (previous.p && !MEDIUM_PTR_IS_FREE(medium_free_list_get_ptr(rack, previous))) { - MEDIUM_CHECK_FAIL("*** invariant broken for %p (previous %p is not a free pointer)\n", ptr, medium_free_list_get_ptr(rack, previous)); - return 0; - } - if (next.p && !MEDIUM_PTR_IS_FREE(medium_free_list_get_ptr(rack, next))) { - MEDIUM_CHECK_FAIL("*** invariant broken for %p (next %p is not a free pointer)\n", ptr, medium_free_list_get_ptr(rack, next)); - return 0; - } - if (MEDIUM_PREVIOUS_MSIZE(follower) != msize) { - MEDIUM_CHECK_FAIL("*** invariant broken for medium free %p followed by %p in region [%p-%p] " - "(end marker incorrect) should be %d; in fact %d\n", - ptr, follower, MEDIUM_REGION_ADDRESS(region), region_end, msize, MEDIUM_PREVIOUS_MSIZE(follower)); - return 0; - } - ptr = (unsigned char *)follower; - prev_free = MEDIUM_IS_FREE; - } - } - return 1; -} - -kern_return_t -medium_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - szone_t *szone, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - size_t num_regions; - size_t index; - region_t *regions; - vm_range_t 
buffer[MAX_RECORDER_BUFFER]; - unsigned count = 0; - kern_return_t err; - region_t region; - vm_range_t range; - vm_range_t admin_range; - vm_range_t ptr_range; - unsigned char *mapped_region; - msize_t *block_header; - unsigned block_index; - unsigned block_limit; - msize_t msize_and_free; - msize_t msize; - magazine_t *medium_mag_base = NULL; - - region_hash_generation_t *srg_ptr; - err = reader(task, (vm_address_t)szone->medium_rack.region_generation, sizeof(region_hash_generation_t), (void **)&srg_ptr); - if (err) { - return err; - } - - num_regions = srg_ptr->num_regions_allocated; - err = reader(task, (vm_address_t)srg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions); - if (err) { - return err; - } - - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - // Map in all active magazines. Do this outside the iteration over regions. - err = reader(task, (vm_address_t)(szone->medium_rack.magazines), szone->medium_rack.num_magazines * sizeof(magazine_t), - (void **)&medium_mag_base); - if (err) { - return err; - } - } - - for (index = 0; index < num_regions; ++index) { - region = regions[index]; - if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { - range.address = (vm_address_t)MEDIUM_REGION_ADDRESS(region); - range.size = MEDIUM_REGION_SIZE; - if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { - admin_range.address = range.address + MEDIUM_METADATA_START; - admin_range.size = MEDIUM_METADATA_SIZE; - recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); - } - if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { - ptr_range.address = range.address; - ptr_range.size = NUM_MEDIUM_BLOCKS * MEDIUM_QUANTUM; - recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); - } - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - err = reader(task, range.address, range.size, (void **)&mapped_region); - if (err) { - return err; - } - - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(mapped_region); - magazine_t *medium_mag_ptr = medium_mag_base + mag_index; - - int cached_free_blocks = 0; -#if CONFIG_MEDIUM_CACHE - // Each magazine could have a pointer to a cached free block from - // this region. Count the magazines that have such a pointer. - for (mag_index = 0; mag_index < szone->medium_rack.num_magazines; mag_index++) { - if ((void *)range.address == (medium_mag_base + mag_index)->mag_last_free_rgn) { - cached_free_blocks++; - } - } -#endif // CONFIG_MEDIUM_CACHE - - block_header = (msize_t *)(mapped_region + MEDIUM_METADATA_START + sizeof(region_trailer_t)); - block_index = 0; - block_limit = NUM_MEDIUM_BLOCKS; - if (region == medium_mag_ptr->mag_last_region) { - block_index += MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_start); - block_limit -= MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end); - } - for (; block_index < block_limit; block_index += msize) { - msize_and_free = block_header[block_index]; - msize = msize_and_free & ~MEDIUM_IS_FREE; - if (!msize) { - return KERN_FAILURE; // Something's amiss. Avoid looping at this block_index. - } - if (!(msize_and_free & MEDIUM_IS_FREE)) { - vm_address_t ptr = range.address + MEDIUM_BYTES_FOR_MSIZE(block_index); -#if CONFIG_MEDIUM_CACHE - // If there are still magazines that have cached free - // blocks in this region, check whether this is one of - // them and don't return the block pointer if it is.
- boolean_t block_cached = false; - if (cached_free_blocks) { - for (mag_index = 0; mag_index < szone->medium_rack.num_magazines; mag_index++) { - if ((void *)ptr == (medium_mag_base + mag_index)->mag_last_free) { - block_cached = true; - cached_free_blocks--; - break; - } - } - } - if (block_cached) { - continue; - } -#endif // CONFIG_MEDIUM_CACHE - // Block in use - buffer[count].address = ptr; - buffer[count].size = MEDIUM_BYTES_FOR_MSIZE(msize); - count++; - if (count >= MAX_RECORDER_BUFFER) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - if (count) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - } - return 0; -} - -static void * -medium_malloc_from_free_list(rack_t *rack, magazine_t *medium_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - msize_t this_msize; - bool was_madvised; - grain_t slot = MEDIUM_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t *free_list = medium_mag_ptr->mag_free_list; - free_list_t *the_slot = free_list + slot; - free_list_t *limit; - unsigned bitmap; - msize_t leftover_msize; - void *leftover_ptr; - void *ptr; - - // Assumes we've locked the magazine - CHECK_MAGAZINE_PTR_LOCKED(szone, medium_mag_ptr, __PRETTY_FUNCTION__); - - // Look for an exact match by checking the freelist for this msize. - if (medium_free_list_get_ptr(rack, *the_slot)) { - ptr = medium_free_list_get_ptr(rack, *the_slot); - this_msize = msize; - medium_free_list_remove_ptr(rack, medium_mag_ptr, *the_slot, msize); - goto return_medium_alloc; - } - - // Mask off the bits representing slots holding free blocks smaller than - // the size we need. If there are no larger free blocks, try allocating - // from the free space at the end of the medium region. - // - // BITMAPN_CTZ implementation - unsigned idx = slot >> 5; - bitmap = 0; - unsigned mask = ~((1 << (slot & 31)) - 1); - for (; idx < MEDIUM_FREELIST_BITMAP_WORDS(rack); ++idx) { - bitmap = medium_mag_ptr->mag_bitmap[idx] & mask; - if (bitmap != 0) { - break; - } - mask = ~0U; - } - // Check for fallthrough: No bits set in bitmap - if ((bitmap == 0) && (idx == MEDIUM_FREELIST_BITMAP_WORDS(rack))) { - goto try_medium_from_end; - } - - // Start looking at the first set bit, plus 32 bits for every word of - // zeroes or entries that were too small. - slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); - - // FIXME: Explain use of - 1 here, last slot has special meaning - limit = free_list + MEDIUM_FREE_SLOT_COUNT(rack) - 1; - free_list += slot; - - // Attempt to pull off the free_list slot that we now think is full.
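The bitmap walk above is a multi-word find-first-set: mask off the bits for slots smaller than the request in the first word, scan forward a word at a time, then convert the first set bit back into a slot index with count-trailing-zeros. A standalone equivalent using the GCC/Clang builtin:

    #include <stdint.h>

    /* Return the first set bit at or above `slot` across an array of
     * 32-bit bitmap words, or -1 if no suitable free-list slot is set. */
    static int
    first_set_slot(const uint32_t *bitmap, unsigned nwords, unsigned slot)
    {
        unsigned idx = slot >> 5;                   /* word holding `slot` */
        uint32_t mask = ~((1u << (slot & 31)) - 1); /* drop bits below `slot` */
        for (; idx < nwords; ++idx, mask = ~0u) {
            uint32_t word = bitmap[idx] & mask;
            if (word) {
                return (int)(idx * 32 + (unsigned)__builtin_ctz(word));
            }
        }
        return -1; /* bitmap empty above `slot`; caller falls back elsewhere */
    }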
- if ((ptr = medium_free_list_get_ptr(rack, *free_list))) { - this_msize = MEDIUM_PTR_SIZE(ptr); - was_madvised = (medium_madvise_header_dirty_len( - MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), this_msize) == 0); - medium_free_list_remove_ptr(rack, medium_mag_ptr, *free_list, this_msize); - goto add_leftover_and_proceed; - } - -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in medium_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - -try_medium_from_end: - // Let's see if we can use medium_mag_ptr->mag_bytes_free_at_end - if (medium_mag_ptr->mag_bytes_free_at_end >= MEDIUM_BYTES_FOR_MSIZE(msize)) { - ptr = MEDIUM_REGION_END(medium_mag_ptr->mag_last_region) - - medium_mag_ptr->mag_bytes_free_at_end; - medium_mag_ptr->mag_bytes_free_at_end -= MEDIUM_BYTES_FOR_MSIZE(msize); - if (medium_mag_ptr->mag_bytes_free_at_end) { - // let's mark this block as in use to serve as boundary - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), - MEDIUM_META_INDEX_FOR_PTR((unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(msize)), - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end)); - medium_madvise_header_mark_clean(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), - MEDIUM_META_INDEX_FOR_PTR((unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(msize)), - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_end)); - } - this_msize = msize; - goto return_medium_alloc; - } -#if CONFIG_ASLR_INTERNAL - // Try from start if nothing left at end - if (medium_mag_ptr->mag_bytes_free_at_start >= MEDIUM_BYTES_FOR_MSIZE(msize)) { - ptr = MEDIUM_REGION_ADDRESS(medium_mag_ptr->mag_last_region) + - medium_mag_ptr->mag_bytes_free_at_start - - MEDIUM_BYTES_FOR_MSIZE(msize); - medium_mag_ptr->mag_bytes_free_at_start -= MEDIUM_BYTES_FOR_MSIZE(msize); - if (medium_mag_ptr->mag_bytes_free_at_start) { - // let's mark this block as in use to serve as boundary - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), 0, - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_start)); - medium_madvise_header_mark_clean(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), 0, - MEDIUM_MSIZE_FOR_BYTES(medium_mag_ptr->mag_bytes_free_at_start)); - } - this_msize = msize; - goto return_medium_alloc; - } -#endif - return NULL; - -add_leftover_and_proceed: - if (this_msize > msize) { - leftover_msize = this_msize - msize; - leftover_ptr = (unsigned char *)ptr + MEDIUM_BYTES_FOR_MSIZE(msize); -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in medium_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); - } -#endif - medium_free_list_add_ptr(rack, medium_mag_ptr, leftover_ptr, leftover_msize); - if (was_madvised) { - medium_madvise_header_mark_clean( - MEDIUM_MADVISE_HEADER_FOR_PTR(leftover_ptr), - MEDIUM_META_INDEX_FOR_PTR(leftover_ptr), leftover_msize); - } else { - medium_madvise_header_mark_dirty( - MEDIUM_MADVISE_HEADER_FOR_PTR(leftover_ptr), - MEDIUM_META_INDEX_FOR_PTR(leftover_ptr), leftover_msize); - } - this_msize = msize; - } - -return_medium_alloc: - medium_mag_ptr->mag_num_objects++; - medium_mag_ptr->mag_num_bytes_in_objects += MEDIUM_BYTES_FOR_MSIZE(this_msize); - - // Update this region's bytes in use count - region_trailer_t *node = REGION_TRAILER_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - size_t bytes_used = node->bytes_used + MEDIUM_BYTES_FOR_MSIZE(this_msize); - node->bytes_used = (unsigned int)bytes_used; - - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(MEDIUM_REGION_PAYLOAD_BYTES)) { - /* After this allocation the region 
is still sparse, so it must have been even more so before - * the allocation. That implies the region is already correctly marked. Do nothing. */ - } else { - /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the - * recirculation candidates list. */ - node->recirc_suitable = FALSE; - } -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in medium_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); - } -#endif - medium_meta_header_set_in_use(MEDIUM_META_HEADER_FOR_PTR(ptr), - MEDIUM_META_INDEX_FOR_PTR(ptr), this_msize); - medium_madvise_header_mark_dirty(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), - MEDIUM_META_INDEX_FOR_PTR(ptr), this_msize); - return ptr; -} - -void * -medium_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested) -{ - void *ptr; - mag_index_t mag_index = medium_mag_get_thread_index() % rack->num_magazines; - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - - MALLOC_TRACE(TRACE_medium_malloc, (uintptr_t)rack, MEDIUM_BYTES_FOR_MSIZE(msize), (uintptr_t)medium_mag_ptr, cleared_requested); - - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - -#if CONFIG_MEDIUM_CACHE - ptr = medium_mag_ptr->mag_last_free; - - if (medium_mag_ptr->mag_last_free_msize == msize) { - // we have a winner - medium_mag_ptr->mag_last_free = NULL; - medium_mag_ptr->mag_last_free_msize = 0; - medium_mag_ptr->mag_last_free_rgn = NULL; - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, MEDIUM_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } -#endif /* CONFIG_MEDIUM_CACHE */ - - while (1) { - ptr = medium_malloc_from_free_list(rack, medium_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, MEDIUM_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - - if (medium_get_region_from_depot(rack, medium_mag_ptr, mag_index, msize)) { - ptr = medium_malloc_from_free_list(rack, medium_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, MEDIUM_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - } - - // The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc(). - // The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first - // thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section. - // Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the - // allocation. After some time the magazine is resupplied, the original thread leaves with its allocation, - // and retrying threads succeed in the code just above. - if (!medium_mag_ptr->alloc_underway) { - void *fresh_region; - - // time to create a new region (do this outside the magazine lock) - medium_mag_ptr->alloc_underway = TRUE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - fresh_region = mvm_allocate_pages_securely(MEDIUM_REGION_SIZE, - MEDIUM_BLOCKS_ALIGN, VM_MEMORY_MALLOC_MEDIUM, - rack->debug_flags); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - - // DTrace USDT Probe - MAGMALLOC_ALLOCREGION(MEDIUM_SZONE_FROM_RACK(rack), (int)mag_index, - fresh_region, MEDIUM_REGION_SIZE); - - if (!fresh_region) { // out of memory!
- medium_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return NULL; - } - - ptr = medium_malloc_from_region_no_lock(rack, medium_mag_ptr, - mag_index, msize, fresh_region); - - // we don't clear because this freshly allocated space is pristine - medium_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return ptr; - } else { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - } - } - /* NOTREACHED */ -} - -size_t -medium_size(rack_t *rack, const void *ptr) -{ - if (medium_region_for_ptr_no_lock(rack, ptr)) { - if (MEDIUM_META_INDEX_FOR_PTR(ptr) >= NUM_MEDIUM_BLOCKS) { - return 0; - } - msize_t msize_and_free = *MEDIUM_METADATA_FOR_PTR(ptr); - if (msize_and_free & MEDIUM_IS_FREE) { - return 0; - } -#if CONFIG_MEDIUM_CACHE - { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - if (DEPOT_MAGAZINE_INDEX != mag_index) { - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - - if (ptr == medium_mag_ptr->mag_last_free) { - return 0; - } - } else { - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - - if (ptr == medium_mag_ptr->mag_last_free) { - return 0; - } - } - } - } -#endif - return MEDIUM_BYTES_FOR_MSIZE(msize_and_free); - } - - return 0; -} - -static MALLOC_NOINLINE void -free_medium_botch(rack_t *rack, void *ptr) -{ - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - malloc_zone_error(rack->debug_flags, true, "double free for ptr %p\n", ptr); -} - -void -free_medium(rack_t *rack, void *ptr, region_t medium_region, size_t known_size) -{ - msize_t msize; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(MEDIUM_REGION_FOR_PTR(ptr)); - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - - // ptr is known to be in medium_region - if (known_size) { - msize = MEDIUM_MSIZE_FOR_BYTES(known_size + MEDIUM_QUANTUM - 1); - } else { - msize = MEDIUM_PTR_SIZE(ptr); - if (MEDIUM_PTR_IS_FREE(ptr)) { - free_medium_botch(rack, ptr); - return; - } - } - - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - -#if CONFIG_MEDIUM_CACHE - // Depot does not participate in CONFIG_MEDIUM_CACHE since it can't be directly malloc()'d - if (DEPOT_MAGAZINE_INDEX != mag_index) { - void *ptr2 = medium_mag_ptr->mag_last_free; // Might be NULL - msize_t msize2 = medium_mag_ptr->mag_last_free_msize; - region_t rgn2 = medium_mag_ptr->mag_last_free_rgn; - - /* check that we don't already have this pointer in the cache */ - if (ptr == ptr2) { - free_medium_botch(rack, ptr); - return; - } - - if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize) { - memset(ptr, SCRABBLE_BYTE, MEDIUM_BYTES_FOR_MSIZE(msize)); - } - - uint64_t madv_window = medium_sliding_madvise_granularity(medium_mag_ptr); - if (MEDIUM_BYTES_FOR_MSIZE(msize) > madv_window) { - uintptr_t lo = round_page_kernel((uintptr_t)ptr); - uintptr_t hi = trunc_page_kernel((uintptr_t)ptr + MEDIUM_BYTES_FOR_MSIZE(msize)); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - mvm_madvise_free(rack, medium_region, lo, hi, NULL, false); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - - medium_madvise_header_mark_clean(MEDIUM_MADVISE_HEADER_FOR_PTR(ptr), - MEDIUM_META_INDEX_FOR_PTR(ptr), msize); - - ptr2 = 
medium_mag_ptr->mag_last_free; // Might be NULL - msize2 = medium_mag_ptr->mag_last_free_msize; - rgn2 = medium_mag_ptr->mag_last_free_rgn; - } - - - medium_mag_ptr->mag_last_free = ptr; - medium_mag_ptr->mag_last_free_msize = msize; - medium_mag_ptr->mag_last_free_rgn = medium_region; - - if (!ptr2) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return; - } - - msize = msize2; - ptr = ptr2; - medium_region = rgn2; - } -#endif /* CONFIG_MEDIUM_CACHE */ - - // Now in the time it took to acquire the lock, the region may have migrated - // from one magazine to another. I.e. trailer->mag_index is volatile. - // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock) - // is stale. If so, keep on tryin' ... - region_trailer_t *trailer = REGION_TRAILER_FOR_MEDIUM_REGION(medium_region); - mag_index_t refreshed_index; - - while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment - - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - - mag_index = refreshed_index; - medium_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - } - - if (medium_free_no_lock(rack, medium_mag_ptr, mag_index, medium_region, ptr, msize)) { - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - } - - CHECK(szone, __PRETTY_FUNCTION__); -} - -void -print_medium_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack) -{ - free_list_t ptr; - _SIMPLE_STRING b = _simple_salloc(); - mag_index_t mag_index; - - if (b) { - rack_t *mapped_rack; - magazine_t *mapped_magazines; - if (reader(task, (vm_address_t)rack, sizeof(struct rack_s), - (void **)&mapped_rack)) { - printer("Failed to map medium rack\n"); - return; - } - if (reader(task, (vm_address_t)mapped_rack->magazines, - mapped_rack->num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map medium rack magazines\n"); - return; - } - - _simple_sappend(b, "medium free sizes:\n"); - grain_t free_slots = MEDIUM_FREE_SLOT_COUNT(mapped_rack); - for (mag_index = -1; mag_index < mapped_rack->num_magazines; - mag_index++) { - grain_t slot = 0; - if (mag_index == -1) { - _simple_sprintf(b, "\tRecirc depot: "); - } else { - _simple_sprintf(b, "\tMagazine %d: ", mag_index); - } - while (slot < free_slots) { - ptr = mapped_magazines[mag_index].mag_free_list[slot]; - if (medium_free_list_get_ptr_task(task, reader, printer, ptr)) { - _simple_sprintf(b, "%s%y[%lld]; ", (slot == free_slots - 1) ? 
- ">=" : "", (slot + 1) * MEDIUM_QUANTUM, - medium_free_list_count(task, reader, printer, - rack, ptr)); - } - slot++; - } - _simple_sappend(b, "\n"); - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -void -print_medium_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, szone_t *szone, int level, - region_t region, size_t bytes_at_start, size_t bytes_at_end) -{ - unsigned counts[1024]; - unsigned in_use = 0; - uintptr_t start = (uintptr_t)MEDIUM_REGION_ADDRESS(region); - uintptr_t current = start + bytes_at_start; - uintptr_t limit = (uintptr_t)MEDIUM_REGION_END(region) - bytes_at_end; - uintptr_t mapped_start; - msize_t msize_and_free; - msize_t msize; - unsigned ci; - _SIMPLE_STRING b; - uintptr_t pgTot = 0; - uintptr_t advTot = 0; - - if (reader(task, (vm_address_t)start, MEDIUM_REGION_SIZE, - (void **)&mapped_start)) { - printer("Failed to map small region at %p\n", start); - return; - } - off_t start_offset = mapped_start - start; - region_t mapped_region = (region_t)mapped_start; - - if (region == HASHRING_REGION_DEALLOCATED) { - if ((b = _simple_salloc()) != NULL) { - _simple_sprintf(b, "Medium region [unknown address] was returned to the OS\n"); - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } - return; - } - - memset(counts, 0, sizeof(counts)); - while (current < limit) { - msize_and_free = *(uintptr_t *)((char *)MEDIUM_METADATA_FOR_PTR(current) - + start_offset); - msize = msize_and_free & ~MEDIUM_IS_FREE; - if (!msize) { - printer("*** error with %p: msize=%d, free: %x\n", (void *)current, - (unsigned)msize, msize_and_free & MEDIUM_IS_FREE); - break; - } - if (!(msize_and_free & MEDIUM_IS_FREE)) { - // block in use - if (msize < 1024) { - counts[msize]++; - } - in_use++; - } else { - uintptr_t pgLo = round_page_quanta(current + - sizeof(medium_inplace_free_entry_s) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_quanta(current + - MEDIUM_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); - - if (pgLo < pgHi) { - pgTot += (pgHi - pgLo); - } - - uintptr_t curAdv = current; - uintptr_t limAdv = current + MEDIUM_BYTES_FOR_MSIZE(msize); - while (curAdv < limAdv) { - msize_t adv = *(MEDIUM_MADVISE_HEADER_FOR_PTR(curAdv) + - MEDIUM_META_INDEX_FOR_PTR(curAdv)); - uintptr_t bytes = MEDIUM_BYTES_FOR_MSIZE(adv & ~MEDIUM_IS_ADVISED); - if (adv & MEDIUM_IS_ADVISED) { - advTot += bytes; - } - curAdv += bytes; - } - } - current += MEDIUM_BYTES_FOR_MSIZE(msize); - } - if ((b = _simple_salloc()) != NULL) { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_MEDIUM_REGION(mapped_region); - _simple_sprintf(b, "Medium region [%p-%p, %y] \t", (void *)start, - MEDIUM_REGION_END(region), (int)MEDIUM_REGION_SIZE); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - _simple_sprintf(b, "Recirc depot \t"); - } else { - _simple_sprintf(b, "Magazine=%d \t", mag_index); - } - _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly (%d%%) \t", - in_use, BYTES_USED_FOR_MEDIUM_REGION(region), - (int)(100.0F * BYTES_USED_FOR_MEDIUM_REGION(mapped_region))/MEDIUM_REGION_SIZE); - if (bytes_at_end || bytes_at_start) { - _simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start); - } - _simple_sprintf(b, "Advised=%ly ", advTot); -#if CONFIG_RECIRC_DEPOT - _simple_sprintf(b, medium_region_below_recirc_threshold(mapped_region) ? 
- "\tEmpty enough to be moved to recirc depot" : - "\tNot empty enough to be moved to recirc depot"); -#endif // CONFIG_RECIRC_DEPOT - _simple_sprintf(b, "Dirty=%ly ", MEDIUM_REGION_PAYLOAD_BYTES - - bytes_at_start - bytes_at_end - - BYTES_USED_FOR_MEDIUM_REGION(mapped_region) - advTot); - if (level >= MALLOC_VERBOSE_PRINT_LEVEL && in_use) { - _simple_sappend(b, "\n\tSizes in use: "); - for (ci = 0; ci < 1024; ci++) { - if (counts[ci]) { - _simple_sprintf(b, "%y[%d] ", MEDIUM_BYTES_FOR_MSIZE(ci), - counts[ci]); - } - } - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -void -print_medium_region_vis(szone_t *szone, region_t region) -{ - _SIMPLE_STRING b; - - if (region == HASHRING_REGION_DEALLOCATED) { - if ((b = _simple_salloc()) != NULL) { - _simple_sprintf(b, "Medium region [unknown address] was returned " - "to the kernel\n"); - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, - "%s\n", _simple_string(b)); - _simple_sfree(b); - } - return; - } - - msize_t *metah = MEDIUM_META_HEADER_FOR_PTR(region); - msize_t *madvh = MEDIUM_MADVISE_HEADER_FOR_PTR(region); - - mag_index_t mag_idx = MAGAZINE_INDEX_FOR_MEDIUM_REGION(region); - magazine_t *mag = &szone->medium_rack.magazines[mag_idx]; - - msize_t cur_msz = 0; - bool is_free = false; - msize_t cur_adv_msz = 0; - bool is_advised = false; - - if ((b = _simple_salloc()) == NULL) { - abort(); - } - - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, - "Medium region [%p-%p, %y, %y]\n", (void *)region, - MEDIUM_REGION_END(region), (int)MEDIUM_REGION_SIZE, - ((medium_region_t)region)->trailer.bytes_used); - - for (size_t x = 0; x < NUM_MEDIUM_BLOCKS; x++) { - bool is_last_rgn = mag->mag_last_region == region; - bool is_in_free_trailer = is_last_rgn && ((x << SHIFT_MEDIUM_QUANTUM) > - (MEDIUM_REGION_SIZE - mag->mag_bytes_free_at_end)); - bool is_blk_start = false; - bool is_blk_end = cur_msz == 1; - bool is_adv_boundary = false; - - if (cur_msz == 0) { - cur_msz = metah[x] & ~MEDIUM_IS_FREE; - is_free = metah[x] & MEDIUM_IS_FREE; - is_blk_start = true; - } - if (cur_adv_msz == 0 && madvh[x]) { - cur_adv_msz = madvh[x] & ~MEDIUM_IS_FREE; - is_advised = madvh[x] & MEDIUM_IS_FREE; - is_adv_boundary = true; - } - if (cur_adv_msz > 0) { - cur_adv_msz--; - } - - const char *sigil = "?"; - if (is_in_free_trailer) { - sigil = "_"; - } else if (cur_msz > 0) { - if (is_free) { - if (is_advised) { sigil = "~"; } - else if (is_blk_start && cur_msz == 1) { sigil = "."; } - else if (is_blk_start) { sigil = "<"; } - else if (is_blk_end) { sigil = ">"; } - else if (is_adv_boundary) { sigil = "!"; } - else { sigil = "."; } - } else { - if (is_blk_start && cur_msz == 1) { sigil = "#"; } - else if (is_blk_start) { sigil = "["; } - else if (is_blk_end) { sigil = "]"; } - else { sigil = "#"; } - } - cur_msz--; - } - - if (x > 0 && (x % 128) == 0) { - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "%s\n", - _simple_string(b)); - _simple_sfree(b); - if ((b = _simple_salloc()) == NULL) { - abort(); - } - if (is_in_free_trailer) { - break; - } - } - - _simple_sappend(b, sigil); - } - _simple_sfree(b); -} - -static char *medium_freelist_fail_msg = "check: medium free list incorrect"; - -#define MEDIUM_FREELIST_FAIL(fmt, ...) 
\ - malloc_zone_check_fail(medium_freelist_fail_msg, \ - " (slot=%u), counter=%d\n" fmt, slot, counter, __VA_ARGS__); - -boolean_t -medium_free_list_check(rack_t *rack, grain_t slot, unsigned counter) -{ - mag_index_t mag_index; - - for (mag_index = -1; mag_index < rack->num_magazines; mag_index++) { - magazine_t *medium_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(medium_mag_ptr); - - unsigned count = 0; - free_list_t current = rack->magazines[mag_index].mag_free_list[slot]; - free_list_t previous = (free_list_t){ .p = NULL }; - msize_t msize_and_free; - void *ptr = NULL; - - while ((ptr = medium_free_list_get_ptr(rack, current))) { - msize_and_free = *MEDIUM_METADATA_FOR_PTR(ptr); - if (!(msize_and_free & MEDIUM_IS_FREE)) { - MEDIUM_FREELIST_FAIL("*** in-use ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; - } - if (((uintptr_t)ptr) & (MEDIUM_QUANTUM - 1)) { - MEDIUM_FREELIST_FAIL("*** unaligned ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; - } - if (!medium_region_for_ptr_no_lock(rack, ptr)) { - MEDIUM_FREELIST_FAIL("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; - } - if (medium_free_list_get_previous(rack, current).p != previous.p) { - MEDIUM_FREELIST_FAIL("*** previous incorrectly set slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - return 0; - } - previous = current; - current = medium_free_list_get_next(rack, current); - count++; - } - - SZONE_MAGAZINE_PTR_UNLOCK(medium_mag_ptr); - } - return 1; -} - -#endif // CONFIG_MEDIUM_ALLOCATOR diff --git a/src/libmalloc/src/magazine_rack.c b/src/libmalloc/src/magazine_rack.c deleted file mode 100644 index 3ce55da29..000000000 --- a/src/libmalloc/src/magazine_rack.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -void -rack_init(rack_t *rack, rack_type_t type, uint32_t num_magazines, uint32_t debug_flags) -{ - rack->type = type; - rack->rg[0].nextgen = &rack->rg[1]; - rack->rg[1].nextgen = &rack->rg[0]; - rack->region_generation = &rack->rg[0]; - - rack->region_generation->hashed_regions = rack->initial_regions; - rack->region_generation->num_regions_allocated = INITIAL_NUM_REGIONS; - rack->region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT; - - memset(rack->initial_regions, '\0', sizeof(region_t) * INITIAL_NUM_REGIONS); - - rack->cookie = (uintptr_t)malloc_entropy[0]; - - if (type == RACK_TYPE_SMALL) { - // Flip the cookie for SMALL regions so that tiny and small free list - // entries will trap when used if used in opposing region types. - rack->cookie = ~rack->cookie; - } - - rack->debug_flags = debug_flags; - rack->num_magazines = num_magazines; - rack->num_regions = 0; - rack->num_regions_dealloc = 0; - rack->magazines = NULL; - - if (num_magazines > 0) { - // num_magazines + 1, the [-1] index will become the depot magazine - size_t magsize = round_page_quanta(sizeof(magazine_t) * (num_magazines + 1)); - magazine_t *magazines = mvm_allocate_pages(magsize, 0, MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC); - if (!magazines) { - MALLOC_REPORT_FATAL_ERROR(0, "unable to allocate magazine array"); - } - - rack->magazines = &magazines[1]; - rack->num_magazines_mask_shift = 0; - - // The magazines are indexed in [0 .. (num_magazines - 1)] - // Find the smallest power of 2 that exceeds (num_magazines - 1) - int i = 1; - while (i <= (num_magazines - 1)) { - rack->num_magazines_mask_shift++; - i <<= 1; - } - - // Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)] - rack->num_magazines_mask = i - 1; - rack->last_madvise = 0; - - _malloc_lock_init(&rack->region_lock); - _malloc_lock_init(&rack->magazines[DEPOT_MAGAZINE_INDEX].magazine_lock); - - for (int i=0; i < rack->num_magazines; i++) { - _malloc_lock_init(&rack->magazines[i].magazine_lock); - } - } -} - -void -rack_destroy_regions(rack_t *rack, size_t region_size) -{ - /* destroy regions attached to this rack */ - for (int i=0; i < rack->region_generation->num_regions_allocated; i++) { - if ((rack->region_generation->hashed_regions[i] != HASHRING_OPEN_ENTRY) && - (rack->region_generation->hashed_regions[i] != HASHRING_REGION_DEALLOCATED)) - { - mvm_deallocate_pages(rack->region_generation->hashed_regions[i], region_size, 0); - rack->region_generation->hashed_regions[i] = HASHRING_REGION_DEALLOCATED; - } - } -} - -void -rack_destroy(rack_t *rack) -{ - /* if the rack has additional regions, then deallocate them */ - if (rack->region_generation->hashed_regions != rack->initial_regions) { - size_t size = round_page_quanta(rack->region_generation->num_regions_allocated * sizeof(region_t)); - mvm_deallocate_pages(rack->region_generation->hashed_regions, size, 0); - } - - if (rack->num_magazines > 0) { - size_t size = round_page_quanta(sizeof(magazine_t) * (rack->num_magazines + 1)); - mvm_deallocate_pages(&rack->magazines[-1], size, MALLOC_ADD_GUARD_PAGES); - rack->magazines = NULL; - } -} - -void -rack_region_insert(rack_t *rack, region_t region) -{ - // Here find the only place in rackland that (infrequently) takes the tiny_regions_lock. - // Only one thread at a time should be permitted to assess the density of the hash - // ring and adjust if needed. 
- // Only one thread at a time should be permitted to insert its new region on - // the hash ring. - // It is safe for all other threads to read the hash ring (hashed_regions) and - // the associated sizes (num_regions_allocated and num_tiny_regions). - - _malloc_lock_lock(&rack->region_lock); - - // Check to see if the hash ring of tiny regions needs to grow. Try to - // avoid the hash ring becoming too dense. - if (rack->region_generation->num_regions_allocated < (2 * rack->num_regions)) { - region_t *new_regions; - size_t new_size; - size_t new_shift = rack->region_generation->num_regions_allocated_shift; // In/Out parameter - new_regions = hash_regions_grow_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, &new_shift, &new_size); - // Do not deallocate the current hashed_regions allocation since someone may - // be iterating it. Instead, just leak it. - - // Prepare to advance to the "next generation" of the hash ring. - rack->region_generation->nextgen->hashed_regions = new_regions; - rack->region_generation->nextgen->num_regions_allocated = new_size; - rack->region_generation->nextgen->num_regions_allocated_shift = new_shift; - - // Throw the switch to atomically advance to the next generation. - rack->region_generation = rack->region_generation->nextgen; - // Ensure everyone sees the advance. - OSMemoryBarrier(); - } - - // Insert the new region into the hash ring, and update malloc statistics - hash_region_insert_no_lock(rack->region_generation->hashed_regions, - rack->region_generation->num_regions_allocated, - rack->region_generation->num_regions_allocated_shift, - region); - - rack->num_regions++; - _malloc_lock_unlock(&rack->region_lock); -} diff --git a/src/libmalloc/src/magazine_rack.h b/src/libmalloc/src/magazine_rack.h deleted file mode 100644 index d918dd0f8..000000000 --- a/src/libmalloc/src/magazine_rack.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __MAGAZINE_RACK_H -#define __MAGAZINE_RACK_H - -/******************************************************************************* - * Definitions for region hash - ******************************************************************************/ - -typedef void *region_t; -typedef region_t *rgnhdl_t; /* A pointer into hashed_regions array. */ - -#define INITIAL_NUM_REGIONS_SHIFT 6 // log2(INITIAL_NUM_REGIONS) -#define INITIAL_NUM_REGIONS (1 << INITIAL_NUM_REGIONS_SHIFT) // Must be a power of 2! 
-#define HASHRING_OPEN_ENTRY ((region_t)0) // Initial value and sentinel marking end of collision chain -#define HASHRING_REGION_DEALLOCATED ((region_t)-1) // Region at this slot reclaimed by OS -#define HASH_BLOCKS_ALIGN TINY_BLOCKS_ALIGN // MIN( TINY_BLOCKS_ALIGN, SMALL_BLOCKS_ALIGN, ... ) - -typedef struct region_hash_generation { - size_t num_regions_allocated; - size_t num_regions_allocated_shift; // log2(num_regions_allocated) - region_t *hashed_regions; // hashed by location - struct region_hash_generation *nextgen; -} region_hash_generation_t; - -OS_ENUM(rack_type, uint32_t, - RACK_TYPE_NONE = 0, - RACK_TYPE_TINY, - RACK_TYPE_SMALL, - RACK_TYPE_MEDIUM, -); - -/******************************************************************************* - * Per-allocator collection of regions and magazines - ******************************************************************************/ - -typedef struct rack_s { - /* Regions for tiny objects */ - _malloc_lock_s region_lock MALLOC_CACHE_ALIGN; - - rack_type_t type; - size_t num_regions; - size_t num_regions_dealloc; - region_hash_generation_t *region_generation; - region_hash_generation_t rg[2]; - region_t initial_regions[INITIAL_NUM_REGIONS]; - - int num_magazines; - unsigned num_magazines_mask; - int num_magazines_mask_shift; - uint32_t debug_flags; - - // array of per-processor magazines - magazine_t *magazines; - - uintptr_t cookie; - uintptr_t last_madvise; -} rack_t; - - -MALLOC_NOEXPORT -void -rack_init(rack_t *rack, rack_type_t type, uint32_t num_magazines, uint32_t debug_flags); - -MALLOC_NOEXPORT -void -rack_destroy_regions(rack_t *rack, size_t region_size); - -MALLOC_NOEXPORT -void -rack_destroy(rack_t *rack); - -MALLOC_NOEXPORT -void -rack_region_insert(rack_t *rack, region_t region); - -#endif // __MAGAZINE_RACK_H diff --git a/src/libmalloc/src/magazine_small.c b/src/libmalloc/src/magazine_small.c deleted file mode 100644 index 3c6c4e7a5..000000000 --- a/src/libmalloc/src/magazine_small.c +++ /dev/null @@ -1,2485 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - - -/********************* SMALL FREE LIST UTILITIES ************************/ - -#pragma mark meta header helpers - -/* - * Mark a block as free. Only the first quantum of a block is marked thusly, - * the remainder are marked "middle". - */ -static MALLOC_INLINE void -small_meta_header_set_is_free(msize_t *meta_headers, msize_t index, msize_t msize) -{ - meta_headers[index] = msize | SMALL_IS_FREE; -} - -/* - * Mark a block as not free, preserving its size. 
- */ -static MALLOC_INLINE void -small_meta_header_set_not_free(msize_t *meta_headers, msize_t index) -{ - meta_headers[index] &= ~SMALL_IS_FREE; -} - -/* - * Mark a block as in use. Only the first quantum of a block is marked thusly, - * the remainder are marked "middle". - */ -static MALLOC_INLINE void -small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) -{ - meta_headers[index] = msize; -} - -/* - * Mark a quantum as being the second or later in a block. - */ -static MALLOC_INLINE void -small_meta_header_set_middle(msize_t *meta_headers, msize_t index) -{ - meta_headers[index] = 0; -} - -static MALLOC_INLINE MALLOC_ALWAYS_INLINE -mag_index_t -small_mag_get_thread_index(void) -{ -#if CONFIG_SMALL_USES_HYPER_SHIFT - if (os_likely(_os_cpu_number_override == -1)) { - return _os_cpu_number() >> hyper_shift; - } else { - return _os_cpu_number_override >> hyper_shift; - } -#else // CONFIG_SMALL_USES_HYPER_SHIFT - if (os_likely(_os_cpu_number_override == -1)) { - return _os_cpu_number(); - } else { - return _os_cpu_number_override; - } -#endif // CONFIG_SMALL_USES_HYPER_SHIFT -} - -#pragma mark in-place free list - -static MALLOC_INLINE void -small_inplace_checksum_ptr(rack_t *rack, inplace_linkage_s *linkage, void *ptr) -{ - uintptr_t checksum = free_list_gen_checksum((uintptr_t)ptr ^ rack->cookie ^ (uintptr_t)rack); - linkage->checksum = checksum; - linkage->ptr = ptr; -} - -static MALLOC_INLINE free_list_t -small_inplace_unchecksum_ptr(rack_t *rack, inplace_linkage_s *linkage) -{ - if (linkage->checksum != (uint8_t)free_list_gen_checksum((uintptr_t)linkage->ptr ^ rack->cookie ^ (uintptr_t)rack)) { - free_list_checksum_botch(rack, linkage, linkage->ptr); - __builtin_trap(); - } - - return (free_list_t){ .p = linkage->ptr }; -} - -static MALLOC_INLINE free_list_t -small_inplace_unchecksum_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, inplace_linkage_s *linkage) -{ - inplace_linkage_s *mapped_linkage; - rack_t *mapped_rack; - if (reader(task, (vm_address_t)linkage, sizeof(*linkage), - (void **)&mapped_linkage)) { - printer("Unable to map small linkage pointer %p\n", linkage); - return (free_list_t){ .p = NULL }; - } - - if (reader(task, (vm_address_t)rack, - sizeof(struct rack_s), (void **)&mapped_rack)) { - printer("Failed to map small rack\n"); - return (free_list_t){ .p = NULL }; - } - - if (mapped_linkage->checksum != (uint8_t)free_list_gen_checksum( - (uintptr_t)mapped_linkage->ptr ^ mapped_rack->cookie ^ (uintptr_t)rack)) { - free_list_checksum_botch(rack, linkage, mapped_linkage->ptr); - __builtin_trap(); - } - - return (free_list_t){ .p = mapped_linkage->ptr }; -} - -static MALLOC_INLINE free_list_t -small_inplace_free_entry_get_previous(rack_t *rack, small_inplace_free_entry_t ptr) -{ - return small_inplace_unchecksum_ptr(rack, &ptr->previous); -} - -static MALLOC_INLINE void -small_inplace_free_entry_set_previous(rack_t *rack, small_inplace_free_entry_t entry, free_list_t previous) -{ - small_inplace_checksum_ptr(rack, &entry->previous, previous.p); -} - -static MALLOC_INLINE free_list_t -small_inplace_free_entry_get_next(rack_t *rack, small_inplace_free_entry_t ptr) -{ - return small_inplace_unchecksum_ptr(rack, &ptr->next); -} - -static MALLOC_INLINE free_list_t -small_inplace_free_entry_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, - small_inplace_free_entry_t ptr) -{ - return small_inplace_unchecksum_ptr_task(task, reader, printer, rack, - &ptr->next); -} 
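The checksummed-linkage scheme above amounts to tagging every stored list pointer with a byte derived from the pointer, a per-rack cookie, and the rack address, then trapping on mismatch at load time. A self-contained sketch of the idea (the fold function and names are illustrative, not free_list_gen_checksum itself):

#include <stdint.h>
#include <stdlib.h>

typedef struct { void *ptr; uint8_t checksum; } linkage_t;

// Sketch: fold a value to one byte; any fixed mixing function works here.
static uint8_t fold_checksum(uintptr_t v)
{
    uint64_t x = (uint64_t)v;
    x ^= x >> 32; x ^= x >> 16; x ^= x >> 8;
    return (uint8_t)x;
}

static void linkage_store(linkage_t *l, void *p, uintptr_t cookie)
{
    l->checksum = fold_checksum((uintptr_t)p ^ cookie);
    l->ptr = p;
}

static void *linkage_load(const linkage_t *l, uintptr_t cookie)
{
    if (l->checksum != fold_checksum((uintptr_t)l->ptr ^ cookie)) {
        abort(); // linkage was rewritten without knowing the cookie
    }
    return l->ptr;
}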
- -static MALLOC_INLINE void -small_inplace_free_entry_set_next(rack_t *rack, small_inplace_free_entry_t entry, free_list_t next) -{ - small_inplace_checksum_ptr(rack, &entry->next, next.p); -} - -#pragma mark OOB free list - -// Returns true if the address and size of the free list entry would result -// in the free entry being the only data on a given page. -static MALLOC_INLINE boolean_t -small_needs_oob_free_entry(void *ptr, msize_t msize) -{ - return ((trunc_page_quanta((uintptr_t)ptr) == (uintptr_t)ptr) && (SMALL_BYTES_FOR_MSIZE(msize) >= vm_kernel_page_size)); -} - -// Returns true if the address given lies within the region's OOB free -// list entries, rather than a free_list_t in the region's heap space. -static MALLOC_INLINE boolean_t -small_is_oob_free_entry(free_list_t ptr) -{ - small_region_t region = SMALL_REGION_FOR_PTR(ptr.p); - return (((uintptr_t)ptr.p >= (uintptr_t)&region->small_oob_free_entries[0]) && - ((uintptr_t)ptr.p < (uintptr_t)&region->small_oob_free_entries[SMALL_OOB_COUNT])); -} - -static MALLOC_INLINE void -small_oob_free_entry_set_previous(oob_free_entry_t oobe, free_list_t previous) -{ - oobe->prev = (uintptr_t)previous.p; -} - -static MALLOC_INLINE free_list_t -small_oob_free_entry_get_previous(oob_free_entry_t oobe) -{ - return (free_list_t){ .p = (void *)oobe->prev }; -} - -static MALLOC_INLINE void -small_oob_free_entry_set_next(oob_free_entry_t oobe, free_list_t next) -{ - oobe->next = (uintptr_t)next.p; -} - -static MALLOC_INLINE free_list_t -small_oob_free_entry_get_next(oob_free_entry_t oobe) -{ - return (free_list_t){ .p = (void *)oobe->next }; -} - -static MALLOC_INLINE free_list_t -small_oob_free_entry_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, oob_free_entry_t oobe) -{ - oob_free_entry_t mapped_oobe; - if (reader(task, (vm_address_t)oobe, sizeof(*oobe), - (void **)&mapped_oobe)) { - printer("Failed to map small oobe pointer\n"); - return (free_list_t){ .p = NULL }; - } - return (free_list_t){ .p = (void *)mapped_oobe->next }; -} - -static MALLOC_INLINE void * -small_oob_free_entry_get_ptr(oob_free_entry_t oobe) -{ - if (!(oobe->ptr & SMALL_IS_OOB)) { - return NULL; - } - small_region_t region = SMALL_REGION_FOR_PTR(oobe); - uint16_t block = oobe->ptr & ~SMALL_IS_OOB; - return (void *)((uintptr_t)region + (block << SHIFT_SMALL_QUANTUM)); -} - -static MALLOC_INLINE void * -small_oob_free_entry_get_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, oob_free_entry_t oobe) -{ - // We need to map the oob_free_entry_t to read the pointer value. - oob_free_entry_t mapped_oobe; - if (reader(task, (vm_address_t)oobe, sizeof(*oobe), - (void **)&mapped_oobe)) { - printer("Failed to map small oobe pointer\n"); - return NULL; - } - - if (!(mapped_oobe->ptr & SMALL_IS_OOB)) { - return NULL; - } - - // The rest of this code works with target process addresses and returns an - // address in the target process. - small_region_t region = SMALL_REGION_FOR_PTR(oobe); - uint16_t block = mapped_oobe->ptr & ~SMALL_IS_OOB; - return (void *)((uintptr_t)region + (block << SHIFT_SMALL_QUANTUM)); -} - -static MALLOC_INLINE void -small_oob_free_entry_set_ptr(oob_free_entry_t oobe, void *ptr) -{ - oobe->ptr = SMALL_IS_OOB | (SMALL_OFFSET_FOR_PTR(ptr) >> SHIFT_SMALL_QUANTUM); -} - -static MALLOC_INLINE void -small_oob_free_entry_set_free(oob_free_entry_t oobe) -{ - oobe->prev = ~0; - oobe->next = ~0; - oobe->ptr = 0; -} - -// Finds the first unused OOB free list entry in the pointer's region.
-// Returns NULL if all of the OOB entries are used. -static MALLOC_INLINE oob_free_entry_t -small_oob_free_find_empty(void *ptr, msize_t msize) -{ - small_region_t region = SMALL_REGION_FOR_PTR(ptr); - - // There are 61 of these entries at the end of a small region. - // If this changes, then a linear search through the list may - // become an unsuitable choice. - for (int i=0; i < SMALL_OOB_COUNT; i++) { - if (region->small_oob_free_entries[i].ptr == 0) { - return &region->small_oob_free_entries[i]; - } - } - -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_INFO, "used all slots of OOB entries\n"); -#endif - return NULL; -} - -static MALLOC_INLINE oob_free_entry_t -small_oob_free_find_ptr(void *ptr, msize_t msize) -{ - small_region_t region = SMALL_REGION_FOR_PTR(ptr); - - // There are 61 of these entries at the end of a small region. - // If this changes, then a linear search through the list may - // become an unsuitable choice. - for (int i=0; i < SMALL_OOB_COUNT; i++) { - oob_free_entry_t oob = &region->small_oob_free_entries[i]; - if (small_oob_free_entry_get_ptr(oob) == ptr && - oob->ptr & SMALL_IS_OOB) { - return &region->small_oob_free_entries[i]; - } - } - - return NULL; -} - -#pragma mark generic free list - -static MALLOC_INLINE void -small_free_list_set_previous(rack_t *rack, free_list_t entry, free_list_t previous) -{ - if (small_is_oob_free_entry(entry)) { - small_oob_free_entry_set_previous(entry.oob, previous); - } else { - small_inplace_free_entry_set_previous(rack, entry.small_inplace, previous); - } -} - -static MALLOC_INLINE free_list_t -small_free_list_get_previous(rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (small_is_oob_free_entry(ptr)) { - return small_oob_free_entry_get_previous(ptr.oob); - } else { - return small_inplace_free_entry_get_previous(rack, ptr.small_inplace); - } -} - -static MALLOC_INLINE void -small_free_list_set_next(rack_t *rack, free_list_t entry, free_list_t next) -{ - if (small_is_oob_free_entry(entry)) { - small_oob_free_entry_set_next(entry.oob, next); - } else { - small_inplace_free_entry_set_next(rack, entry.small_inplace, next); - } -} - -static MALLOC_INLINE free_list_t -small_free_list_get_next(rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (small_is_oob_free_entry(ptr)) { - return small_oob_free_entry_get_next(ptr.oob); - } else { - return small_inplace_free_entry_get_next(rack, ptr.small_inplace); - } -} - -static MALLOC_INLINE free_list_t -small_free_list_get_next_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, free_list_t ptr) -{ - MALLOC_ASSERT(ptr.p); - if (small_is_oob_free_entry(ptr)) { - return small_oob_free_entry_get_next_task(task, reader, printer, ptr.oob); - } else { - return small_inplace_free_entry_get_next_task(task, reader, printer, - rack, ptr.small_inplace); - } -} - -static MALLOC_INLINE void * -small_free_list_get_ptr(free_list_t ptr) -{ - if (!ptr.p) { - return NULL; - } else if (small_is_oob_free_entry(ptr)) { - return small_oob_free_entry_get_ptr(ptr.oob); - } else { - return (void *)ptr.p; - } -} - -static MALLOC_INLINE void * -small_free_list_get_ptr_task(task_t task, memory_reader_t reader, - print_task_printer_t printer, free_list_t ptr) -{ - if (!ptr.p) { - return NULL; - } else if (small_is_oob_free_entry(ptr)) { - return small_oob_free_entry_get_ptr_task(task, reader, printer, ptr.oob); - } else { - return (void *)ptr.p; - } -} - -// Returns a free_list_t that is either inline or not based on the -// pointer and msize.
-static MALLOC_INLINE free_list_t -small_free_list_from_ptr(rack_t *rack, void *ptr, msize_t msize) -{ - MALLOC_ASSERT(msize); - - // The default is to put the free_list_t in the memory that - // the pointer leads to. - free_list_t entry; - entry.p = ptr; - - // If the pointer is page aligned, and the msize is greater - // than a whole page, then we try and put the entry in - // the out-of-band area instead. - if (small_needs_oob_free_entry(ptr, msize)) { - oob_free_entry_t oobe = small_oob_free_find_empty(ptr, msize); - if (oobe) { - small_oob_free_entry_set_ptr(oobe, ptr); - entry.oob = oobe; - } - } - - return entry; -} - -static MALLOC_INLINE void -small_free_mark_free(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as free. - void *ptr = small_free_list_get_ptr(entry); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = SMALL_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = SMALL_META_INDEX_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - - small_meta_header_set_is_free(meta_headers, start_index, msize); - small_meta_header_set_is_free(meta_headers, end_index, msize); -} - -static MALLOC_INLINE void -small_free_mark_middle(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as "middle" (unfree). - void *ptr = small_free_list_get_ptr(entry); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = SMALL_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = SMALL_META_INDEX_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - MALLOC_ASSERT((meta_headers[start_index] & ~SMALL_IS_FREE) == msize); - - small_meta_header_set_middle(meta_headers, start_index); - small_meta_header_set_middle(meta_headers, end_index); -} - -static MALLOC_INLINE void -small_free_mark_unfree(rack_t *rack, free_list_t entry, msize_t msize) -{ - // Marks both the start and end block of a free-list entry as not free. - void *ptr = small_free_list_get_ptr(entry); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - uintptr_t start_index = SMALL_META_INDEX_FOR_PTR(ptr); - uintptr_t end_index = SMALL_META_INDEX_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize) - 1); - MALLOC_ASSERT(start_index <= end_index); - - small_meta_header_set_not_free(meta_headers, start_index); - small_meta_header_set_not_free(meta_headers, end_index); -} - -static MALLOC_INLINE unsigned int -small_free_list_count(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack, free_list_t ptr) -{ - unsigned int count = 0; - while (ptr.p) { - count++; - ptr = small_free_list_get_next_task(task, reader, printer, rack, ptr); - } - return count; -} - -/* - * Adds an item to the proper free list, and also marks the meta-header of the - * block properly. - * Assumes szone has been locked - */ -static free_list_t -small_free_list_add_ptr(rack_t *rack, magazine_t *small_mag_ptr, void *ptr, msize_t msize) -{ - grain_t slot = SMALL_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t free_head = small_mag_ptr->mag_free_list[slot]; - - // This will either return the free_list_t for the current pointer, or attempt - // to reserve an OOB entry for us. 
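The decision small_free_list_from_ptr makes before the call below reduces to a one-line predicate; here is a standalone sketch with an assumed page size (SKETCH_PAGE_SIZE and the helper name are illustrative, not the library's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ul // stands in for vm_kernel_page_size

// Sketch: linkage goes out-of-band when writing it into the block itself
// would dirty a page that could otherwise stay clean (and be madvised away).
static bool sketch_needs_oob_entry(const void *ptr, size_t block_bytes)
{
    return (((uintptr_t)ptr & (SKETCH_PAGE_SIZE - 1)) == 0) &&
            (block_bytes >= SKETCH_PAGE_SIZE);
}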
- free_list_t free_ptr = small_free_list_from_ptr(rack, ptr, msize); - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } - if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { - malloc_zone_error(rack->debug_flags, true, "small_free_list_add_ptr: Unaligned ptr %p\n", ptr); - } -#endif - - small_free_list_set_previous(rack, free_ptr, (free_list_t){ .p = NULL }); - small_free_list_set_next(rack, free_ptr, free_head); - - // Set the start and end blocks of the meta header as "free". Marking the last block - // allows coalescing the regions when we free adjacent regions. - small_free_mark_free(rack, free_ptr, msize); - - if (small_free_list_get_ptr(free_head)) { -#if DEBUG_MALLOC - if (small_free_list_get_previous(szone, free_head)) { - malloc_zone_error(rack->debug_flags, true, "small_free_list_add_ptr: Internal invariant broken (free_head->previous != NULL)\n" - "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p); - } - if (!SMALL_PTR_IS_FREE(small_free_list_get_ptr(free_head))) { - malloc_zone_error(rack->debug_flags, true, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)\n" - "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)small_free_list_get_ptr(free_head)); - } -#endif - small_free_list_set_previous(rack, free_head, free_ptr); - } else { - BITMAPN_SET(small_mag_ptr->mag_bitmap, slot); - } - - small_mag_ptr->mag_free_list[slot] = free_ptr; - return free_ptr; -} - -/* - * Removes the item pointed to by ptr in the proper free list. - * Assumes szone has been locked - */ -static void -small_free_list_remove_ptr_no_clear(rack_t *rack, magazine_t *small_mag_ptr, free_list_t entry, msize_t msize) -{ - grain_t slot = SMALL_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t next, previous; - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } -#endif - - previous = small_free_list_get_previous(rack, entry); - next = small_free_list_get_next(rack, entry); - - if (!small_free_list_get_ptr(previous)) { - // The block to remove is the head of the free list -#if DEBUG_MALLOC - if (small_mag_ptr->mag_free_list[slot] != ptr) { - malloc_zone_error(rack->debug_flags, true, - "small_free_list_remove_ptr_no_clear: Internal invariant broken (small_mag_ptr->mag_free_list[slot])\n" - "ptr=%p slot=%d msize=%d small_mag_ptr->mag_free_list[slot]=%p\n", ptr, slot, msize, - (void *)small_mag_ptr->mag_free_list[slot]); - return; - } -#endif - small_mag_ptr->mag_free_list[slot] = next; - if (!small_free_list_get_ptr(next)) { - BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot); - } - } else { - // Check that the next pointer of "previous" points to "entry". - free_list_t prev_next = small_free_list_get_next(rack, previous); - if (small_free_list_get_ptr(prev_next) != small_free_list_get_ptr(entry)) { - malloc_zone_error(rack->debug_flags, true, - "small_free_list_remove_ptr_no_clear: Internal invariant broken (next ptr of prev) for %p, prev_next=%p\n", - small_free_list_get_ptr(entry), small_free_list_get_ptr(prev_next)); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). - } - small_free_list_set_next(rack, previous, next); - } - - if (small_free_list_get_ptr(next)) { - // Check that the previous pointer of "next" points to "entry". 
- free_list_t next_prev = small_free_list_get_previous(rack, next); - if (small_free_list_get_ptr(next_prev) != small_free_list_get_ptr(entry)) { - malloc_zone_error(rack->debug_flags, true, - "small_free_list_remove_ptr_no_clear: Internal invariant broken (prev ptr of next) for %p, next_prev=%p\n", - small_free_list_get_ptr(entry), small_free_list_get_ptr(next_prev)); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). - } - small_free_list_set_previous(rack, next, previous); - } - - if (small_is_oob_free_entry(entry)) { - small_oob_free_entry_set_free(entry.oob); - } -} - -static void -small_free_list_remove_ptr(rack_t *rack, magazine_t *small_mag_ptr, free_list_t entry, msize_t msize) -{ - // In the general case we want to ensure we marked these entries as "middle" - // while we are in this function. However, when we're moving free list entries - // from/to the recirc depot we rely on the metadata bits being intact to - // reconstruct the free list. In that case we have to be able to skip this - // metadata manipulation. - small_free_mark_middle(rack, entry, msize); - small_free_list_remove_ptr_no_clear(rack, small_mag_ptr, entry, msize); -} - -// Find a free list entry by its pointer address. This should only really be used -// by small_finalize_region, or similar, where the free_list_t entry of a known -// pointer is desired. Otherwise it is cheaper to always pull off the free lists. -static free_list_t -small_free_list_find_by_ptr(rack_t *rack, magazine_t *small_mag_ptr, void *ptr, msize_t msize) -{ - if (*SMALL_METADATA_FOR_PTR(ptr) == (SMALL_IS_FREE | msize)) { - // If the block is marked free, and of size `msize`, then we first must check - // if the alignment+size is such that we could have used an OOB entry. - if (small_needs_oob_free_entry(ptr, msize)) { - // Scan the OOB entries looking for this address. - small_region_t region = SMALL_REGION_FOR_PTR(ptr); - for (int i=0; i < SMALL_OOB_COUNT; i++) { - if (!region->small_oob_free_entries[i].ptr) { - continue; - } - if (small_oob_free_entry_get_ptr(&region->small_oob_free_entries[i]) == ptr) { - return (free_list_t){ .oob = &region->small_oob_free_entries[i] }; - } - } - } - - // Otherwise, the freed pointer will be in place. - return (free_list_t){ .p = ptr }; - } - - malloc_zone_error(rack->debug_flags, true, - "small_free_list_find_by_ptr: ptr is not free (ptr metadata !SMALL_IS_FREE), " - "ptr=%p msize=%d metadata=0x%x\n", ptr, msize, *SMALL_METADATA_FOR_PTR(ptr)); - __builtin_trap(); -} - -void -small_finalize_region(rack_t *rack, magazine_t *small_mag_ptr) -{ - void *last_block, *previous_block; - msize_t last_msize, previous_msize, last_index; - free_list_t previous; - - // It is possible that the block prior to the last block in the region has - // been free'd, but was not coalesced with the free bytes at the end of the - // block, since we treat the bytes at the end of the region as "in use" in - // the meta headers. Attempt to coalesce the last block with the previous - // block, so we don't violate the "no consecutive free blocks" invariant. - // - // FIXME: If we could calculate the previous small free size in the same - // manner as tiny_previous_preceding_free, it would eliminate the - // index & previous msize checks, which are a guard against reading - // bogus data out of in-use or written-on-freed memory. - // - // FIXME: Need to investigate how much work would be required to increase - // 'mag_bytes_free_at_end' when freeing the preceding block, rather - // than performing this workaround.
- // - if (small_mag_ptr->mag_bytes_free_at_end) { - last_block = SMALL_REGION_END(small_mag_ptr->mag_last_region) - small_mag_ptr->mag_bytes_free_at_end; - last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end); - - last_index = SMALL_META_INDEX_FOR_PTR(last_block); - previous_msize = SMALL_PREVIOUS_MSIZE(last_block); - - if (last_index && (previous_msize <= last_index)) { - previous_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize)); - - if (SMALL_PTR_IS_FREE(previous_block)) { - previous = small_free_list_find_by_ptr(rack, small_mag_ptr, previous_block, previous_msize); - small_free_list_remove_ptr(rack, small_mag_ptr, previous, previous_msize); - last_block = previous_block; - last_msize += previous_msize; - } - } - - // splice last_block into the free list - small_free_list_add_ptr(rack, small_mag_ptr, last_block, last_msize); - small_mag_ptr->mag_bytes_free_at_end = 0; - } - -#if CONFIG_ASLR_INTERNAL - free_list_t next; - - if (small_mag_ptr->mag_bytes_free_at_start) { - last_block = SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region); - last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start); - - void *next_block = (void *)((uintptr_t)last_block + small_mag_ptr->mag_bytes_free_at_start); - if (SMALL_PTR_IS_FREE(next_block)) { - msize_t next_msize = SMALL_PTR_SIZE(next_block); - next = small_free_list_find_by_ptr(rack, small_mag_ptr, next_block, next_msize); - small_free_list_remove_ptr(rack, small_mag_ptr, next, next_msize); - last_msize += next_msize; - } - - // splice last_block into the free list - small_free_list_add_ptr(rack, small_mag_ptr, last_block, last_msize); - small_mag_ptr->mag_bytes_free_at_start = 0; - } -#endif - - // TODO: Will we ever need to coalesce the blocks at the beginning and end when we finalize? 
- small_mag_ptr->mag_last_region = NULL; -} - -int -small_free_detach_region(rack_t *rack, magazine_t *small_mag_ptr, region_t r) -{ - unsigned char *ptr = SMALL_REGION_ADDRESS(r); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); - int total_alloc = 0; - - while (current < limit) { - unsigned index = SMALL_META_INDEX_FOR_PTR(current); - msize_t msize_and_free = meta_headers[index]; - boolean_t is_free = msize_and_free & SMALL_IS_FREE; - msize_t msize = msize_and_free & ~SMALL_IS_FREE; - - if (!msize) { -#if DEBUG_MALLOC - boolean_t is_free = msize_and_free & SMALL_IS_FREE; - malloc_report(ASL_LEVEL_ERR, "*** small_free_detach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - - if (is_free) { - free_list_t entry = small_free_list_find_by_ptr(rack, small_mag_ptr, (void *)current, msize); - small_free_list_remove_ptr_no_clear(rack, small_mag_ptr, entry, msize); - } else { - total_alloc++; - } - current += SMALL_BYTES_FOR_MSIZE(msize); - } - return total_alloc; -} - -size_t -small_free_reattach_region(rack_t *rack, magazine_t *small_mag_ptr, region_t r) -{ - unsigned char *ptr = SMALL_REGION_ADDRESS(r); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); - size_t total_alloc = 0; - - while (current < limit) { - unsigned index = SMALL_META_INDEX_FOR_PTR(current); - msize_t msize_and_free = meta_headers[index]; - boolean_t is_free = msize_and_free & SMALL_IS_FREE; - msize_t msize = msize_and_free & ~SMALL_IS_FREE; - - if (!msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** small_free_reattach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - if (is_free) { - small_free_list_add_ptr(rack, small_mag_ptr, (void *)current, msize); - } else { - total_alloc += SMALL_BYTES_FOR_MSIZE(msize); - } - current += SMALL_BYTES_FOR_MSIZE(msize); - } - return total_alloc; -} - -typedef struct { - uint16_t pnum, size; -} small_pg_pair_t; - -void -small_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r) -{ - uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(start); - small_pg_pair_t advisory[((SMALL_REGION_PAYLOAD_BYTES + vm_kernel_page_size - 1) >> vm_kernel_page_shift) >> - 1]; // 4096bytes stack allocated - int advisories = 0; - - // Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list - // management data. - while (current < limit) { - unsigned index = SMALL_META_INDEX_FOR_PTR(current); - msize_t msize_and_free = meta_headers[index]; - boolean_t is_free = msize_and_free & SMALL_IS_FREE; - msize_t msize = msize_and_free & ~SMALL_IS_FREE; - - if (is_free && !msize && (current == start)) { -#if DEBUG_MALLOC - // first block is all free - malloc_report(ASL_LEVEL_ERR, "*** small_free_scan_madvise_free first block is all free! 
%p: msize=%d is_free=%d\n", (void *)current, - msize, is_free); -#endif - uintptr_t pgLo = round_page_kernel(start + sizeof(free_list_t) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_kernel(start + SMALL_REGION_SIZE - sizeof(msize_t)); - - if (pgLo < pgHi) { - advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift; - advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift; - advisories++; - } - break; - } - if (!msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, - "*** small_free_scan_madvise_free error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - if (is_free) { - uintptr_t pgLo = round_page_kernel(current + sizeof(free_list_t) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_kernel(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); - - if (pgLo < pgHi) { - advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift; - advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift; - advisories++; - } - } - current += SMALL_BYTES_FOR_MSIZE(msize); - } - - if (advisories > 0) { - int i; - - OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot)); - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - for (i = 0; i < advisories; ++i) { - uintptr_t addr = (advisory[i].pnum << vm_page_quanta_shift) + start; - size_t size = advisory[i].size << vm_page_quanta_shift; - - mvm_madvise_free(rack, r, addr, addr + size, NULL, rack->debug_flags & MALLOC_DO_SCRIBBLE); - } - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot)); - } -} - -#if CONFIG_RECIRC_DEPOT -static region_t -small_find_msize_region(rack_t *rack, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - void *ptr; - grain_t slot = SMALL_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t *free_list = small_mag_ptr->mag_free_list; - free_list_t *the_slot = free_list + slot; - free_list_t *limit; - unsigned bitmap; - - // Assumes we've locked the magazine - CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); - - // Look for an exact match by checking the freelist for this msize. - ptr = small_free_list_get_ptr(*the_slot); - if (ptr) { - return SMALL_REGION_FOR_PTR(ptr); - } - - // Mask off the bits representing slots holding free blocks smaller than - // the size we need. - // - // BITMAPN_CTZ implementation - unsigned idx = slot >> 5; - bitmap = 0; - unsigned mask = ~((1 << (slot & 31)) - 1); - for (; idx < SMALL_FREELIST_BITMAP_WORDS(rack); ++idx) { - bitmap = small_mag_ptr->mag_bitmap[idx] & mask; - if (bitmap != 0) { - break; - } - mask = ~0U; - } - // Check for fallthrough: No bits set in bitmap - if ((bitmap == 0) && (idx == SMALL_FREELIST_BITMAP_WORDS(rack))) { - return NULL; - } - - // Start looking at the first set bit, plus 32 bits for every word of - // zeroes or entries that were too small. - slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); - - limit = free_list + SMALL_FREE_SLOT_COUNT(rack) - 1; - free_list += slot; - - if (free_list < limit) { - ptr = small_free_list_get_ptr(*free_list); - if (ptr) { - return SMALL_REGION_FOR_PTR(ptr); - } else { - /* Shouldn't happen. Fall through to look at last slot. */ -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - } - } - - // We are now looking at the last slot, which contains blocks equal to, or - // due to coalescing of free blocks, larger than (num_small_slots - 1) * (small quantum size). 
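The bitmap walk above (and its twin in small_malloc_from_free_list later in this file) is a conventional find-first-set search. Reduced to essentials, assuming 32-bit bitmap words and POSIX ffs(); the names here are illustrative, not libmalloc's:

```c
#include <stdint.h>
#include <strings.h> /* ffs() */

/* Each bit marks a free-list slot that holds at least one block. Mask off
 * slots smaller than the request, then scan word by word for the first
 * set bit. */
static int find_slot_at_least(const uint32_t *bitmap, int words, int slot)
{
	uint32_t mask = ~((1u << (slot & 31)) - 1); /* drop smaller slots */
	for (int idx = slot >> 5; idx < words; ++idx) {
		uint32_t bits = bitmap[idx] & mask;
		if (bits != 0) {
			return (idx << 5) + ffs((int)bits) - 1; /* first usable slot */
		}
		mask = ~0u; /* in later words, every slot is large enough */
	}
	return -1; /* no free block large enough */
}
```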
- ptr = small_free_list_get_ptr(*limit); - if (ptr) { - return SMALL_REGION_FOR_PTR(ptr); - } - - return NULL; -} - -static boolean_t -small_get_region_from_depot(rack_t *rack, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ - if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary - return 0; - } - -#if DEBUG_MALLOC - if (DEPOT_MAGAZINE_INDEX == mag_index) { - malloc_zone_error(rack->debug_flags, true, "small_get_region_from_depot called for magazine index -1\n", NULL, NULL); - return 0; - } -#endif - - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - - // Appropriate a Depot'd region that can satisfy requested msize. - region_trailer_t *node; - region_t sparse_region; - - while (1) { - sparse_region = small_find_msize_region(rack, depot_ptr, DEPOT_MAGAZINE_INDEX, msize); - if (NULL == sparse_region) { // Depot empty? - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - return 0; - } - - node = REGION_TRAILER_FOR_SMALL_REGION(sparse_region); - if (0 >= node->pinned_to_depot) { - break; - } - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - } - - // disconnect node from Depot - recirc_list_extract(rack, depot_ptr, node); - - // Iterate the region pulling its free entries off the (locked) Depot's free list - int objects_in_use = small_free_detach_region(rack, depot_ptr, sparse_region); - - // Transfer ownership of the region - MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = mag_index; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on its new (locked) magazine's free list - size_t bytes_inplay = small_free_reattach_region(rack, small_mag_ptr, sparse_region); - - depot_ptr->mag_num_bytes_in_objects -= bytes_inplay; - depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects -= objects_in_use; - - small_mag_ptr->mag_num_bytes_in_objects += bytes_inplay; - small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; - small_mag_ptr->mag_num_objects += objects_in_use; - - // connect to magazine as last node - recirc_list_splice_last(rack, small_mag_ptr, node); - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - - MAGMALLOC_DEPOTREGION(SMALL_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE, - (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe - - return 1; -} -#endif // CONFIG_RECIRC_DEPOT - -#if CONFIG_MADVISE_PRESSURE_RELIEF -void -small_madvise_pressure_relief(rack_t *rack) -{ - mag_index_t mag_index; - magazine_t *small_depot_ptr = &rack->magazines[DEPOT_MAGAZINE_INDEX]; - - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - size_t index; - for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) { - SZONE_LOCK(SMALL_SZONE_FROM_RACK(rack)); - - region_t small = rack->region_generation->hashed_regions[index]; - if (!small || small == HASHRING_REGION_DEALLOCATED) { - SZONE_UNLOCK(SMALL_SZONE_FROM_RACK(rack)); - continue; - } - - magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_SMALL_REGION(small), - MAGAZINE_INDEX_FOR_SMALL_REGION(small)); - SZONE_UNLOCK(SMALL_SZONE_FROM_RACK(rack)); - - /* Ordering is important here, the magazine of a region may potentially change - * during mag_lock_zine_for_region_trailer, so src_mag_index must be taken - * after we've obtained the lock. 
- */ - mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(small); - - /* We can (and must) ignore magazines that are already in the recirc depot. */ - if (src_mag_index == DEPOT_MAGAZINE_INDEX) { - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - continue; - } - - if (small == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) { - small_finalize_region(rack, mag_ptr); - } - - /* Because this region is currently in use, we can't safely madvise it while - * it's attached to the magazine. For this operation we have to remove it from - * the current mag, attach it to the depot and then madvise. - */ - - recirc_list_extract(rack, mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(small)); - int objects_in_use = small_free_detach_region(rack, mag_ptr, small); - - SZONE_MAGAZINE_PTR_LOCK(small_depot_ptr); - MAGAZINE_INDEX_FOR_SMALL_REGION(small) = DEPOT_MAGAZINE_INDEX; - REGION_TRAILER_FOR_SMALL_REGION(small)->pinned_to_depot = 0; - - size_t bytes_inplay = small_free_reattach_region(rack, small_depot_ptr, small); - - /* Fix up the metadata of the target magazine while the region is in the depot. */ - mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; - mag_ptr->mag_num_objects -= objects_in_use; - - /* Now we can drop the magazine lock of the source mag. */ - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - - small_depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - small_depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; - small_depot_ptr->mag_num_objects -= objects_in_use; - - recirc_list_splice_last(rack, small_depot_ptr, REGION_TRAILER_FOR_SMALL_REGION(small)); - - /* Actually do the scan, done holding the depot lock, the call will drop the lock - * around the actual madvise syscalls. - */ - small_free_scan_madvise_free(rack, small_depot_ptr, small); - - /* Now the region is in the recirc depot, the next allocations to require more - * blocks will come along and take one of these regions back out of the depot. - * As OS X madvise's reuse on an per-region basis, we leave as many of these - * regions in the depot as possible after memory pressure. - */ - SZONE_MAGAZINE_PTR_UNLOCK(small_depot_ptr); - } - } -} -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -#if CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT -static MALLOC_INLINE void -small_madvise_free_range_no_lock(rack_t *rack, - magazine_t *small_mag_ptr, - region_t region, - free_list_t freee, - msize_t fmsize, - void *headptr, - size_t headsize) -{ - void *ptr = small_free_list_get_ptr(freee); - region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region); - - // Lock on small_magazines[mag_index] is already held here. - // Calculate the first page in the coalesced block that would be safe to mark MADV_FREE - size_t free_header_size = sizeof(free_list_t) + sizeof(msize_t); - - // If the free_list_t entry is out-of-line then we don't need to reserve any space - // at the start of the region. - if (small_is_oob_free_entry(freee)) { - free_header_size = 0; - } - - uintptr_t safe_ptr = (uintptr_t)ptr + free_header_size; - uintptr_t round_safe = round_page_kernel(safe_ptr); - - // Calculate the last page in the coalesced block that would be safe to mark MADV_FREE - uintptr_t safe_extent = (uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(fmsize); - uintptr_t trunc_extent = trunc_page_kernel(safe_extent); - - // The newly freed block may complete a span of bytes that cover one or more pages. Mark the span with MADV_FREE. 
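The branch that follows computes that span: round the usable start up to a page boundary, truncate the end down, and skip the block if nothing whole remains. In isolation, with a fixed 4 KiB page standing in for the kernel page size:

```c
#include <stddef.h>
#include <stdint.h>

#define PAGE 4096u /* assumption; libmalloc uses the kernel page size */

/* For a free block [ptr, ptr + len), find the page-aligned subrange that can
 * be marked MADV_FREE while preserving `header` bytes of free-list linkage
 * at the front and `footer` bytes of size trailer at the back. */
static int madvisable_range(uintptr_t ptr, size_t len, size_t header,
		size_t footer, uintptr_t *lo, uintptr_t *hi)
{
	uintptr_t first = (ptr + header + PAGE - 1) & ~(uintptr_t)(PAGE - 1);
	uintptr_t last = (ptr + len - footer) & ~(uintptr_t)(PAGE - 1);
	if (first >= last) {
		return 0; /* block spans no whole page */
	}
	*lo = first;
	*hi = last;
	return 1;
}
```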
-	if (round_safe < trunc_extent) { // Coalesced area covers a page (perhaps many)
-		// Extend the freed block by the free region header and tail sizes to include pages
-		// we may have coalesced that no longer host free region tails and headers.
-		// This may extend over in-use ranges, but the MIN/MAX clamping below will fix that up.
-		uintptr_t lo = trunc_page_kernel((uintptr_t)headptr);
-		uintptr_t hi = round_page_kernel((uintptr_t)headptr + headsize + free_header_size);
-
-		uintptr_t free_lo = MAX(round_safe, lo);
-		uintptr_t free_hi = MIN(trunc_extent, hi);
-
-		if (free_lo < free_hi) {
-			// Before unlocking, ensure that the metadata for the freed region
-			// makes it look not free but includes the length. This ensures that
-			// any code that inspects the metadata while we are unlocked sees
-			// a valid state and will not try to use or coalesce freed memory
-			// into it.
-			small_free_mark_unfree(rack, freee, fmsize);
-			small_free_list_remove_ptr_no_clear(rack, small_mag_ptr, freee, fmsize);
-			OSAtomicIncrement32Barrier(&(node->pinned_to_depot));
-			SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr);
-			mvm_madvise_free(rack, region, free_lo, free_hi, &rack->last_madvise, rack->debug_flags & MALLOC_DO_SCRIBBLE);
-			SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr);
-			OSAtomicDecrement32Barrier(&(node->pinned_to_depot));
-			small_free_list_add_ptr(rack, small_mag_ptr, ptr, fmsize);
-		}
-	}
-}
-#endif // CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT
-
-#if CONFIG_RECIRC_DEPOT
-static region_t
-small_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_trailer_t *node)
-{
-	if (0 < node->bytes_used || 0 < node->pinned_to_depot || depot_ptr->recirculation_entries < recirc_retained_regions) {
-		return NULL;
-	}
-
-	// disconnect first node from Depot
-	recirc_list_extract(rack, depot_ptr, node);
-
-	// Iterate the region pulling its free entries off the (locked) Depot's free list
-	region_t sparse_region = SMALL_REGION_FOR_PTR(node);
-	int objects_in_use = small_free_detach_region(rack, depot_ptr, sparse_region);
-
-	if (0 == objects_in_use) {
-		// Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
-		// Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
-		rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
-				rack->region_generation->num_regions_allocated,
-				rack->region_generation->num_regions_allocated_shift,
-				sparse_region);
-		if (NULL == pSlot) {
-			malloc_zone_error(rack->debug_flags, true, "small_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
-			return NULL;
-		}
-		*pSlot = HASHRING_REGION_DEALLOCATED;
-		depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
-		// Atomically increment num_regions_dealloc
-#ifdef __LP64__
-		OSAtomicIncrement64(&rack->num_regions_dealloc);
-#else
-		OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
-#endif
-
-		// Caller will transfer ownership of the region back to the OS with no locks held
-		MAGMALLOC_DEALLOCREGION(SMALL_SZONE_FROM_RACK(rack), (void *)sparse_region, (int)SMALL_REGION_SIZE); // DTrace USDT Probe
-		return sparse_region;
-
-	} else {
-		malloc_zone_error(rack->debug_flags, true, "small_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
-		return NULL;
-	}
-}
-
-static boolean_t
-small_free_do_recirc_to_depot(rack_t *rack, magazine_t *small_mag_ptr, mag_index_t mag_index)
-{
-	// The entire magazine crossed the "emptiness threshold".
Transfer a region - // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e - // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list. - region_trailer_t *node = small_mag_ptr->firstNode; - - while (node && (!node->recirc_suitable || node->pinned_to_depot)) { - node = node->next; - } - - if (NULL == node) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** small_free_do_recirc_to_depot end of list\n"); -#endif - return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - } - - region_t sparse_region = SMALL_REGION_FOR_PTR(node); - - // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at start - if (sparse_region == small_mag_ptr->mag_last_region && - (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start)) { - small_finalize_region(rack, small_mag_ptr); - } - - // disconnect "suitable" node from magazine - recirc_list_extract(rack, small_mag_ptr, node); - - // Iterate the region pulling its free entries off its (locked) magazine's free list - int objects_in_use = small_free_detach_region(rack, small_mag_ptr, sparse_region); - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - // hand over the region to the (locked) Depot - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - // this will cause small_free_list_add_ptr called by small_free_reattach_region to use - // the depot as its target magazine, rather than magazine formerly associated with sparse_region - MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on Depot's free list - size_t bytes_inplay = small_free_reattach_region(rack, depot_ptr, sparse_region); - - small_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - small_mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; - small_mag_ptr->mag_num_objects -= objects_in_use; - - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); // Unlock the originating magazine - - depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects += objects_in_use; - - // connect to Depot as last node - recirc_list_splice_last(rack, depot_ptr, node); - - MAGMALLOC_RECIRCREGION(SMALL_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE, - (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe - -#if !CONFIG_AGGRESSIVE_MADVISE - // Mark free'd dirty pages with MADV_FREE to reduce memory pressure - small_free_scan_madvise_free(rack, depot_ptr, sparse_region); -#endif - - // If the region is entirely empty vm_deallocate() it outside the depot lock - region_t r_dealloc = small_free_try_depot_unmap_no_lock(rack, depot_ptr, node); - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - if (r_dealloc) { - mvm_deallocate_pages(r_dealloc, SMALL_REGION_SIZE, 0); - } - return FALSE; // Caller need not unlock the originating magazine -} - -static MALLOC_INLINE boolean_t -small_free_try_recirc_to_depot(rack_t *rack, - magazine_t *small_mag_ptr, - mag_index_t mag_index, - region_t region, - free_list_t freee, - msize_t msize, - void *headptr, - size_t headsize) -{ - region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region); - size_t bytes_used = node->bytes_used; - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? 
*/
-	if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
-		/* NOTHING */
-		return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr)
-	} else if (DEPOT_MAGAZINE_INDEX != mag_index) {
-		// Emptiness discriminant
-		if (small_region_below_recirc_threshold(region)) {
-			/* Region has crossed threshold from density to sparsity. Mark it "suitable" on the
-			 * recirculation candidates list. */
-			node->recirc_suitable = TRUE;
-		} else {
-			/* After this free, we've found the region is still dense, so it must have been even more so before
-			 * the free. That implies the region is already correctly marked. Do nothing. */
-		}
-
-		// Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region
-		// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
-		// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
-		if (small_magazine_below_recirc_threshold(small_mag_ptr)) {
-			return small_free_do_recirc_to_depot(rack, small_mag_ptr, mag_index);
-		}
-	} else {
-#if !CONFIG_AGGRESSIVE_MADVISE
-		// We are free'ing into the depot, so madvise as we do so unless we were madvising every incoming
-		// allocation anyway.
-		small_madvise_free_range_no_lock(rack, small_mag_ptr, region, freee, msize, headptr, headsize);
-#endif
-
-		if (0 < bytes_used || 0 < node->pinned_to_depot) {
-			/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
-			 * so as to avoid thrashing between the Depot's free list and a magazine's free list
-			 * with detach_region/reattach_region */
-		} else {
-			/* Depot'd region is just now empty. Consider return to OS. */
-			region_t r_dealloc = small_free_try_depot_unmap_no_lock(rack, small_mag_ptr, node);
-			SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr);
-			if (r_dealloc) {
-				mvm_deallocate_pages(r_dealloc, SMALL_REGION_SIZE, 0);
-			}
-			return FALSE; // Caller need not unlock
-		}
-	}
-	return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr)
-}
-#endif // CONFIG_RECIRC_DEPOT
-
-static MALLOC_INLINE boolean_t
-small_free_no_lock(rack_t *rack, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize)
-{
-	msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
-	unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
-	size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
-	unsigned char *next_block = ((unsigned char *)ptr + original_size);
-	msize_t next_index = index + msize;
-
-	MALLOC_TRACE(TRACE_small_free, (uintptr_t)rack, (uintptr_t)small_mag_ptr, (uintptr_t)ptr, SMALL_BYTES_FOR_MSIZE(msize));
-
-#if CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT
-	void *original_ptr = ptr;
-#endif // CONFIG_AGGRESSIVE_MADVISE || CONFIG_RECIRC_DEPOT
-
-#if DEBUG_MALLOC
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_ERR, "in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
-	}
-	if (!msize) {
-		malloc_zone_error(rack->debug_flags, true, "trying to free small block that is too small in small_free_no_lock(), ptr=%p, msize=%d\n",
-				ptr, msize);
-	}
-#endif
-
-	// Check that the region cookie is intact.
-	region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(region);
-	region_check_cookie(region, trailer);
-
-	// We try to coalesce this block with the preceding one
-	if (index > 0 && (meta_headers[index - 1] & SMALL_IS_FREE)) {
-		msize_t previous_msize = meta_headers[index - 1] & ~SMALL_IS_FREE;
-		grain_t previous_index = index - previous_msize;
-
-		// Check if the metadata for the start of the block is also free.
-		if (meta_headers[previous_index] == (previous_msize | SMALL_IS_FREE)) {
-			void *previous_ptr = (void *)((uintptr_t)ptr - SMALL_BYTES_FOR_MSIZE(previous_msize));
-			free_list_t previous = small_free_list_find_by_ptr(rack, small_mag_ptr, previous_ptr, previous_msize);
-			small_free_list_remove_ptr(rack, small_mag_ptr, previous, previous_msize);
-			ptr = previous_ptr;
-			small_meta_header_set_middle(meta_headers, index); // This block is now a middle block.
-			msize += previous_msize;
-			index -= previous_msize;
-		} else {
-			_os_set_crash_log_message("small free list metadata inconsistency (headers[previous] != previous size)");
-			__builtin_trap();
-		}
-	}
-
-	// Try to coalesce this block with the next block
-	if ((next_block < SMALL_REGION_END(region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
-		msize_t next_msize = meta_headers[next_index] & ~SMALL_IS_FREE;
-		free_list_t next = small_free_list_find_by_ptr(rack, small_mag_ptr, next_block, next_msize);
-		small_free_list_remove_ptr(rack, small_mag_ptr, next, next_msize);
-		msize += next_msize;
-	}
-
-	if (rack->debug_flags & MALLOC_DO_SCRIBBLE) {
-		if (!msize) {
-			malloc_zone_error(rack->debug_flags, true, "incorrect size information for %p - block header was damaged\n", ptr);
-		} else {
-			memset(ptr, SCRABBLE_BYTE, SMALL_BYTES_FOR_MSIZE(msize));
-		}
-	}
-
-	free_list_t freee = small_free_list_add_ptr(rack, small_mag_ptr, ptr, msize);
-
-	// use original_size and not msize to avoid double counting the coalesced blocks
-	small_mag_ptr->mag_num_bytes_in_objects -= original_size;
-
-	// Update this region's bytes in use count
-	size_t bytes_used = trailer->bytes_used - original_size;
-	trailer->bytes_used = (unsigned int)bytes_used;
-
-#if CONFIG_AGGRESSIVE_MADVISE
-	small_madvise_free_range_no_lock(rack, small_mag_ptr, region, freee, msize, original_ptr, original_size);
-#endif
-
-	// Caller must do SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr) if this function
-	// returns TRUE.
-	boolean_t needs_unlock = TRUE;
-
-#if CONFIG_RECIRC_DEPOT
-	needs_unlock = small_free_try_recirc_to_depot(rack, small_mag_ptr, mag_index, region, freee, msize, original_ptr, original_size);
-#endif // CONFIG_RECIRC_DEPOT
-	return needs_unlock;
-}
-
-// Allocates from the last region or a freshly allocated region
-static void *
-small_malloc_from_region_no_lock(rack_t *rack,
-		magazine_t *small_mag_ptr,
-		mag_index_t mag_index,
-		msize_t msize,
-		void *aligned_address)
-{
-	void *ptr;
-
-	// Before anything we transform the mag_bytes_free_at_end or mag_bytes_free_at_start - if any - to a regular free block
-	/* FIXME: last_block needs to be coalesced with previous entry if free, */
-	if (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start) {
-		small_finalize_region(rack, small_mag_ptr);
-	}
-
-	// Tag the region at "aligned_address" as belonging to us,
-	// and so put it under the protection of the magazine lock we are holding.
-	// Do this before advertising "aligned_address" on the hash ring(!)
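The coalescing at the top of small_free_no_lock, just above, can be modelled with only the metadata updates (no free lists or locking; the flag value and types are stand-ins for libmalloc's):

```c
#include <stdint.h>

#define FREE_FLAG 0x8000u

/* meta[i] holds block i's size in quanta, OR'd with FREE_FLAG when free.
 * Free blocks mirror that value into their last slot as well. */
static void coalesce_and_mark_free(uint16_t *meta, unsigned index,
		uint16_t msize, unsigned num_blocks)
{
	/* Merge with a free block ending immediately before us: its mirrored
	 * size sits in slot index - 1. */
	if (index > 0 && (meta[index - 1] & FREE_FLAG)) {
		uint16_t prev = meta[index - 1] & ~FREE_FLAG;
		index -= prev;
		msize += prev;
	}
	/* Merge with a free successor, whose size sits in its first slot. */
	if (index + msize < num_blocks && (meta[index + msize] & FREE_FLAG)) {
		msize += meta[index + msize] & ~FREE_FLAG;
	}
	/* Mark the merged block free, mirroring the size at head and tail. */
	meta[index] = msize | FREE_FLAG;
	meta[index + msize - 1] = msize | FREE_FLAG;
}
```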
- MAGAZINE_INDEX_FOR_SMALL_REGION(aligned_address) = mag_index; - - // Insert the new region into the hash ring - rack_region_insert(rack, (region_t)aligned_address); - - small_mag_ptr->mag_last_region = aligned_address; - BYTES_USED_FOR_SMALL_REGION(aligned_address) = SMALL_BYTES_FOR_MSIZE(msize); - -#if CONFIG_ASLR_INTERNAL - int offset_msize = malloc_entropy[1] & SMALL_ENTROPY_MASK; -#if DEBUG_MALLOC - if (getenv("MallocASLRForce")) { - offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & SMALL_ENTROPY_MASK; - } - if (getenv("MallocASLRPrint")) { - malloc_report(ASL_LEVEL_INFO, "Region: %p offset: %d\n", aligned_address, offset_msize); - } -#endif -#else - int offset_msize = 0; -#endif - ptr = (void *)((uintptr_t)aligned_address + SMALL_BYTES_FOR_MSIZE(offset_msize)); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize, msize); - small_mag_ptr->mag_num_objects++; - small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(msize); - small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; - - // add a big free block at the end - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize + msize, NUM_SMALL_BLOCKS - msize - offset_msize); - small_mag_ptr->mag_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(NUM_SMALL_BLOCKS - msize - offset_msize); - -#if CONFIG_ASLR_INTERNAL - // add a big free block at the start - small_mag_ptr->mag_bytes_free_at_start = SMALL_BYTES_FOR_MSIZE(offset_msize); - if (offset_msize) { - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, offset_msize); - } -#else - small_mag_ptr->mag_bytes_free_at_start = 0; -#endif - - // connect to magazine as last node - recirc_list_splice_last(rack, small_mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(aligned_address)); - - return ptr; -} - -void * -small_memalign(szone_t *szone, size_t alignment, size_t size, size_t span) -{ - msize_t mspan = SMALL_MSIZE_FOR_BYTES(span + SMALL_QUANTUM - 1); - void *p = small_malloc_should_clear(&szone->small_rack, mspan, 0); - - if (NULL == p) { - return NULL; - } - - size_t offset = ((uintptr_t)p) & (alignment - 1); // p % alignment - size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment - - msize_t msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); - msize_t mpad = SMALL_MSIZE_FOR_BYTES(pad + SMALL_QUANTUM - 1); - msize_t mwaste = mspan - msize - mpad; // excess blocks - - if (mpad > 0) { - void *q = (void *)(((uintptr_t)p) + pad); - - // Mark q as block header and in-use, thus creating two blocks. - magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone->small_rack.magazines, - REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p))); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), mpad); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), msize + mwaste); - small_mag_ptr->mag_num_objects++; - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - - // Give up mpad blocks beginning at p to the small free list - free_small(&szone->small_rack, p, SMALL_REGION_FOR_PTR(p), SMALL_BYTES_FOR_MSIZE(mpad)); - - p = q; // advance p to the desired alignment - } - if (mwaste > 0) { - void *q = (void *)(((uintptr_t)p) + SMALL_BYTES_FOR_MSIZE(msize)); - // Mark q as block header and in-use, thus creating two blocks. 
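The pad computation that small_memalign applies in this stretch is ordinary power-of-two alignment arithmetic. In isolation, as a sketch:

```c
#include <stddef.h>
#include <stdint.h>

/* Given the start of an over-allocated span, compute the pad that brings it
 * up to `alignment` (assumed to be a power of two). The surrounding code
 * then carves the span into pad, payload, and waste blocks and frees the
 * outer two. */
static size_t pad_for_alignment(const void *p, size_t alignment)
{
	size_t offset = (uintptr_t)p & (alignment - 1); /* p % alignment */
	return offset ? alignment - offset : 0;
}
```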
- magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone->small_rack.magazines, - REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p))); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), msize); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mwaste); - small_mag_ptr->mag_num_objects++; - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - - // Give up mwaste blocks beginning at q to the small free list - free_small(&szone->small_rack, q, SMALL_REGION_FOR_PTR(q), SMALL_BYTES_FOR_MSIZE(mwaste)); - } - - return p; // p has the desired size and alignment, and can later be free()'d -} - -boolean_t -small_claimed_address(rack_t *rack, void *ptr) -{ - region_t r = small_region_for_ptr_no_lock(rack, ptr); - return r && ptr < (void *)SMALL_REGION_END(r); -} - -void * -small_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size) -{ - msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_good_size); - msize_t mshrinkage = SMALL_MSIZE_FOR_BYTES(old_size) - new_msize; - - if (mshrinkage) { - void *q = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(new_msize)); - magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)), - MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr))); - - // Mark q as block header and in-use, thus creating two blocks. - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), new_msize); - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mshrinkage); - small_mag_ptr->mag_num_objects++; - - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - free_small(rack, q, SMALL_REGION_FOR_PTR(q), 0); - } - - return ptr; -} - -boolean_t -small_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size) -{ - // returns 1 on success - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - unsigned index; - msize_t old_msize, new_msize; - unsigned next_index; - void *next_block; - msize_t next_msize_and_free; - boolean_t is_free; - msize_t next_msize, leftover_msize; - void *leftover; - - index = SMALL_META_INDEX_FOR_PTR(ptr); - old_msize = SMALL_MSIZE_FOR_BYTES(old_size); - new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1); - next_index = index + old_msize; - - if (next_index >= NUM_SMALL_BLOCKS) { - return 0; - } - next_block = (char *)ptr + old_size; - -#if DEBUG_MALLOC - if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) { - malloc_zone_error(rack->debug_flags, true, "internal invariant broken in realloc(next_block) for %p\n", next_block); - } - if (meta_headers[index] != old_msize) { - malloc_report(ASL_LEVEL_ERR, "*** small_try_realloc_in_place incorrect old %d %d\n", meta_headers[index], old_msize); - } -#endif - - magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)), - MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr))); - if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr))) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; - } - - msize_t coalesced_msize = new_msize - old_msize; -#if CONFIG_SMALL_CACHE - void *last_free_ptr = small_mag_ptr->mag_last_free; - msize_t last_free_msize = small_mag_ptr->mag_last_free_msize; - if (last_free_ptr == next_block && old_msize + last_free_msize >= new_msize) 
{ - /* - * There is a block in mag_last_free and it's immediately after - * this block and it's large enough. We can use some or all of it. - */ - leftover_msize = last_free_msize - coalesced_msize; - if (leftover_msize) { - small_mag_ptr->mag_last_free_msize -= coalesced_msize; - small_mag_ptr->mag_last_free += new_size - old_size; - // The block in mag_last_free is still marked as header and in-use, so copy that - // state to the block that remains. The state for the block that we're going to - // use is adjusted by the small_meta_header_set_middle() call below. - small_meta_header_set_in_use(meta_headers, index + new_msize, leftover_msize); - } else { - // Using the whole block. - small_mag_ptr->mag_last_free = NULL; - small_mag_ptr->mag_last_free_msize = 0; - small_mag_ptr->mag_last_free_rgn = NULL; - } - small_meta_header_set_in_use(meta_headers, index, new_msize); - small_meta_header_set_middle(meta_headers, next_index); - } else { -#endif // CONFIG_SMALL_CACHE - /* - * Try to expand into unused space immediately after this block. - */ - msize_t unused_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end); - void *unused_start = SMALL_REGION_END(SMALL_REGION_FOR_PTR(ptr)) - small_mag_ptr->mag_bytes_free_at_end; - if (small_mag_ptr->mag_last_region == SMALL_REGION_FOR_PTR(ptr) - && coalesced_msize < unused_msize && unused_start == ptr + old_size) { - // Extend the in-use for this block to the new size - small_meta_header_set_in_use(meta_headers, index, new_msize); - - // Clear the in-use size for the start of the area we extended into - small_meta_header_set_middle(meta_headers, next_index); - - // Reduce mag_bytes_free_at_end and update its in-use size. - small_mag_ptr->mag_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(coalesced_msize); - small_meta_header_set_in_use(meta_headers, index + new_msize, SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end)); - } else { - /* - * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) - * it. - */ - next_msize_and_free = meta_headers[next_index]; - is_free = next_msize_and_free & SMALL_IS_FREE; - if (!is_free) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; // next_block is in use; - } - - next_msize = next_msize_and_free & ~SMALL_IS_FREE; - if (old_msize + next_msize < new_msize) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; // even with next block, not enough - } - - // The following block is big enough; pull it from its freelist and chop off enough to satisfy - // our needs. 
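Stripped of the mag_last_free cache and region-end cases, the grow-in-place test that follows asks one question: is the successor block free, and do the two blocks together reach the new size? A sketch with illustrative metadata types:

```c
#include <stdbool.h>
#include <stdint.h>

#define FREE_FLAG 0x8000u

typedef uint16_t msize_t;

/* Can realloc grow in place? Only if the block just after ours is free and
 * the combined span covers new_msize. The real code also handles the
 * last-free cache and the untouched space at the region's end. */
static bool can_grow_in_place(const msize_t *meta, unsigned index,
		msize_t old_msize, msize_t new_msize)
{
	msize_t next_entry = meta[index + old_msize];
	if (!(next_entry & FREE_FLAG)) {
		return false; /* successor is live: must relocate */
	}
	msize_t next_msize = next_entry & ~FREE_FLAG;
	return old_msize + next_msize >= new_msize; /* combined span suffices */
}
```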
-			free_list_t freee = small_free_list_find_by_ptr(rack, small_mag_ptr, next_block, next_msize);
-			small_free_list_remove_ptr(rack, small_mag_ptr, freee, next_msize);
-			small_meta_header_set_middle(meta_headers, next_index);
-			leftover_msize = old_msize + next_msize - new_msize;
-			if (leftover_msize) {
-				/* there's some left, so put the remainder back */
-				leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
-				small_free_list_add_ptr(rack, small_mag_ptr, leftover, leftover_msize);
-			}
-			small_meta_header_set_in_use(meta_headers, index, new_msize);
-		}
-#if CONFIG_SMALL_CACHE
-	}
-#endif // CONFIG_SMALL_CACHE
-#if DEBUG_MALLOC
-	if (SMALL_BYTES_FOR_MSIZE(new_msize) > szone->large_threshold) {
-		malloc_report(ASL_LEVEL_ERR, "*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
-	}
-
-	if (LOG(szone, ptr)) {
-		malloc_report(ASL_LEVEL_INFO, "in small_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
-	}
-#endif
-	small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
-
-	// Update this region's bytes in use count
-	region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
-	size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
-	node->bytes_used = (unsigned int)bytes_used;
-
-	// Emptiness discriminant
-	if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
-		/* After this reallocation the region is still sparse, so it must have been even more so before
-		 * the reallocation. That implies the region is already correctly marked. Do nothing. */
-	} else {
-		/* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
-		 * recirculation candidates list. */
-		node->recirc_suitable = FALSE;
-	}
-
-	SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr);
-	CHECK(szone, __PRETTY_FUNCTION__);
-	return 1;
-}
-
-static char *small_check_fail_msg = "check: incorrect small region ";
-
-#define SMALL_CHECK_FAIL(fmt, ...)
\ - malloc_zone_check_fail(small_check_fail_msg, \ - "%ld, counter=%d\n" fmt, region_index, counter, __VA_ARGS__); - -boolean_t -small_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter) -{ - unsigned char *ptr = SMALL_REGION_ADDRESS(region); - msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); - unsigned char *region_end = SMALL_REGION_END(region); - msize_t prev_free = 0; - unsigned index; - msize_t msize_and_free; - msize_t msize; - free_list_t free_head, previous, next; - msize_t *follower; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - - // Assumes locked - CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); - - if (region == small_mag_ptr->mag_last_region) { - ptr += small_mag_ptr->mag_bytes_free_at_start; - region_end -= small_mag_ptr->mag_bytes_free_at_end; - } - - while (ptr < region_end) { - index = SMALL_META_INDEX_FOR_PTR(ptr); - msize_and_free = meta_headers[index]; - if (!(msize_and_free & SMALL_IS_FREE)) { - // block is in use - msize = msize_and_free; - if (!msize) { - SMALL_CHECK_FAIL("*** invariant broken: null msize ptr=%p num_small_regions=%d end=%p\n", ptr, - (int)rack->num_regions, region_end); - return 0; - } -#if !CONFIG_RELAXED_INVARIANT_CHECKS - if (SMALL_BYTES_FOR_MSIZE(msize) > szone->large_threshold) { - SMALL_CHECK_FAIL("*** invariant broken for %p this small msize=%d - size is too large\n", ptr, msize_and_free); - return 0; - } -#endif // CONFIG_RELAXED_INVARIANT_CHECKS - ptr += SMALL_BYTES_FOR_MSIZE(msize); - prev_free = 0; - } else { - // free pointer - msize = msize_and_free & ~SMALL_IS_FREE; - free_head = (free_list_t){ .p = ptr }; - follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize); - if (!msize) { - SMALL_CHECK_FAIL("*** invariant broken for free block %p this msize=%d\n", ptr, msize); - return 0; - } -#if !CONFIG_RELAXED_INVARIANT_CHECKS - if (prev_free) { - SMALL_CHECK_FAIL("*** invariant broken for %p (2 free in a row)\n", ptr); - return 0; - } -#endif - - // check for possible OOB entry if needed - if (small_needs_oob_free_entry(ptr, msize)) { - oob_free_entry_t oob = small_oob_free_find_ptr(ptr, msize); - if (oob) { - free_head.oob = oob; - } - } - - previous = small_free_list_get_previous(rack, free_head); - next = small_free_list_get_next(rack, free_head); - if (previous.p && !SMALL_PTR_IS_FREE(small_free_list_get_ptr(previous))) { - SMALL_CHECK_FAIL("*** invariant broken for %p (previous %p is not a free pointer)\n", ptr, small_free_list_get_ptr(previous)); - return 0; - } - if (next.p && !SMALL_PTR_IS_FREE(small_free_list_get_ptr(next))) { - SMALL_CHECK_FAIL("*** invariant broken for %p (next %p is not a free pointer)\n", ptr, small_free_list_get_ptr(next)); - return 0; - } - if (SMALL_PREVIOUS_MSIZE(follower) != msize) { - SMALL_CHECK_FAIL("*** invariant broken for small free %p followed by %p in region [%p-%p] " - "(end marker incorrect) should be %d; in fact %d\n", - ptr, follower, SMALL_REGION_ADDRESS(region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower)); - return 0; - } - ptr = (unsigned char *)follower; - prev_free = SMALL_IS_FREE; - } - } - return 1; -} - -kern_return_t -small_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - szone_t *szone, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - size_t num_regions; - size_t index; - region_t *regions; - vm_range_t buffer[MAX_RECORDER_BUFFER]; - unsigned count = 0; - kern_return_t err; - 
	region_t region;
-	vm_range_t range;
-	vm_range_t admin_range;
-	vm_range_t ptr_range;
-	unsigned char *mapped_region;
-	msize_t *block_header;
-	unsigned block_index;
-	unsigned block_limit;
-	msize_t msize_and_free;
-	msize_t msize;
-	magazine_t *small_mag_base = NULL;
-
-	region_hash_generation_t *srg_ptr;
-	err = reader(task, (vm_address_t)szone->small_rack.region_generation, sizeof(region_hash_generation_t), (void **)&srg_ptr);
-	if (err) {
-		return err;
-	}
-
-	num_regions = srg_ptr->num_regions_allocated;
-	err = reader(task, (vm_address_t)srg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
-	if (err) {
-		return err;
-	}
-
-	if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
-		// Map in all active magazines. Do this outside the iteration over regions.
-		err = reader(task, (vm_address_t)(szone->small_rack.magazines), szone->small_rack.num_magazines * sizeof(magazine_t),
-				(void **)&small_mag_base);
-		if (err) {
-			return err;
-		}
-	}
-
-	for (index = 0; index < num_regions; ++index) {
-		region = regions[index];
-		if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
-			range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
-			range.size = SMALL_REGION_SIZE;
-			if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
-				admin_range.address = range.address + SMALL_METADATA_START;
-				admin_range.size = SMALL_METADATA_SIZE;
-				recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
-			}
-			if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
-				ptr_range.address = range.address;
-				ptr_range.size = NUM_SMALL_BLOCKS * SMALL_QUANTUM;
-				recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
-			}
-			if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
-				err = reader(task, range.address, range.size, (void **)&mapped_region);
-				if (err) {
-					return err;
-				}
-
-				mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region);
-				magazine_t *small_mag_ptr = small_mag_base + mag_index;
-
-				int cached_free_blocks = 0;
-#if CONFIG_SMALL_CACHE
-				// Each magazine could have a pointer to a cached free block from
-				// this region. Count the magazines that have such a pointer.
-				for (mag_index = 0; mag_index < szone->small_rack.num_magazines; mag_index++) {
-					if ((void *)range.address == (small_mag_base + mag_index)->mag_last_free_rgn) {
-						cached_free_blocks++;
-					}
-				}
-#endif // CONFIG_SMALL_CACHE
-
-				block_header = (msize_t *)(mapped_region + SMALL_METADATA_START + sizeof(region_trailer_t));
-				block_index = 0;
-				block_limit = NUM_SMALL_BLOCKS;
-				if (region == small_mag_ptr->mag_last_region) {
-					block_index += SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start);
-					block_limit -= SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end);
-				}
-
-				for (; block_index < block_limit; block_index += msize) {
-					msize_and_free = block_header[block_index];
-					msize = msize_and_free & ~SMALL_IS_FREE;
-					if (!msize) {
-						return KERN_FAILURE; // Something's amiss. Avoid looping at this block_index.
-					}
-					if (!(msize_and_free & SMALL_IS_FREE)) {
-						vm_address_t ptr = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
-#if CONFIG_SMALL_CACHE
-						// If there are still magazines that have cached free
-						// blocks in this region, check whether this is one of
-						// them and don't return the block pointer if it is.
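The enumerator's inner loop is easier to read against local memory. A miniature that keeps the metadata walk and the stop-on-corruption guard but drops the remote reader callback and result batching:

```c
#include <stdint.h>
#include <stdio.h>

#define FREE_FLAG 0x8000u

typedef uint16_t msize_t;

/* Visit each block, skip free ones, report live ones, and bail out rather
 * than spin if a zero msize indicates corrupt metadata. */
static void enumerate_in_use(const msize_t *meta, unsigned num_blocks,
		uintptr_t base, size_t quantum)
{
	unsigned i = 0;
	while (i < num_blocks) {
		msize_t entry = meta[i];
		msize_t msize = entry & ~FREE_FLAG;
		if (msize == 0) {
			break; /* something's amiss; don't loop forever */
		}
		if (!(entry & FREE_FLAG)) {
			printf("in use: %p (%zu bytes)\n",
					(void *)(base + (size_t)i * quantum),
					(size_t)msize * quantum);
		}
		i += msize;
	}
}
```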
- boolean_t block_cached = false; - if (cached_free_blocks) { - for (mag_index = 0; mag_index < szone->small_rack.num_magazines; mag_index++) { - if ((void *)ptr == (small_mag_base + mag_index)->mag_last_free) { - block_cached = true; - cached_free_blocks--; - break; - } - } - } - if (block_cached) { - continue; - } -#endif // CONFIG_SMALL_CACHE - // Block in use - buffer[count].address = ptr; - buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize); - count++; - if (count >= MAX_RECORDER_BUFFER) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - if (count) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - } - return 0; -} - -static void * -small_malloc_from_free_list(rack_t *rack, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - msize_t this_msize; - grain_t slot = SMALL_FREE_SLOT_FOR_MSIZE(rack, msize); - free_list_t *free_list = small_mag_ptr->mag_free_list; - free_list_t *the_slot = free_list + slot; - free_list_t *limit; - unsigned bitmap; - msize_t leftover_msize; - void *leftover_ptr; - void *ptr; - - // Assumes we've locked the region - CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); - - // Look for an exact match by checking the freelist for this msize. - if (small_free_list_get_ptr(*the_slot)) { - ptr = small_free_list_get_ptr(*the_slot); - this_msize = msize; - small_free_list_remove_ptr(rack, small_mag_ptr, *the_slot, msize); - goto return_small_alloc; - } - - // Mask off the bits representing slots holding free blocks smaller than - // the size we need. If there are no larger free blocks, try allocating - // from the free space at the end of the small region. - // - // BITMAPN_CTZ implementation - unsigned idx = slot >> 5; - bitmap = 0; - unsigned mask = ~((1 << (slot & 31)) - 1); - for (; idx < SMALL_FREELIST_BITMAP_WORDS(rack); ++idx) { - bitmap = small_mag_ptr->mag_bitmap[idx] & mask; - if (bitmap != 0) { - break; - } - mask = ~0U; - } - // Check for fallthrough: No bits set in bitmap - if ((bitmap == 0) && (idx == SMALL_FREELIST_BITMAP_WORDS(rack))) { - goto try_small_from_end; - } - - // Start looking at the first set bit, plus 32 bits for every word of - // zeroes or entries that were too small. - slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); - - // FIXME: Explain use of - 1 here, last slot has special meaning - limit = free_list + SMALL_FREE_SLOT_COUNT(rack) - 1; - free_list += slot; - - // Attempt to pull off the free_list slot that we now think is full. 
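If that pull succeeds with a larger block, the surplus is split off and re-filed, as the add_leftover_and_proceed path below does. The split step in isolation, with a caller-supplied hook standing in for the zone's free-list insert:

```c
#include <stddef.h>

#define QUANTUM 512u /* the small quantum; illustrative constant */

typedef void (*refile_fn)(void *block, size_t quanta);

/* Keep `want` quanta of a larger free block for the caller and hand the
 * tail back to a free list through `refile`. */
static void *take_and_split(void *block, size_t have, size_t want,
		refile_fn refile)
{
	if (have > want) {
		refile((char *)block + want * QUANTUM, have - want);
	}
	return block; /* the head satisfies the request */
}
```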
- if ((ptr = small_free_list_get_ptr(*free_list))) { - this_msize = SMALL_PTR_SIZE(ptr); - small_free_list_remove_ptr(rack, small_mag_ptr, *free_list, this_msize); - goto add_leftover_and_proceed; - } - -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - -try_small_from_end: - // Let's see if we can use small_mag_ptr->mag_bytes_free_at_end - if (small_mag_ptr->mag_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) { - ptr = SMALL_REGION_END(small_mag_ptr->mag_last_region) - small_mag_ptr->mag_bytes_free_at_end; - small_mag_ptr->mag_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize); - if (small_mag_ptr->mag_bytes_free_at_end) { - // let's mark this block as in use to serve as boundary - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), - SMALL_META_INDEX_FOR_PTR((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)), - SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end)); - } - this_msize = msize; - goto return_small_alloc; - } -#if CONFIG_ASLR_INTERNAL - // Try from start if nothing left at end - if (small_mag_ptr->mag_bytes_free_at_start >= SMALL_BYTES_FOR_MSIZE(msize)) { - ptr = SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region) + small_mag_ptr->mag_bytes_free_at_start - - SMALL_BYTES_FOR_MSIZE(msize); - small_mag_ptr->mag_bytes_free_at_start -= SMALL_BYTES_FOR_MSIZE(msize); - if (small_mag_ptr->mag_bytes_free_at_start) { - // let's mark this block as in use to serve as boundary - small_meta_header_set_in_use( - SMALL_META_HEADER_FOR_PTR(ptr), 0, SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start)); - } - this_msize = msize; - goto return_small_alloc; - } -#endif - return NULL; - -add_leftover_and_proceed: - if (this_msize > msize) { - leftover_msize = this_msize - msize; - leftover_ptr = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize); -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); - } -#endif - small_free_list_add_ptr(rack, small_mag_ptr, leftover_ptr, leftover_msize); - this_msize = msize; - } - -return_small_alloc: - small_mag_ptr->mag_num_objects++; - small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(this_msize); - - // Check that the region cookie is intact and update the region's bytes in use count - region_t *region = SMALL_REGION_FOR_PTR(ptr); - region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(region); - region_check_cookie(region, trailer); - size_t bytes_used = trailer->bytes_used + SMALL_BYTES_FOR_MSIZE(this_msize); - trailer->bytes_used = (unsigned int)bytes_used; - - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) { - /* After this allocation the region is still sparse, so it must have been even more so before - * the allocation. That implies the region is already correctly marked. Do nothing. */ - } else { - /* Region has crossed threshold from sparsity to density. Mark in not "suitable" on the - * recirculation candidates list. 
*/ - trailer->recirc_suitable = FALSE; - } -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); - } -#endif - small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), this_msize); - return ptr; -} - -void * -small_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested) -{ - void *ptr; - mag_index_t mag_index = small_mag_get_thread_index() % rack->num_magazines; - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - - MALLOC_TRACE(TRACE_small_malloc, (uintptr_t)rack, SMALL_BYTES_FOR_MSIZE(msize), (uintptr_t)small_mag_ptr, cleared_requested); - - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - -#if CONFIG_SMALL_CACHE - ptr = small_mag_ptr->mag_last_free; - - if (small_mag_ptr->mag_last_free_msize == msize) { - // we have a winner - small_mag_ptr->mag_last_free = NULL; - small_mag_ptr->mag_last_free_msize = 0; - small_mag_ptr->mag_last_free_rgn = NULL; - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } -#endif /* CONFIG_SMALL_CACHE */ - - while (1) { - ptr = small_malloc_from_free_list(rack, small_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - -#if CONFIG_RECIRC_DEPOT - if (small_get_region_from_depot(rack, small_mag_ptr, mag_index, msize)) { - ptr = small_malloc_from_free_list(rack, small_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - } -#endif // CONFIG_RECIRC_DEPOT - - // The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc(). - // The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first - // thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section. - // Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the - // allocation. After some time the magazine is resupplied, the original thread leaves with its allocation, - // and retry-ing threads succeed in the code just above. - if (!small_mag_ptr->alloc_underway) { - void *fresh_region; - - // time to create a new region (do this outside the magazine lock) - small_mag_ptr->alloc_underway = TRUE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - fresh_region = mvm_allocate_pages_securely(SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, VM_MEMORY_MALLOC_SMALL, rack->debug_flags); - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - - // DTrace USDT Probe - MAGMALLOC_ALLOCREGION(SMALL_SZONE_FROM_RACK(rack), (int)mag_index, fresh_region, SMALL_REGION_SIZE); - - if (!fresh_region) { // out of memory! 
- small_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return NULL; - } - - region_set_cookie(REGION_TRAILER_FOR_SMALL_REGION(fresh_region)); - ptr = small_malloc_from_region_no_lock(rack, small_mag_ptr, mag_index, msize, fresh_region); - - // we don't clear because this freshly allocated space is pristine - small_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return ptr; - } else { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - } - } - /* NOTREACHED */ -} - -size_t -small_size(rack_t *rack, const void *ptr) -{ - if (small_region_for_ptr_no_lock(rack, ptr)) { - if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { - return 0; - } - msize_t msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); - if (msize_and_free & SMALL_IS_FREE) { - return 0; - } -#if CONFIG_SMALL_CACHE - { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); - if (DEPOT_MAGAZINE_INDEX != mag_index) { - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - - if (ptr == small_mag_ptr->mag_last_free) { - return 0; - } - } else { - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - - if (ptr == small_mag_ptr->mag_last_free) { - return 0; - } - } - } - } -#endif - return SMALL_BYTES_FOR_MSIZE(msize_and_free); - } - - return 0; -} - -static MALLOC_NOINLINE void -free_small_botch(rack_t *rack, void *ptr) -{ - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - malloc_zone_error(rack->debug_flags, true, "double free for ptr %p\n", ptr); -} - -void -free_small(rack_t *rack, void *ptr, region_t small_region, size_t known_size) -{ - msize_t msize; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - - // ptr is known to be in small_region - if (known_size) { - msize = SMALL_MSIZE_FOR_BYTES(known_size + SMALL_QUANTUM - 1); - } else { - msize = SMALL_PTR_SIZE(ptr); - if (SMALL_PTR_IS_FREE(ptr)) { - free_small_botch(rack, ptr); - return; - } - } - - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - -#if CONFIG_SMALL_CACHE - // Depot does not participate in CONFIG_SMALL_CACHE since it can't be directly malloc()'d - if (DEPOT_MAGAZINE_INDEX != mag_index) { - void *ptr2 = small_mag_ptr->mag_last_free; // Might be NULL - msize_t msize2 = small_mag_ptr->mag_last_free_msize; - region_t rgn2 = small_mag_ptr->mag_last_free_rgn; - - /* check that we don't already have this pointer in the cache */ - if (ptr == ptr2) { - free_small_botch(rack, ptr); - return; - } - - if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize) { - memset(ptr, SCRABBLE_BYTE, SMALL_BYTES_FOR_MSIZE(msize)); - } - - small_mag_ptr->mag_last_free = ptr; - small_mag_ptr->mag_last_free_msize = msize; - small_mag_ptr->mag_last_free_rgn = small_region; - - if (!ptr2) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return; - } - - msize = msize2; - ptr = ptr2; - small_region = rgn2; - } -#endif /* CONFIG_SMALL_CACHE */ - - // Now in the time it took to acquire the lock, the region may have migrated - // from one magazine to another. I.e. trailer->mag_index is volatile. 
- // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock) - // is stale. If so, keep on tryin' ... - region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(small_region); - mag_index_t refreshed_index; - - while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment - - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - - mag_index = refreshed_index; - small_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - } - - if (small_free_no_lock(rack, small_mag_ptr, mag_index, small_region, ptr, msize)) { - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - } - - CHECK(szone, __PRETTY_FUNCTION__); -} - -void -print_small_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack) -{ - free_list_t ptr; - _SIMPLE_STRING b = _simple_salloc(); - mag_index_t mag_index; - - if (b) { - rack_t *mapped_rack; - magazine_t *mapped_magazines; - if (reader(task, (vm_address_t)rack, sizeof(struct rack_s), - (void **)&mapped_rack)) { - printer("Failed to map small rack\n"); - return; - } - if (reader(task, (vm_address_t)mapped_rack->magazines, - mapped_rack->num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map small rack magazines\n"); - return; - } - - _simple_sappend(b, "small free sizes:\n"); - grain_t free_slots = SMALL_FREE_SLOT_COUNT(mapped_rack); - for (mag_index = -1; mag_index < mapped_rack->num_magazines; - mag_index++) { - grain_t slot = 0; - if (mag_index == -1) { - _simple_sprintf(b, "\tRecirc depot: "); - } else { - _simple_sprintf(b, "\tMagazine %d: ", mag_index); - } - while (slot < free_slots) { - ptr = mapped_magazines[mag_index].mag_free_list[slot]; - if (small_free_list_get_ptr_task(task, reader, printer, ptr)) { - _simple_sprintf(b, "%s%y[%d]; ", (slot == free_slots - 1) ? 
- ">=" : "", (slot + 1) * SMALL_QUANTUM, - small_free_list_count(task, reader, printer, - rack, ptr)); - } - slot++; - } - _simple_sappend(b, "\n"); - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -void -print_small_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, szone_t *szone, int level, - region_t region, size_t bytes_at_start, size_t bytes_at_end) -{ - unsigned counts[1024]; - unsigned in_use = 0; - uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(region); - uintptr_t current = start + bytes_at_start; - uintptr_t limit = (uintptr_t)SMALL_REGION_END(region) - bytes_at_end; - uintptr_t mapped_start; - msize_t msize_and_free; - msize_t msize; - unsigned ci; - _SIMPLE_STRING b; - uintptr_t pgTot = 0; - - if (reader(task, (vm_address_t)start, SMALL_REGION_SIZE, - (void **)&mapped_start)) { - printer("Failed to map small region at %p\n", start); - return; - } - off_t start_offset = mapped_start - start; - region_t mapped_region = (region_t)mapped_start; - - if (region == HASHRING_REGION_DEALLOCATED) { - if ((b = _simple_salloc()) != NULL) { - _simple_sprintf(b, "Small region [unknown address] was returned to the OS\n"); - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } - return; - } - - memset(counts, 0, sizeof(counts)); - while (current < limit) { - msize_and_free = *(uintptr_t *)((char *)SMALL_METADATA_FOR_PTR(current) + start_offset); - msize = msize_and_free & ~SMALL_IS_FREE; - if (!msize) { - printer("*** error with %p: msize=%d, free: %x\n", (void *)current, - (unsigned)msize, msize_and_free & SMALL_IS_FREE); - break; - } - if (!(msize_and_free & SMALL_IS_FREE)) { - // block in use - if (msize < 1024) { - counts[msize]++; - } - in_use++; - } else { - uintptr_t pgLo = round_page_quanta(current + - sizeof(free_list_t) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_quanta(current + - SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); - - if (pgLo < pgHi) { - pgTot += (pgHi - pgLo); - } - } - current += SMALL_BYTES_FOR_MSIZE(msize); - } - - if ((b = _simple_salloc()) != NULL) { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region); - _simple_sprintf(b, "Small region [%p-%p, %y] \t", (void *)start, - SMALL_REGION_END(region), (int)SMALL_REGION_SIZE); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - _simple_sprintf(b, "Recirc depot \t"); - } else { - _simple_sprintf(b, "Magazine=%d \t", mag_index); - } - _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly (%d%%) \t", - in_use, BYTES_USED_FOR_SMALL_REGION(mapped_region), - (int)(100.0F * BYTES_USED_FOR_SMALL_REGION(mapped_region))/SMALL_REGION_SIZE); - if (bytes_at_end || bytes_at_start) { - _simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start); - } - if (mag_index == DEPOT_MAGAZINE_INDEX) { - _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot); - } else { - _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot); -#if CONFIG_RECIRC_DEPOT - _simple_sprintf(b, small_region_below_recirc_threshold(mapped_region) ? 
- "\tEmpty enough to be moved to recirc depot" : - "\tNot empty enough to be moved to recirc depot"); -#endif // CONFIG_RECIRC_DEPOT - } - if (level >= MALLOC_VERBOSE_PRINT_LEVEL && in_use) { - _simple_sappend(b, "\n\tSizes in use: "); - for (ci = 0; ci < 1024; ci++) { - if (counts[ci]) { - _simple_sprintf(b, "%y[%d] ", SMALL_BYTES_FOR_MSIZE(ci), - counts[ci]); - } - } - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -static char *small_freelist_fail_msg = "check: small free list incorrect"; - -#define SMALL_FREELIST_FAIL(fmt, ...) \ - malloc_zone_check_fail(small_freelist_fail_msg, \ - " (slot=%u), counter=%d\n" fmt, slot, counter, __VA_ARGS__); - -boolean_t -small_free_list_check(rack_t *rack, grain_t slot, unsigned counter) -{ - mag_index_t mag_index; - - for (mag_index = -1; mag_index < rack->num_magazines; mag_index++) { - magazine_t *small_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(small_mag_ptr); - - unsigned count = 0; - free_list_t current = rack->magazines[mag_index].mag_free_list[slot]; - free_list_t previous = (free_list_t){ .p = NULL }; - msize_t msize_and_free; - void *ptr = NULL; - - while ((ptr = small_free_list_get_ptr(current))) { - msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); - if (!(msize_and_free & SMALL_IS_FREE)) { - SMALL_FREELIST_FAIL("*** in-use ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; - } - if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { - SMALL_FREELIST_FAIL("*** unaligned ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; - } - if (!small_region_for_ptr_no_lock(rack, ptr)) { - SMALL_FREELIST_FAIL("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; - } - if (small_free_list_get_previous(rack, current).p != previous.p) { - SMALL_FREELIST_FAIL("*** previous incorrectly set slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - return 0; - } - previous = current; - current = small_free_list_get_next(rack, current); - count++; - } - - SZONE_MAGAZINE_PTR_UNLOCK(small_mag_ptr); - } - return 1; -} diff --git a/src/libmalloc/src/magazine_tiny.c b/src/libmalloc/src/magazine_tiny.c deleted file mode 100644 index 8e6e34e3e..000000000 --- a/src/libmalloc/src/magazine_tiny.c +++ /dev/null @@ -1,2748 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -// The address and size of the block in mag_last_free are combined. 
These
-// macros abstract construction of the combined value and extraction of the
-// size and pointer.
-#define TINY_MAG_LAST_FREE_FROM_PTR_AND_MSIZE(ptr, msize) (void *)(((uintptr_t)(ptr))|((msize_t)msize))
-#define TINY_PTR_FROM_MAG_LAST_FREE(x) (void *)(((uintptr_t)(x)) & ~(TINY_QUANTUM - 1))
-#define TINY_MSIZE_FROM_MAG_LAST_FREE(x) (msize_t)(((uintptr_t)(x)) & (TINY_QUANTUM - 1))
-
-// Adjusts the pointer part of mag_last_free by a given amount in bytes. Must be
-// a multiple of the quantum size (not checked).
-#define TINY_MAG_LAST_FREE_PTR_ADJUST_PTR(x, size) (x) = ((void *)(x) + (size))
-
-// Decrements the size part of mag_last_free by a given msize value. Must not
-// reduce the msize part below zero (not checked).
-#define TINY_MAG_LAST_FREE_PTR_DEC_MSIZE(x, msize_delta) (x) = ((void *)(x) - (msize_delta))
-
-static MALLOC_INLINE MALLOC_ALWAYS_INLINE
-mag_index_t
-tiny_mag_get_thread_index(void)
-{
-#if CONFIG_TINY_USES_HYPER_SHIFT
-	if (os_likely(_os_cpu_number_override == -1)) {
-		return _os_cpu_number() >> hyper_shift;
-	} else {
-		return _os_cpu_number_override >> hyper_shift;
-	}
-#else // CONFIG_TINY_USES_HYPER_SHIFT
-	if (os_likely(_os_cpu_number_override == -1)) {
-		return _os_cpu_number();
-	} else {
-		return _os_cpu_number_override;
-	}
-#endif // CONFIG_TINY_USES_HYPER_SHIFT
-}
-
-static inline grain_t
-tiny_slot_from_msize(msize_t msize)
-{
-	return (!msize || (msize > NUM_TINY_SLOTS) ? NUM_TINY_SLOTS : msize - 1);
-}
-
-/*
- * Get the size of the previous free block, which is stored in the last two
- * bytes of the block. If the previous block is not free, then the result is
- * undefined.
- */
-static msize_t
-get_tiny_previous_free_msize(const void *ptr)
-{
-	// Check that ptr is not the first block in the region (otherwise there is
-	// no previous block). If the previous block is marked as a block header,
-	// its size is one quantum and there is no stored size.
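Because every tiny block is TINY_QUANTUM-aligned, the low bits of a block address are always zero, and the TINY_MAG_LAST_FREE_* macros above fold the block's msize into exactly those bits. A minimal standalone sketch of the same packing trick (QUANTUM and the helper names are illustrative stand-ins, and it assumes the packed size fits below the alignment):

    #include <assert.h>
    #include <stdint.h>

    #define QUANTUM 16u /* stand-in for TINY_QUANTUM; must be a power of two */

    /* Fold a small size into the always-zero low bits of an aligned pointer. */
    static inline void *
    pack_ptr_size(void *ptr, unsigned size)
    {
        assert(((uintptr_t)ptr & (QUANTUM - 1)) == 0); /* quantum-aligned */
        assert(size < QUANTUM);                        /* fits in the low bits */
        return (void *)((uintptr_t)ptr | size);
    }

    static inline void *
    packed_ptr(void *x) /* cf. TINY_PTR_FROM_MAG_LAST_FREE */
    {
        return (void *)((uintptr_t)x & ~(uintptr_t)(QUANTUM - 1));
    }

    static inline unsigned
    packed_size(void *x) /* cf. TINY_MSIZE_FROM_MAG_LAST_FREE */
    {
        return (unsigned)((uintptr_t)x & (QUANTUM - 1));
    }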
- if (ptr != TINY_REGION_FOR_PTR(ptr)) { - void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM); - uint32_t *prev_header = TINY_BLOCK_HEADER_FOR_PTR(prev_block); - msize_t prev_index = TINY_INDEX_FOR_PTR(prev_block); - if (BITARRAY_BIT(prev_header, prev_index)) { - return 1; - } - return TINY_PREVIOUS_MSIZE(ptr); - } - // don't read possibly unmapped memory before the beginning of the region - return 0; -} - -static MALLOC_INLINE void -set_tiny_meta_header_in_use(const void *ptr, msize_t msize) -{ - uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - msize_t index = TINY_INDEX_FOR_PTR(ptr); - msize_t clr_msize = msize - 1; - msize_t midx = (index >> 5) << 1; - uint32_t val = (1 << (index & 31)); - -#if DEBUG_MALLOC - if (msize > NUM_TINY_SLOTS) { - malloc_report(ASL_LEVEL_ERR, "set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize); - } - if ((unsigned)index + (unsigned)msize > 0x10000) { - malloc_report(ASL_LEVEL_ERR, "set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize); - } - if (msize > TINY_BITMAP_RANGE_LIMIT) { - malloc_report(ASL_LEVEL_ERROR, "set_tiny_meta_header_in_use() invariant broken (3) %p %d\n", ptr, msize); - } -#endif - - block_header[midx] |= val; // BITARRAY_SET(block_header, index); - block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index); - - // bitarray_mclr(block_header, index, end_bit); - // bitarray_mclr(in_use, index, end_bit); - - index++; - midx = (index >> 5) << 1; - - unsigned start = index & 31; - unsigned end = start + clr_msize; - -#if defined(__LP64__) - if (end > 63) { - unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1; - unsigned mask1 = (0xFFFFFFFFU << (end - 64)); - block_header[midx + 0] &= mask0; // clear header - block_header[midx + 1] &= mask0; // clear in_use - block_header[midx + 2] = 0; // clear header - block_header[midx + 3] = 0; // clear in_use - block_header[midx + 4] &= mask1; // clear header - block_header[midx + 5] &= mask1; // clear in_use - } else -#endif - if (end > 31) { - unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1; - unsigned mask1 = (0xFFFFFFFFU << (end - 32)); - block_header[midx + 0] &= mask0; - block_header[midx + 1] &= mask0; - block_header[midx + 2] &= mask1; - block_header[midx + 3] &= mask1; - } else { - unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1; - mask |= (0xFFFFFFFFU << end); - block_header[midx + 0] &= mask; - block_header[midx + 1] &= mask; - } - - // we set the block_header bit for the following block to reaffirm next block is a block - index += clr_msize; - midx = (index >> 5) << 1; - val = (1 << (index & 31)); - block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize)); -#if DEBUG_MALLOC - { - boolean_t ff; - msize_t mf; - - mf = get_tiny_meta_header(ptr, &ff); - if (msize != mf) { - malloc_report(ASL_LEVEL_INFO, "setting header for tiny in_use %p : %d\n", ptr, msize); - malloc_report(ASL_LEVEL_INFO, "reading header for tiny %p : %d %d\n", ptr, mf, ff); - } - } -#endif -} - -static MALLOC_INLINE void set_tiny_meta_header_in_use_1(const void *ptr) // As above with msize == 1 -{ - uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - msize_t index = TINY_INDEX_FOR_PTR(ptr); - msize_t midx = (index >> 5) << 1; - uint32_t val = (1 << (index & 31)); - - block_header[midx] |= val; // BITARRAY_SET(block_header, index); - block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index); - - index++; - midx = (index >> 5) << 1; - val = (1 << (index & 31)); - - block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize)) 
-} - -static MALLOC_INLINE void -set_tiny_meta_header_middle(const void *ptr) -{ - // indicates this block is in the middle of an in use block - uint32_t *block_header; - uint32_t *in_use; - msize_t index; - - block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - in_use = TINY_INUSE_FOR_HEADER(block_header); - index = TINY_INDEX_FOR_PTR(ptr); - - BITARRAY_CLR(block_header, index); - BITARRAY_CLR(in_use, index); -} - -static MALLOC_INLINE void -set_tiny_meta_header_free(const void *ptr, msize_t msize) -{ - // !msize is acceptable and means 65536 - uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - msize_t index = TINY_INDEX_FOR_PTR(ptr); - msize_t midx = (index >> 5) << 1; - uint32_t val = (1 << (index & 31)); - -#if DEBUG_MALLOC - if ((unsigned)index + (unsigned)msize > 0x10000) { - malloc_report(ASL_LEVEL_ERR, "setting header for tiny free %p msize too large: %d\n", ptr, msize); - } -#endif - - block_header[midx] |= val; // BITARRAY_SET(block_header, index); - block_header[midx + 1] &= ~val; // BITARRAY_CLR(in_use, index); - - // mark the end of this block if msize is > 1. For msize == 0, the whole - // region is free, so there is no following block. For msize == 1, there is - // no space to write the size on 64 bit systems. The size for 1 quantum - // blocks is computed from the metadata bitmaps. - if (msize > 1) { - void *follower = FOLLOWING_TINY_PTR(ptr, msize); - TINY_PREVIOUS_MSIZE(follower) = msize; - TINY_FREE_SIZE(ptr) = msize; - } - if (msize == 0) { - TINY_FREE_SIZE(ptr) = msize; - } -#if DEBUG_MALLOC - boolean_t ff; - msize_t mf = get_tiny_meta_header(ptr, &ff); - if ((msize != mf) || !ff) { - malloc_report(ASL_LEVEL_INFO, "setting header for tiny free %p : %u\n", ptr, msize); - malloc_report(ASL_LEVEL_INFO, "reading header for tiny %p : %u %u\n", ptr, mf, ff); - } -#endif -} - -static MALLOC_INLINE boolean_t -tiny_meta_header_is_free(const void *ptr) -{ - uint32_t *block_header; - uint32_t *in_use; - msize_t index; - - block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - in_use = TINY_INUSE_FOR_HEADER(block_header); - index = TINY_INDEX_FOR_PTR(ptr); - if (!BITARRAY_BIT(block_header, index)) { - return 0; - } - return !BITARRAY_BIT(in_use, index); -} - -static MALLOC_INLINE void * -tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) -{ - // returns the previous block, assuming and verifying it's free - uint32_t *block_header; - uint32_t *in_use; - msize_t index; - msize_t previous_msize; - msize_t previous_index; - void *previous_ptr; - - block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); - in_use = TINY_INUSE_FOR_HEADER(block_header); - index = TINY_INDEX_FOR_PTR(ptr); - - if (!index) { - return NULL; - } - if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index) { - return NULL; - } - - previous_index = index - previous_msize; - previous_ptr = (void *)((uintptr_t)TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index)); - if (!BITARRAY_BIT(block_header, previous_index)) { - return NULL; - } - if (BITARRAY_BIT(in_use, previous_index)) { - return NULL; - } - if (get_tiny_free_size(previous_ptr) != previous_msize) { - return NULL; - } - - // conservative check did match true check - *prev_msize = previous_msize; - return previous_ptr; -} - -// Given a region and a slot index, scans regions that precede the region on -// the magazine region list for one that has something on its free list for -// the given slot and returns the address of the last such block, or NULL if -// there is none. 
To reduce overhead, we scan forward from the first region -// looking for our region and noting the last on the freelist for the given slot -// for each earlier region, stopping once we have looked at 5 regions if we did -// not reach our own. This has the effect of keeping the blocks for early -// regions near the front of the freelist while not spending large amounts of -// time looking for the "best" place to put this region's free blocks when there -// are many regions in the magazine. -static MALLOC_INLINE void * -tiny_earlier_region_last_free(magazine_t *tiny_mag_ptr, - region_trailer_t *trailer, grain_t slot) -{ - int count = 0; - uint16_t target_block = 0; - region_trailer_t *target_trailer = NULL; - region_trailer_t *next_trailer = tiny_mag_ptr->firstNode; - - while (next_trailer && next_trailer != trailer && count++ < 5) { - tiny_region_t r = TINY_REGION_FOR_PTR(next_trailer); - uint16_t block = r->free_blocks_by_slot[slot].last_block; - if (block) { - target_block = block; - target_trailer = next_trailer; - } - next_trailer = next_trailer->next; - } - return target_block ? TINY_PTR_FOR_INDEX(target_block - 1, - TINY_REGION_FOR_PTR(target_trailer)) : NULL; -} - -static MALLOC_INLINE void -tiny_update_region_free_list_for_remove(grain_t slot, tiny_free_list_t *ptr, - tiny_free_list_t *new_head) -{ - uint16_t ptr_index = TINY_INDEX_FOR_PTR(ptr); - tiny_region_t ptr_region = TINY_REGION_FOR_PTR(ptr); - region_free_blocks_t *blocks = &ptr_region->free_blocks_by_slot[slot]; - MALLOC_ASSERT(ptr_index == blocks->first_block - 1); - - if (new_head && (TINY_REGION_FOR_PTR(new_head) == TINY_REGION_FOR_PTR(ptr))) { - uint16_t new_head_block = TINY_INDEX_FOR_PTR(new_head) + 1; - if (blocks->first_block == blocks->last_block) { - blocks->last_block = new_head_block; - } - blocks->first_block = new_head_block; - } else { - // No more entries in this region. - blocks->first_block = blocks->last_block = 0; - } -} - -void -tiny_print_region_free_list(void *ptr, grain_t slot) -{ - tiny_region_t region = TINY_REGION_FOR_PTR(ptr); - region_free_blocks_t *blocks = ®ion->free_blocks_by_slot[slot]; - malloc_printf("For region %p, first block: %d (%p), last block: %d (%p)\n", - region, - blocks->first_block, - blocks->first_block ? TINY_PTR_FOR_INDEX(blocks->first_block - 1, region) : (void *)0, - blocks->last_block, - blocks->last_block ? TINY_PTR_FOR_INDEX(blocks->last_block - 1, region) : (void *)0); -} - -/* - * Adds an item to the proper free list, and also marks the meta-header of the - * block properly. - * Assumes szone has been locked - */ -static void -tiny_free_list_add_ptr(rack_t *rack, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize) -{ - grain_t slot = (!msize || (msize > NUM_TINY_SLOTS)) ? 
NUM_TINY_SLOTS : msize - 1; - tiny_free_list_t *free_ptr = ptr; - tiny_free_list_t *free_head = tiny_mag_ptr->mag_free_list[slot].p; - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } - if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { - malloc_zone_error(rack->debug_flags, true, "tiny_free_list_add_ptr: Unaligned ptr: %p\n", ptr); - } -#endif - set_tiny_meta_header_free(ptr, msize); - if (free_head) { -#if DEBUG_MALLOC - if (free_list_unchecksum_ptr(szone, &free_head->previous)) { - malloc_zone_error(rack->debug_flags, true, - "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous): " - "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p); - } - if (!tiny_meta_header_is_free(free_head)) { - malloc_zone_error(rack->debug_flags, true, - "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer): " - "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head); - } -#endif - } else { - BITMAPV_SET(tiny_mag_ptr->mag_bitmap, slot); - } - - tiny_region_t region = TINY_REGION_FOR_PTR(ptr); - region_free_blocks_t *free_blocks = ®ion->free_blocks_by_slot[slot]; - uint16_t first_free_block_index = free_blocks->first_block; - uint16_t this_block_index = TINY_INDEX_FOR_PTR(ptr); - - if (first_free_block_index) { - // This region already has something on its free list. - tiny_free_list_t *old_first_free = TINY_PTR_FOR_INDEX(first_free_block_index - 1, region); - tiny_free_list_t *prev_ptr = free_list_unchecksum_ptr(rack, &old_first_free->previous); - if (!prev_ptr) { - // Old first item was the first item in the magazine free list - - // update the magazine head pointer to point to this block. - tiny_mag_ptr->mag_free_list[slot].p = free_ptr; - } else { - prev_ptr->next.u = free_list_checksum_ptr(rack, free_ptr); // XXX - } - - // Set our previous pointer to the one from the old first block. - // It's already checksummed, so just copy it directly. It is NULL if - // the old first block was also the first block on the magazine free - // list. - free_ptr->previous.u = old_first_free->previous.u; - - // Our "next" pointer always points to the block that used to be first - // and we are always its predecessor. - free_ptr->next.u = free_list_checksum_ptr(rack, old_first_free); - old_first_free->previous.u = free_list_checksum_ptr(rack, free_ptr); - - // Update the first free block index for this region. - free_blocks->first_block = this_block_index + 1; - } else { - // Free list for this region is empty. Add ourselves to the magazine - // free list between the last block of the preceding region that has - // a free block and the first block of the next region with a free - // block (either of which may not exist). - tiny_free_list_t *prev_free = NULL; - tiny_free_list_t *next_free; - - // If the magazine free list is empty, we know we are going to add at - // the front. Otherwise, find the correct place. If we are freeing to - // the recirc depot, we can always free to the front because we don't - // care about ordering in the depot (since no allocations occur there). - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region); - if (mag_index != DEPOT_MAGAZINE_INDEX - && tiny_mag_ptr->mag_free_list[slot].p) { - region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(region); - prev_free = tiny_earlier_region_last_free(tiny_mag_ptr, trailer, slot); - } - if (!prev_free) { - // We are becoming the head of the magazine free list. 
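The free_list_unchecksum_ptr()/free_list_checksum_ptr() pairs used throughout this function keep every link in an encoded form, so a scribbled free list is detected rather than silently followed. The real encoding lives elsewhere in libmalloc; purely as an illustration of the idea, an XOR-cookie link with an alignment sanity check might look like this (the cookie, names, and abort path are assumptions, not the actual scheme):

    #include <stdint.h>
    #include <stdlib.h>

    static uintptr_t free_list_cookie; /* illustrative: seeded from a random source at startup */

    typedef union {
        uintptr_t u; /* encoded form, as stored inside the free block */
        void *p;     /* decoded form, as used by the list code */
    } link_t;

    static inline uintptr_t
    link_encode(void *p)
    {
        return (uintptr_t)p ^ free_list_cookie;
    }

    static inline void *
    link_decode(uintptr_t u, uintptr_t quantum_mask)
    {
        void *p = (void *)(u ^ free_list_cookie);
        if ((uintptr_t)p & quantum_mask) {
            abort(); /* corrupt link: decoded pointer lost its alignment */
        }
        return p;
    }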
- next_free = tiny_mag_ptr->mag_free_list[slot].p; - tiny_mag_ptr->mag_free_list[slot].p = free_ptr; - } else { - next_free = free_list_unchecksum_ptr(rack, &prev_free->next); - prev_free->next.u = free_list_checksum_ptr(rack, free_ptr); - } - free_ptr->previous.u = free_list_checksum_ptr(rack, prev_free); - - if (next_free) { - next_free->previous.u = free_list_checksum_ptr(rack, free_ptr); - } - free_ptr->next.u = free_list_checksum_ptr(rack, next_free); - - // Set the first and last free block index for this region. - free_blocks->first_block = free_blocks->last_block = - this_block_index + 1; - } -} - -/* - * Removes the item pointed to by ptr in the proper free list. - * Assumes szone has been locked - */ -static void -tiny_free_list_remove_ptr(rack_t *rack, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize) -{ - grain_t slot = tiny_slot_from_msize(msize); - tiny_free_list_t *free_ptr = ptr, *next, *previous; - - next = free_list_unchecksum_ptr(rack, &free_ptr->next); - previous = free_list_unchecksum_ptr(rack, &free_ptr->previous); - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); - } -#endif - if (!previous) { - // The block to remove is the head of the free list -#if DEBUG_MALLOC - if (tiny_mag_ptr->mag_free_list[slot] != ptr) { - malloc_zone_error(rack->debug_flags, true, - "tiny_free_list_remove_ptr: Internal invariant broken (tiny_mag_ptr->mag_free_list[slot]): " - "ptr=%p slot=%d msize=%d tiny_mag_ptr->mag_free_list[slot]=%p\n", ptr, slot, msize, - (void *)tiny_mag_ptr->mag_free_list[slot]); - return; - } -#endif - tiny_mag_ptr->mag_free_list[slot].p = next; - if (!next) { - BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); - } - } else { - // Check that the next pointer of "previous" points to free_ptr. - tiny_free_list_t *prev_next = free_list_unchecksum_ptr(rack, &previous->next); - if (prev_next != free_ptr) { - malloc_zone_error(rack->debug_flags, true, - "tiny_free_list_remove_ptr: Internal invariant broken (next ptr of prev): " - "ptr=%p, prev_next=%p\n", ptr, prev_next); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). - } - - // We know free_ptr is already checksummed, so we don't need to do it - // again. - previous->next = free_ptr->next; - } - if (next) { - // Check that the previous pointer of "next" points to free_ptr. - tiny_free_list_t *next_prev = free_list_unchecksum_ptr(rack, &next->previous); - if (next_prev != free_ptr) { - malloc_zone_error(rack->debug_flags, true, - "tiny_free_list_remove_ptr: Internal invariant broken (prev ptr of next): " - "ptr=%p, next_prev=%p\n", ptr, next_prev); - __builtin_unreachable(); // Always crashes in malloc_zone_error(). - } - - // We know free_ptr is already checksummed, so we don't need to do it - // again. - next->previous = free_ptr->previous; - } - - tiny_region_t region = TINY_REGION_FOR_PTR(ptr); - region_free_blocks_t *free_blocks = ®ion->free_blocks_by_slot[slot]; - uint16_t this_block_index = TINY_INDEX_FOR_PTR(ptr); - - boolean_t is_first = free_blocks->first_block == this_block_index + 1; - boolean_t is_last = free_blocks->last_block == this_block_index + 1; - - if (is_first && is_last) { - // Removing the one and only item on the list. Set both block indices to 0. 
- free_blocks->first_block = free_blocks->last_block = 0; - } else if (is_first) { - MALLOC_ASSERT(next); - free_blocks->first_block = TINY_INDEX_FOR_PTR(next) + 1; - } else if (is_last) { - MALLOC_ASSERT(previous); - free_blocks->last_block = TINY_INDEX_FOR_PTR(previous) + 1; - } -} - -void -tiny_finalize_region(rack_t *rack, magazine_t *tiny_mag_ptr) -{ - void *last_block, *previous_block; - uint32_t *last_header; - msize_t last_msize, previous_msize, last_index; - - // It is possible that the block prior to the last block in the region has - // been free'd, but was not coalesced with the free bytes at the end of the - // block, since we treat the bytes at the end of the region as "in use" in - // the meta headers. Attempt to coalesce the last block with the previous - // block, so we don't violate the "no consecutive free blocks" invariant. - // - // FIXME: Need to investigate how much work would be required to increase - // 'mag_bytes_free_at_end' when freeing the preceding block, rather - // than performing this workaround. - // - - if (tiny_mag_ptr->mag_bytes_free_at_end) { - last_block = (void *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end); - last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end); - last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block); - last_index = TINY_INDEX_FOR_PTR(last_block); - - // Before anything we transform any remaining mag_bytes_free_at_end into a - // regular free block. We take special care here to update the bitfield - // information, since we are bypassing the normal free codepath. If there - // is more than one quanta worth of memory in mag_bytes_free_at_end, then - // there will be two block headers: - // 1) header for the free space at end, msize = 1 - // 2) header inserted by set_tiny_meta_header_in_use after block - // We must clear the second one so that when the free block's size is - // queried, we do not think the block is only 1 quantum in size because - // of the second set header bit. 
- if (last_index != (NUM_TINY_BLOCKS - 1)) { - BITARRAY_CLR(last_header, (last_index + 1)); - } - - previous_block = tiny_previous_preceding_free(last_block, &previous_msize); - if (previous_block) { - set_tiny_meta_header_middle(last_block); - tiny_free_list_remove_ptr(rack, tiny_mag_ptr, previous_block, previous_msize); - last_block = previous_block; - last_msize += previous_msize; - } - - // splice last_block into the free list - tiny_free_list_add_ptr(rack, tiny_mag_ptr, last_block, last_msize); - tiny_mag_ptr->mag_bytes_free_at_end = 0; - } - -#if CONFIG_ASLR_INTERNAL - // Coalesce the big free block at start with any following free blocks - if (tiny_mag_ptr->mag_bytes_free_at_start) { - last_block = TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region); - last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start); - - void *next_block = (void *)((uintptr_t)last_block + tiny_mag_ptr->mag_bytes_free_at_start); - - // clear the in use bit we were using to mark the end of the big start block - set_tiny_meta_header_middle((void *)((uintptr_t)next_block - TINY_QUANTUM)); - - // Coalesce the big start block with any following free blocks - if (tiny_meta_header_is_free(next_block)) { - msize_t next_msize = get_tiny_free_size(next_block); - set_tiny_meta_header_middle(next_block); - tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize); - last_msize += next_msize; - } - - // splice last_block into the free list - tiny_free_list_add_ptr(rack, tiny_mag_ptr, last_block, last_msize); - tiny_mag_ptr->mag_bytes_free_at_start = 0; - } -#endif - - tiny_mag_ptr->mag_last_region = NULL; -} - -int -tiny_free_detach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r) -{ - uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)TINY_REGION_END(r); - boolean_t is_free; - msize_t msize; - region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(r); - - while (current < limit) { - msize = get_tiny_meta_header((void *)current, &is_free); - if (is_free && !msize && (current == start)) { - // first block is all free - break; - } - if (!msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** tiny_free_detach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - if (is_free) { - tiny_free_list_remove_ptr(rack, tiny_mag_ptr, (void *)current, msize); - } - current += TINY_BYTES_FOR_MSIZE(msize); - } - return trailer->objects_in_use; -} - -size_t -tiny_free_reattach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r) -{ - uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r); - uintptr_t current = start; - uintptr_t limit = (uintptr_t)TINY_REGION_END(r); - boolean_t is_free; - msize_t msize; - size_t bytes_used = REGION_TRAILER_FOR_TINY_REGION(r)->bytes_used; - - while (current < limit) { - msize = get_tiny_meta_header((void *)current, &is_free); - if (is_free && !msize && (current == start)) { - // first block is all free - break; - } - if (!msize) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** tiny_free_reattach_region error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free); -#endif - break; - } - if (is_free) { - tiny_free_list_add_ptr(rack, tiny_mag_ptr, (void *)current, msize); - } - current += TINY_BYTES_FOR_MSIZE(msize); - } - return bytes_used; -} - -typedef struct { - uint8_t pnum, size; -} tiny_pg_pair_t; - -void -tiny_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r) -{ - uintptr_t start = 
(uintptr_t)TINY_REGION_ADDRESS(r);
-	uintptr_t current = start;
-	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
-	boolean_t is_free;
-	msize_t msize;
-	tiny_pg_pair_t advisory[((TINY_REGION_PAYLOAD_BYTES + vm_kernel_page_size - 1) >> vm_kernel_page_shift) >>
-			1]; // 256 bytes, stack allocated
-	int advisories = 0;
-
-	// Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list
-	// management data.
-	while (current < limit) {
-		msize = get_tiny_meta_header((void *)current, &is_free);
-		if (is_free && !msize && (current == start)) {
-			// first block is all free
-#if DEBUG_MALLOC
-			malloc_report(ASL_LEVEL_INFO, "*** tiny_free_scan_madvise_free first block is all free! %p: msize=%d is_free=%d\n", (void *)current,
-					msize, is_free);
-#endif
-			uintptr_t pgLo = round_page_kernel(start + sizeof(tiny_free_list_t) + sizeof(msize_t));
-			uintptr_t pgHi = trunc_page_kernel(start + TINY_REGION_SIZE - sizeof(msize_t));
-
-			if (pgLo < pgHi) {
-				advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift;
-				advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift;
-				advisories++;
-			}
-			break;
-		}
-		if (!msize) {
-#if DEBUG_MALLOC
-			malloc_report(ASL_LEVEL_ERR, "*** tiny_free_scan_madvise_free error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free);
-#endif
-			break;
-		}
-		if (is_free) {
-			uintptr_t pgLo = round_page_kernel(current + sizeof(tiny_free_list_t) + sizeof(msize_t));
-			uintptr_t pgHi = trunc_page_kernel(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
-
-			if (pgLo < pgHi) {
-				advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift;
-				advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift;
-				advisories++;
-			}
-		}
-		current += TINY_BYTES_FOR_MSIZE(msize);
-	}
-
-	if (advisories > 0) {
-		int i;
-
-		// So long as the following hold for this region:
-		// (1) No malloc()'s are ever performed from the depot (hence free pages remain free),
-		// (2) The region is not handed over to a per-CPU magazine (where malloc()'s could be performed),
-		// (3) The entire region is not munmap()'d (so the madvise calls are applied to the intended addresses),
-		// then the madvise opportunities collected just above can be applied outside all locks.
-		// (1) is ensured by design, (2) and (3) are ensured by bumping the globally visible counter node->pinned_to_depot.
-
-		OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
-		SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);
-		for (i = 0; i < advisories; ++i) {
-			uintptr_t addr = (advisory[i].pnum << vm_kernel_page_shift) + start;
-			size_t size = advisory[i].size << vm_kernel_page_shift;
-
-			mvm_madvise_free(rack, r, addr, addr + size, NULL, rack->debug_flags & MALLOC_DO_SCRIBBLE);
-		}
-		SZONE_MAGAZINE_PTR_LOCK(depot_ptr);
-		OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
-	}
-}
-
-#if CONFIG_RECIRC_DEPOT
-static region_t
-tiny_find_msize_region(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
-{
-	tiny_free_list_t *ptr;
-	grain_t slot = tiny_slot_from_msize(msize);
-	free_list_t *free_list = tiny_mag_ptr->mag_free_list;
-	free_list_t *the_slot = free_list + slot;
-	free_list_t *limit;
-#if defined(__LP64__)
-	uint64_t bitmap;
-#else
-	uint32_t bitmap;
-#endif
-	// Assumes we've locked the magazine
-	CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
-
-	// Look for an exact match by checking the freelist for this msize.
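Past this point the function falls back to the magazine's slot bitmap: mask off every slot too small for the request, then count trailing zeros to find the smallest free list that can satisfy it. A reduced sketch of that lookup for a 32-slot bitmap (names and the builtin choice are illustrative):

    #include <stdint.h>

    /* Bit n set => free list for slot n is non-empty. Returns -1 if nothing fits. */
    static inline int
    first_usable_slot(uint32_t bitmap, unsigned min_slot)
    {
        if (min_slot >= 32) {
            return -1;
        }
        uint32_t masked = bitmap & ~((1u << min_slot) - 1); /* drop too-small slots */
        if (!masked) {
            return -1;
        }
        return __builtin_ctz(masked); /* lowest surviving bit = best-fit slot */
    }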
- ptr = the_slot->p; - if (ptr) { - return TINY_REGION_FOR_PTR(ptr); - } - - // Mask off the bits representing slots holding free blocks smaller than the - // size we need. If there are no larger free blocks, try allocating from - // the free space at the end of the tiny region. -#if defined(__LP64__) - bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~((1ULL << slot) - 1); -#else - bitmap = tiny_mag_ptr->mag_bitmap[0] & ~((1 << slot) - 1); -#endif - if (!bitmap) { - return NULL; - } - - slot = BITMAPV_CTZ(bitmap); - limit = free_list + NUM_TINY_SLOTS; - free_list += slot; - - if (free_list < limit) { - ptr = free_list->p; - if (ptr) { - return TINY_REGION_FOR_PTR(ptr); - } else { - /* Shouldn't happen. Fall through to look at last slot. */ -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in tiny_find_msize_region(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - } - } - - // We are now looking at the last slot, which contains blocks equal to, or - // due to coalescing of free blocks, larger than NUM_TINY_SLOTS * tiny quantum size. - ptr = limit->p; - if (ptr) { - return TINY_REGION_FOR_PTR(ptr); - } - - return NULL; -} -#endif // CONFIG_RECIRC_DEPOT - -#if CONFIG_MADVISE_PRESSURE_RELIEF -void -tiny_madvise_pressure_relief(rack_t *rack) -{ - mag_index_t mag_index; - magazine_t *tiny_depot_ptr = (&rack->magazines[DEPOT_MAGAZINE_INDEX]); - - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - size_t index; - for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) { - SZONE_LOCK(TINY_SZONE_FROM_RACK(rack)); - - region_t tiny = rack->region_generation->hashed_regions[index]; - if (!tiny || tiny == HASHRING_REGION_DEALLOCATED) { - SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack)); - continue; - } - - magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_TINY_REGION(tiny), - MAGAZINE_INDEX_FOR_TINY_REGION(tiny)); - SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack)); - - /* Ordering is important here, the magazine of a region may potentially change - * during mag_lock_zine_for_region_trailer, so src_mag_index must be taken - * after we've obtained the lock. - */ - mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny); - - /* We can (and must) ignore magazines that are already in the recirc depot. */ - if (src_mag_index == DEPOT_MAGAZINE_INDEX) { - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - continue; - } - - if (tiny == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) { - tiny_finalize_region(rack, mag_ptr); - } - - /* Because this region is currently in use, we can't safely madvise it while - * it's attached to the magazine. For this operation we have to remove it from - * the current mag, attach it to the depot and then madvise. - */ - - recirc_list_extract(rack, mag_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny)); - int objects_in_use = tiny_free_detach_region(rack, mag_ptr, tiny); - - SZONE_MAGAZINE_PTR_LOCK(tiny_depot_ptr); - MAGAZINE_INDEX_FOR_TINY_REGION(tiny) = DEPOT_MAGAZINE_INDEX; - REGION_TRAILER_FOR_TINY_REGION(tiny)->pinned_to_depot = 0; - - size_t bytes_inplay = tiny_free_reattach_region(rack, tiny_depot_ptr, tiny); - - /* Fix up the metadata of the target magazine while the region is in the depot. */ - mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; - mag_ptr->mag_num_objects -= objects_in_use; - - /* Now we can drop the magazine lock of the source mag. 
*/ - SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr); - - tiny_depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - tiny_depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; - tiny_depot_ptr->mag_num_objects -= objects_in_use; - - recirc_list_splice_last(rack, tiny_depot_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny)); - - /* Actually do the scan, done holding the depot lock, the call will drop the lock - * around the actual madvise syscalls. - */ - tiny_free_scan_madvise_free(rack, tiny_depot_ptr, tiny); - - /* Now the region is in the recirc depot, the next allocations to require more - * blocks will come along and take one of these regions back out of the depot. - * As OS X madvise's reuse on an per-region basis, we leave as many of these - * regions in the depot as possible after memory pressure. - */ - SZONE_MAGAZINE_PTR_UNLOCK(tiny_depot_ptr); - } - } -} -#endif // CONFIG_MADVISE_PRESSURE_RELIEF - -static MALLOC_INLINE void -tiny_madvise_free_range_no_lock(rack_t *rack, - magazine_t *tiny_mag_ptr, - region_t region, - void *headptr, - size_t headsize, - void *ptr, - msize_t msize) -{ - region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region); - - // Lock on tiny_magazines[mag_index] is already held here - // Calculate the first page in the coalesced block that would be safe to mark MADV_FREE - size_t free_header_size = sizeof(tiny_free_list_t) + sizeof(msize_t); - uintptr_t safe_ptr = (uintptr_t)ptr + free_header_size; - uintptr_t round_safe = round_page_kernel(safe_ptr); - - // Calculate the last page in the coalesced block that would be safe to mark MADV_FREE - size_t free_tail_size = sizeof(msize_t); - uintptr_t safe_extent = (uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize) - free_tail_size; - uintptr_t trunc_extent = trunc_page_kernel(safe_extent); - - // The newly freed block may complete a span of bytes that cover a page. Mark it with MADV_FREE. - if (round_safe < trunc_extent) { // Coalesced area covers a page (perhaps many) - // Extend the freed block by the free region header and tail sizes to include pages - // we may have coalesced that no longer host free region tails and headers. - // This may extend over in-use ranges, but the MIN/MAX clamping below will fix that up. - uintptr_t lo = trunc_page_kernel((uintptr_t)headptr - free_tail_size); - uintptr_t hi = round_page_kernel((uintptr_t)headptr + headsize + free_header_size); - - uintptr_t free_lo = MAX(round_safe, lo); - uintptr_t free_hi = MIN(trunc_extent, hi); - - if (free_lo < free_hi) { - tiny_free_list_remove_ptr(rack, tiny_mag_ptr, ptr, msize); - set_tiny_meta_header_in_use(ptr, msize); - - OSAtomicIncrement32Barrier(&(node->pinned_to_depot)); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - mvm_madvise_free(rack, region, free_lo, free_hi, &rack->last_madvise, rack->debug_flags & MALLOC_DO_SCRIBBLE); - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - OSAtomicDecrement32Barrier(&(node->pinned_to_depot)); - - set_tiny_meta_header_free(ptr, msize); - tiny_free_list_add_ptr(rack, tiny_mag_ptr, ptr, msize); - } - } -} - -#if CONFIG_RECIRC_DEPOT -static boolean_t -tiny_get_region_from_depot(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? 
*/ - if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary - return 0; - } - -#if DEBUG_MALLOC - if (DEPOT_MAGAZINE_INDEX == mag_index) { - malloc_zone_error(rack->debug_flags, true, "tiny_get_region_from_depot called for magazine index -1\n"); - return 0; - } -#endif - - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - - // Appropriate a Depot'd region that can satisfy requested msize. - region_trailer_t *node; - region_t sparse_region; - - while (1) { - sparse_region = tiny_find_msize_region(rack, depot_ptr, DEPOT_MAGAZINE_INDEX, msize); - if (NULL == sparse_region) { // Depot empty? - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - return 0; - } - - node = REGION_TRAILER_FOR_TINY_REGION(sparse_region); - if (0 >= node->pinned_to_depot) { - break; - } - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - } - - // disconnect node from Depot - recirc_list_extract(rack, depot_ptr, node); - - // Iterate the region pulling its free entries off the (locked) Depot's free list - int objects_in_use = tiny_free_detach_region(rack, depot_ptr, sparse_region); - - // Transfer ownership of the region - MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = mag_index; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on its new (locked) magazine's free list - size_t bytes_inplay = tiny_free_reattach_region(rack, tiny_mag_ptr, sparse_region); - - depot_ptr->mag_num_bytes_in_objects -= bytes_inplay; - depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects -= objects_in_use; - - tiny_mag_ptr->mag_num_bytes_in_objects += bytes_inplay; - tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; - tiny_mag_ptr->mag_num_objects += objects_in_use; - - // connect to magazine as last node - recirc_list_splice_last(rack, tiny_mag_ptr, node); - - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - - // DTrace USDT Probe - MAGMALLOC_DEPOTREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, - TINY_REGION_SIZE, (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); - - return 1; -} - -static region_t -tiny_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_trailer_t *node) -{ - if (0 < node->bytes_used || 0 < node->pinned_to_depot || - depot_ptr->recirculation_entries < recirc_retained_regions) - { - return NULL; - } - - // disconnect node from Depot - recirc_list_extract(rack, depot_ptr, node); - - // Iterate the region pulling its free entries off the (locked) Depot's free list - region_t sparse_region = TINY_REGION_FOR_PTR(node); - int objects_in_use = tiny_free_detach_region(rack, depot_ptr, sparse_region); - - if (0 == objects_in_use) { - // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED. - // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not. 
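HASHRING_REGION_DEALLOCATED is a tombstone value: in an open-addressed table, overwriting a deleted slot with the never-used marker would terminate probe chains early and strand entries that collided past it. A toy linear-probing lookup shows the distinction (the table layout here is an assumption for illustration, not libmalloc's hash ring):

    #include <stddef.h>

    #define EMPTY     ((void *)0) /* never used: a probe may stop here */
    #define TOMBSTONE ((void *)1) /* deleted: a probe must keep going */

    static void *
    probe_lookup(void **table, size_t nslots, size_t start, void *key)
    {
        for (size_t i = 0; i < nslots; i++) {
            void *e = table[(start + i) % nslots];
            if (e == EMPTY) {
                return NULL; /* a true hole ends the collision chain */
            }
            if (e != TOMBSTONE && e == key) {
                return e; /* found; tombstones were probed straight through */
            }
        }
        return NULL;
    }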
-		rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
-				rack->region_generation->num_regions_allocated,
-				rack->region_generation->num_regions_allocated_shift,
-				sparse_region);
-
-		if (NULL == pSlot) {
-			malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
-			return NULL;
-		}
-		*pSlot = HASHRING_REGION_DEALLOCATED;
-		depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
-
-		// Atomically increment num_regions_dealloc
-#ifdef __LP64__
-		OSAtomicIncrement64(&rack->num_regions_dealloc);
-#else
-		OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
-#endif
-
-		// Caller will transfer ownership of the region back to the OS with no locks held
-		MAGMALLOC_DEALLOCREGION(TINY_SZONE_FROM_RACK(rack), (void *)sparse_region, TINY_REGION_SIZE); // DTrace USDT Probe
-		return sparse_region;
-	} else {
-		malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
-		return NULL;
-	}
-}
-
-static boolean_t
-tiny_free_do_recirc_to_depot(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index)
-{
-	// The entire magazine crossed the "emptiness threshold". Transfer a region
-	// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e.
-	// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
-	// Start from the last node in order to keep the regions created earlier at the front of the list for
-	// as long as possible. This helps reduce fragmentation.
-	region_trailer_t *node = tiny_mag_ptr->lastNode;
-
-	while (node && (!node->recirc_suitable || node->pinned_to_depot)) {
-		// If we skip a node due to pinned_to_depot being non-zero, it must be
-		// because another thread is madvising the same region in
-		// tiny_madvise_free_range_no_lock(), called from tiny_free_no_lock().
-		// When that's done, the same thread will enter tiny_free_try_recirc_to_depot()
-		// for the same region, which will come back here. So this just defers
-		// recirculation of the region.
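pinned_to_depot acts as a pin count on the region: it is raised before the magazine lock is dropped around the madvise syscalls, and any thread that wants to move or unmap the region must first observe it at zero (the loop above simply skips pinned nodes and retries later). A compressed sketch of that protocol with C11 atomics standing in for the OSAtomic calls (lock and madvise steps are placeholders):

    #include <stdatomic.h>

    typedef struct {
        atomic_int pinned; /* > 0 while some thread madvises inside the region */
    } trailer_t;

    /* Caller holds the magazine lock on entry and exit. */
    static void
    madvise_region_pages(trailer_t *t)
    {
        atomic_fetch_add(&t->pinned, 1); /* pin before dropping the lock */
        /* unlock(magazine); madvise each collected range; lock(magazine); */
        atomic_fetch_sub(&t->pinned, 1); /* unpin; movers may proceed at zero */
    }

    static int
    safe_to_move(trailer_t *t)
    {
        return atomic_load(&t->pinned) == 0; /* else defer and retry */
    }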
- node = node->prev; - } - - if (!node) { -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_INFO, "*** tiny_free_do_recirc_to_depot end of list\n"); -#endif - return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - } - - region_t sparse_region = TINY_REGION_FOR_PTR(node); - - // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start - if (sparse_region == tiny_mag_ptr->mag_last_region && - (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start)) { - tiny_finalize_region(rack, tiny_mag_ptr); - } - - // disconnect "suitable" node from magazine - recirc_list_extract(rack, tiny_mag_ptr, node); - - // Iterate the region pulling its free entries off its (locked) magazine's free list - int objects_in_use = tiny_free_detach_region(rack, tiny_mag_ptr, sparse_region); - magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]); - - // hand over the region to the (locked) Depot - SZONE_MAGAZINE_PTR_LOCK(depot_ptr); - // this will cause tiny_free_list_add_ptr called by tiny_free_reattach_region to use - // the depot as its target magazine, rather than magazine formerly associated with sparse_region - MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX; - node->pinned_to_depot = 0; - - // Iterate the region putting its free entries on Depot's free list - size_t bytes_inplay = tiny_free_reattach_region(rack, depot_ptr, sparse_region); - - tiny_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; - tiny_mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; - tiny_mag_ptr->mag_num_objects -= objects_in_use; - - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); // Unlock the originating magazine - - depot_ptr->mag_num_bytes_in_objects += bytes_inplay; - depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; - depot_ptr->mag_num_objects += objects_in_use; - - // connect to Depot as last node - recirc_list_splice_last(rack, depot_ptr, node); - - MAGMALLOC_RECIRCREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE, - (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe - -#if !CONFIG_AGGRESSIVE_MADVISE - // Mark free'd dirty pages with MADV_FREE to reduce memory pressure - tiny_free_scan_madvise_free(rack, depot_ptr, sparse_region); -#endif - - // If the region is entirely empty vm_deallocate() it outside the depot lock - region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(rack, depot_ptr, node); - SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr); - if (r_dealloc) { - mvm_deallocate_pages(r_dealloc, TINY_REGION_SIZE, 0); - } - return FALSE; // Caller need not unlock the originating magazine -} - -static MALLOC_INLINE boolean_t -tiny_free_try_recirc_to_depot(rack_t *rack, - magazine_t *tiny_mag_ptr, - mag_index_t mag_index, - region_t region, - void *headptr, - size_t headsize, - void *ptr, - msize_t msize) -{ - region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region); - size_t bytes_used = node->bytes_used; - - /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ - if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary - /* NOTHING */ - return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr) - } else if (DEPOT_MAGAZINE_INDEX != mag_index) { - // Emptiness discriminant - if (tiny_region_below_recirc_threshold(region)) { - /* Region has crossed threshold from density to sparsity. Mark it "suitable" on the - * recirculation candidates list. 
*/ - node->recirc_suitable = TRUE; - } else { - /* After this free, we've found the region is still dense, so it must have been even more so before - * the free. That implies the region is already correctly marked. Do nothing. */ - } - - // Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region - // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e - // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list. - if (tiny_magazine_below_recirc_threshold(tiny_mag_ptr)) { - return tiny_free_do_recirc_to_depot(rack, tiny_mag_ptr, mag_index); - } - } else { -#if !CONFIG_AGGRESSIVE_MADVISE - // We are free'ing into the depot, so madvise as we do so unless we were madvising every incoming - // allocation anyway. - tiny_madvise_free_range_no_lock(rack, tiny_mag_ptr, region, headptr, headsize, ptr, msize); -#endif - - if (0 < bytes_used || 0 < node->pinned_to_depot) { - /* Depot'd region is still live. Leave it in place on the Depot's recirculation list - * so as to avoid thrashing between the Depot's free list and a magazines's free list - * with detach_region/reattach_region */ - } else { - /* Depot'd region is just now empty. Consider return to OS. */ - region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(rack, tiny_mag_ptr, node); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - if (r_dealloc) { - mvm_deallocate_pages(r_dealloc, TINY_REGION_SIZE, 0); - } - return FALSE; // Caller need not unlock - } - } - return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr) -} -#endif // CONFIG_RECIRC_DEPOT - -boolean_t -tiny_free_no_lock(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize, boolean_t partial_free) -{ - void *original_ptr = ptr; - size_t original_size = TINY_BYTES_FOR_MSIZE(msize); - void *next_block = ((unsigned char *)ptr + original_size); - msize_t previous_msize, next_msize; - void *previous; - tiny_free_list_t *big_free_block; - tiny_free_list_t *after_next_block; - tiny_free_list_t *before_next_block; - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); - } - if (!msize) { - malloc_zone_error(rack->debug_flags, true, - "trying to free tiny block that is too small in tiny_free_no_lock(), ptr=%p, msize=%d\n", - ptr, msize); - } -#endif - - // Check that the region cookie is intact. 
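The "emptiness threshold" used throughout this recirculation logic is a simple density test: a region (or a whole magazine) qualifies once at most some fraction of its payload is still in use. The actual fractions and predicates live in thresholds.h; a hedged sketch of the discriminant with a made-up 1/4 fraction:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative only: "empty enough" once <= 1/4 of the payload is in use. */
    #define DENSITY_NUM 1
    #define DENSITY_DEN 4

    static inline bool
    below_recirc_threshold(size_t bytes_used, size_t payload_bytes)
    {
        /* cross-multiplied to stay in integer arithmetic */
        return bytes_used * DENSITY_DEN <= payload_bytes * DENSITY_NUM;
    }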
-	region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(region);
-	region_check_cookie(region, trailer);
-
-	// We try to coalesce this block with the preceding one
-	previous = tiny_previous_preceding_free(ptr, &previous_msize);
-	if (previous) {
-#if DEBUG_MALLOC
-		if (LOG(szone, ptr) || LOG(szone, previous)) {
-			malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
-		}
-#endif
-
-		// clear the meta_header since this is no longer the start of a block
-		set_tiny_meta_header_middle(ptr);
-		tiny_free_list_remove_ptr(rack, tiny_mag_ptr, previous, previous_msize);
-		ptr = previous;
-		msize += previous_msize;
-	}
-	// We try to coalesce with the next block
-	if ((next_block < TINY_REGION_END(region)) && tiny_meta_header_is_free(next_block)) {
-		next_msize = get_tiny_free_size(next_block);
-#if DEBUG_MALLOC
-		if (LOG(szone, ptr) || LOG(szone, next_block)) {
-			malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n", ptr, msize,
-					next_block, next_msize);
-		}
-#endif
-		// If we are coalescing with the next block, and the next block is in
-		// the last slot of the free list, then we optimize this case here to
-		// avoid removing next_block from the slot NUM_TINY_SLOTS and then adding ptr back
-		// to slot NUM_TINY_SLOTS.
-		if (next_msize > NUM_TINY_SLOTS) {
-			msize += next_msize;
-
-			big_free_block = (tiny_free_list_t *)next_block;
-			after_next_block = free_list_unchecksum_ptr(rack, &big_free_block->next);
-			before_next_block = free_list_unchecksum_ptr(rack, &big_free_block->previous);
-
-			if (!before_next_block) {
-				tiny_mag_ptr->mag_free_list[NUM_TINY_SLOTS].p = ptr;
-			} else {
-				before_next_block->next.u = free_list_checksum_ptr(rack, ptr);
-			}
-
-			if (after_next_block) {
-				after_next_block->previous.u = free_list_checksum_ptr(rack, ptr);
-			}
-
-			// we don't need to checksum these since they are already checksummed
-			((tiny_free_list_t *)ptr)->previous = big_free_block->previous;
-			((tiny_free_list_t *)ptr)->next = big_free_block->next;
-
-			// clear the meta_header to enable coalescing backwards
-			set_tiny_meta_header_middle(big_free_block);
-			set_tiny_meta_header_free(ptr, msize);
-
-			uint16_t next_block_index = TINY_INDEX_FOR_PTR(big_free_block) + 1;
-			uint16_t ptr_index = TINY_INDEX_FOR_PTR(ptr) + 1;
-			const grain_t slot = NUM_TINY_SLOTS;
-			region_free_blocks_t *free_blocks = &((tiny_region_t)region)->free_blocks_by_slot[slot];
-			if (free_blocks->first_block == next_block_index) {
-				free_blocks->first_block = ptr_index;
-			}
-			if (free_blocks->last_block == next_block_index) {
-				free_blocks->last_block = ptr_index;
-			}
-			goto tiny_free_ending;
-		}
-		tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize);
-		set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
-		msize += next_msize;
-	}
-
-	// The tiny cache already scribbles free blocks as they go through the
-	// cache whenever msize < TINY_QUANTUM, so we do not need to do it here.
- if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize && (msize >= TINY_QUANTUM)) { - memset(ptr, SCRABBLE_BYTE, TINY_BYTES_FOR_MSIZE(msize)); - } - - tiny_free_list_add_ptr(rack, tiny_mag_ptr, ptr, msize); - -tiny_free_ending: - // we use original_size and not msize to avoid double counting the coalesced blocks - tiny_mag_ptr->mag_num_bytes_in_objects -= original_size; - - // Update this region's bytes in use count - size_t bytes_used = trailer->bytes_used - original_size; - trailer->bytes_used = (unsigned int)bytes_used; - - // Partial free accounts for the case where we allocate a block for - // posix_memalign and then free some range of bytes at the start and/or - // the end. In that case, we aren't changing the number of allocated objects. - // Similarly for realloc() in the case where we shrink in place. - if (!partial_free) { - trailer->objects_in_use--; - tiny_mag_ptr->mag_num_objects--; - } -#if CONFIG_AGGRESSIVE_MADVISE - // Platforms that want to madvise every freed allocation do so here, even if we continue - // on to use the recirc depot after. - tiny_madvise_free_range_no_lock(rack, tiny_mag_ptr, region, original_ptr, original_size, ptr, msize); -#endif - - // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr) if this function - // returns TRUE. - boolean_t needs_unlock = TRUE; - -#if CONFIG_RECIRC_DEPOT - needs_unlock = tiny_free_try_recirc_to_depot(rack, tiny_mag_ptr, mag_index, region, original_ptr, original_size, ptr, msize); -#endif // CONFIG_RECIRC_DEPOT - return needs_unlock; -} - -// Allocates from the last region or a freshly allocated region -static void * -tiny_malloc_from_region_no_lock(rack_t *rack, - magazine_t *tiny_mag_ptr, - mag_index_t mag_index, - msize_t msize, - void *aligned_address) -{ - void *ptr; - - // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start - if (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start) { - tiny_finalize_region(rack, tiny_mag_ptr); - } - - // We set the unused bits of the header in the last pair to be all ones, and those of the inuse to zeroes. -#if NUM_TINY_BLOCKS & 31 - const uint32_t header = 0xFFFFFFFFU << (NUM_TINY_BLOCKS & 31); -#else - const uint32_t header = 0; -#endif - ((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS - 1].header = header; - ((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS - 1].inuse = 0; - - // Tag the region at "aligned_address" as belonging to us, - // and so put it under the protection of the magazine lock we are holding. - // Do this before advertising "aligned_address" on the hash ring(!) 
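The header initialization near the top of this function forces every bit past NUM_TINY_BLOCKS in the last 32-bit metadata word to one, so metadata scans never walk off the end of the region. A small worked check of that mask (the block count here is a made-up example, not NUM_TINY_BLOCKS):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* Suppose a region held 1000 blocks: 1000 & 31 == 8 valid bits remain
         * in the final word, and the other 24 must read as "in use" padding. */
        unsigned nblocks = 1000;
        uint32_t tail_header = 0xFFFFFFFFU << (nblocks & 31);

        assert((tail_header & 0x000000FFU) == 0);           /* real block bits clear */
        assert((tail_header & 0xFFFFFF00U) == 0xFFFFFF00U); /* padding bits set */
        return 0;
    }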
- MAGAZINE_INDEX_FOR_TINY_REGION(aligned_address) = mag_index; - - // Insert the new region into the hash ring - rack_region_insert(rack, (region_t)aligned_address); - - tiny_mag_ptr->mag_last_region = aligned_address; - BYTES_USED_FOR_TINY_REGION(aligned_address) = TINY_BYTES_FOR_MSIZE(msize); - OBJECTS_IN_USE_FOR_TINY_REGION(aligned_address) = 1; - -#if CONFIG_ASLR_INTERNAL - int offset_msize = malloc_entropy[0] & TINY_ENTROPY_MASK; -#if DEBUG_MALLOC - if (getenv("MallocASLRForce")) { - offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & TINY_ENTROPY_MASK; - } - if (getenv("MallocASLRPrint")) { - malloc_report(ASL_LEVEL_INFO, "Region: %p offset: %d\n", aligned_address, offset_msize); - } -#endif -#else - int offset_msize = 0; -#endif - ptr = (void *)((uintptr_t)aligned_address + TINY_BYTES_FOR_MSIZE(offset_msize)); - set_tiny_meta_header_in_use(ptr, msize); - tiny_mag_ptr->mag_num_objects++; - tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(msize); - tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; - - // We put a header on the last block so that it appears in use (for coalescing, etc...) - set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize))); - tiny_mag_ptr->mag_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize - offset_msize); - -#if CONFIG_ASLR_INTERNAL - // Put a header on the previous block for same reason - tiny_mag_ptr->mag_bytes_free_at_start = TINY_BYTES_FOR_MSIZE(offset_msize); - if (offset_msize) { - set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr - TINY_QUANTUM)); - } -#else - tiny_mag_ptr->mag_bytes_free_at_start = 0; -#endif - - // connect to magazine as last node - recirc_list_splice_last(rack, tiny_mag_ptr, REGION_TRAILER_FOR_TINY_REGION(aligned_address)); - -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize); - } -#endif - return ptr; -} - -void * -tiny_memalign(szone_t *szone, size_t alignment, size_t size, size_t span) -{ - msize_t mspan = TINY_MSIZE_FOR_BYTES(span + TINY_QUANTUM - 1); - void *p = tiny_malloc_should_clear(&szone->tiny_rack, mspan, 0); - - if (NULL == p) { - return NULL; - } - - size_t offset = ((uintptr_t)p) & (alignment - 1); // p % alignment - size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment - - msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); - msize_t mpad = TINY_MSIZE_FOR_BYTES(pad + TINY_QUANTUM - 1); - msize_t mwaste = mspan - msize - mpad; // excess blocks - - if (mpad > 0) { - void *q = (void *)(((uintptr_t)p) + pad); - - // Mark q as a block header and in-use, thus creating two blocks. - magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines, - REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p))); - set_tiny_meta_header_in_use(q, msize); - tiny_mag_ptr->mag_num_objects++; - - // set_tiny_meta_header_in_use() "reaffirms" the block_header on the *following* block, so - // now set its in_use bit as well. But only if its within the original allocation made above. 
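tiny_memalign() below is the classic over-allocate-and-trim scheme: allocate a span large enough to contain an aligned block wherever it lands, hand back the aligned interior, and free the leading pad and trailing waste. The pointer arithmetic reduces to this standalone sketch (names are illustrative; alignment must be a power of two and span must cover size plus the worst-case pad):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Split a span at p into: pad | aligned block of `size` | waste. */
    static void
    split_for_alignment(uintptr_t p, size_t span, size_t size, size_t alignment,
            uintptr_t *block, size_t *pad, size_t *waste)
    {
        assert((alignment & (alignment - 1)) == 0); /* power of two */
        *pad = (alignment - (p & (alignment - 1))) & (alignment - 1); /* 0 if aligned */
        *block = p + *pad;           /* what the caller actually receives */
        assert(*pad + size <= span); /* span must absorb the shift */
        *waste = span - *pad - size; /* trailing excess, freed like the pad */
    }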
- if (mwaste > 0) { - BITARRAY_SET(TINY_INUSE_FOR_HEADER(TINY_BLOCK_HEADER_FOR_PTR(q)), TINY_INDEX_FOR_PTR(q) + msize); - } - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - - // Give up mpad blocks beginning at p to the tiny free list - free_tiny(&szone->tiny_rack, p, TINY_REGION_FOR_PTR(p), TINY_BYTES_FOR_MSIZE(mpad), true); - - p = q; // advance p to the desired alignment - } - - if (mwaste > 0) { - void *q = (void *)(((uintptr_t)p) + TINY_BYTES_FOR_MSIZE(msize)); - // Mark q as block header and in-use, thus creating two blocks. - magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines, - REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)), - MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p))); - set_tiny_meta_header_in_use(q, mwaste); - tiny_mag_ptr->mag_num_objects++; - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - - // Give up mwaste blocks beginning at q to the tiny free list - free_tiny(&szone->tiny_rack, q, TINY_REGION_FOR_PTR(q), TINY_BYTES_FOR_MSIZE(mwaste), true); - } - - return p; // p has the desired size and alignment, and can later be free()'d -} - -boolean_t -tiny_claimed_address(rack_t *rack, void *ptr) -{ - region_t r = tiny_region_for_ptr_no_lock(rack, ptr); - return r && ptr < TINY_REGION_END(r); -} - -void * -tiny_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size) -{ - msize_t new_msize = TINY_MSIZE_FOR_BYTES(new_good_size); - msize_t mshrinkage = TINY_MSIZE_FOR_BYTES(old_size) - new_msize; - - if (mshrinkage) { - void *q = (void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(new_msize)); - magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)), - MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))); - - // Mark q as block header and in-use, thus creating two blocks. - set_tiny_meta_header_in_use(q, mshrinkage); - tiny_mag_ptr->mag_num_objects++; - - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - free_tiny(rack, q, TINY_REGION_FOR_PTR(q), 0, true); - } - return ptr; -} - -boolean_t -tiny_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size) -{ - // returns 1 on success - msize_t index; - msize_t old_msize; - unsigned next_index; - void *next_block; - boolean_t is_free; - msize_t next_msize, coalesced_msize, leftover_msize, new_msize; - void *leftover; - region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); - - index = TINY_INDEX_FOR_PTR(ptr); - old_msize = TINY_MSIZE_FOR_BYTES(old_size); - new_msize = TINY_MSIZE_FOR_BYTES(new_size + TINY_QUANTUM - 1); - next_index = index + old_msize; - - if (next_index >= NUM_TINY_BLOCKS) { - return 0; - } - next_block = (char *)ptr + old_size; - - magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines, - trailer, - MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))); - - if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; - } - - coalesced_msize = new_msize - old_msize; -#if CONFIG_TINY_CACHE - void *last_free_ptr = tiny_mag_ptr->mag_last_free; - msize_t last_free_msize = tiny_mag_ptr->mag_last_free_msize; - if (last_free_ptr == next_block && old_msize + last_free_msize >= new_msize) { - /* - * There is a block in mag_last_free and it's immediately after - * this block and it's large enough. We can use some or all of it. 
- */ - leftover_msize = last_free_msize - coalesced_msize; - if (leftover_msize) { - tiny_mag_ptr->mag_last_free_msize -= coalesced_msize; - tiny_mag_ptr->mag_last_free += new_size - old_size; - // The block in mag_last_free is still marked as header and in-use, so copy that - // state to the block that remains. The state for the block that we're going to - // use is adjusted by the set_tiny_meta_header_middle() call below. - set_tiny_meta_header_in_use(next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize), leftover_msize); - } else { - // Using the whole block. - tiny_mag_ptr->mag_last_free = NULL; - tiny_mag_ptr->mag_last_free_msize = 0; - tiny_mag_ptr->mag_last_free_rgn = NULL; - trailer->objects_in_use--; - } - set_tiny_meta_header_middle(next_block); - coalesced_msize = 0; // No net change in memory use - } else { -#endif // CONFIG_TINY_CACHE - /* - * Try to expand into unused space immediately after this block. - */ - msize_t unused_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end); - void *unused_start = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr)) - tiny_mag_ptr->mag_bytes_free_at_end; - if (tiny_mag_ptr->mag_last_region == TINY_REGION_FOR_PTR(ptr) - && coalesced_msize < unused_msize && unused_start == ptr + old_size) { - // The block at the start of mag_bytes_free_at_end is marked as - // header/in-use and the next one has header/free. We need to - // reset both the header and in-use bit in the first block and we - // need to reset the header bit in the second block if it's part of - // the new allocation. - set_tiny_meta_header_middle(unused_start); - if (coalesced_msize > 1) { - set_tiny_meta_header_middle(unused_start + TINY_QUANTUM); - } - tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(coalesced_msize); - if (tiny_mag_ptr->mag_bytes_free_at_end) { - // Mark the first block of the remaining free area as a header and in-use. - set_tiny_meta_header_in_use_1(ptr + TINY_BYTES_FOR_MSIZE(new_msize)); - } - } else { - /* - * Look for a free block immediately afterwards. If it's large - * enough, we can consume (part of) it. - */ - is_free = tiny_meta_header_is_free(next_block); - if (!is_free) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; // next_block is in use; - } - next_msize = get_tiny_free_size(next_block); - if (old_msize + next_msize < new_msize) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; // even with next block, not enough - } - /* - * The following block is big enough; pull it from its freelist and chop off enough to satisfy - * our needs. 
- */ - tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize); - set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards - leftover_msize = next_msize - coalesced_msize; - if (leftover_msize) { - /* there's some left, so put the remainder back */ - leftover = (void *)((uintptr_t)next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize)); - tiny_free_list_add_ptr(rack, tiny_mag_ptr, leftover, leftover_msize); - } - set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize); - } -#if CONFIG_TINY_CACHE - } -#endif // CONFIG_TINY_CACHE -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize); - } -#endif - tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize); - - // Update this region's bytes in use count - region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); - size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(coalesced_msize); - node->bytes_used = (unsigned int)bytes_used; - - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) { - /* After this reallocation the region is still sparse, so it must have been even more so before - * the reallocation. That implies the region is already correctly marked. Do nothing. */ - } else { - /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the - * recirculation candidates list. */ - node->recirc_suitable = FALSE; - } - - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return 1; -} - -static char *tiny_check_fail_msg = "*** check: incorrect tiny region "; - -#define TINY_CHECK_FAIL(fmt, ...) \ - malloc_zone_check_fail(tiny_check_fail_msg, \ - "%ld, counter=%d\n" fmt, region_index, counter, __VA_ARGS__); - -boolean_t -tiny_check_region(rack_t *rack, region_t region, size_t region_index, - unsigned counter) -{ - uintptr_t start, ptr, region_end; - boolean_t prev_free = 0; - boolean_t is_free; - msize_t msize; - tiny_free_list_t *free_head; - void *follower, *previous, *next; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region); - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - - // Assumes locked - CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__); - - // Do not check the region if pinned_to_depot is not zero because it - // may not be in a consistent state (specifically, it may have a - // block marked as in-use that's longer than any legal allocation, - // which upsets get_tiny_meta_header() because it can't determine the - // block's length). - if (REGION_TRAILER_FOR_TINY_REGION(region)->pinned_to_depot) { - return 1; - } - - /* establish region limits */ - start = (uintptr_t)TINY_REGION_ADDRESS(region); - ptr = start; - if (region == tiny_mag_ptr->mag_last_region) { - ptr += tiny_mag_ptr->mag_bytes_free_at_start; - - /* - * Check the leading block's integrity here also. - */ - if (tiny_mag_ptr->mag_bytes_free_at_start) { - msize = get_tiny_meta_header((void *)(ptr - TINY_QUANTUM), &is_free); - if (is_free || (msize != 1)) { - TINY_CHECK_FAIL("*** invariant broken for leader block %p - %d %d\n", - (void *)(ptr - TINY_QUANTUM), msize, is_free); - return 0; - } - } - } - region_end = (uintptr_t)TINY_REGION_END(region); - - /* - * The last region may have a trailing chunk which has not been converted into inuse/freelist - * blocks yet.
- */ - if (region == tiny_mag_ptr->mag_last_region) { - region_end -= tiny_mag_ptr->mag_bytes_free_at_end; - } - - /* - * Scan blocks within the region. - */ - while (ptr < region_end) { - /* - * If the first block is free, and its size is 65536 (msize = 0) then the entire region is - * free. - */ - msize = get_tiny_meta_header((void *)ptr, &is_free); - if (is_free && !msize && (ptr == start)) { - return 1; - } - - /* - * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is - * corrupt. - */ - if (!msize) { - TINY_CHECK_FAIL("*** invariant broken for tiny block %p this msize=%d - size is too small\n", (void *)ptr, msize); - return 0; - } - - if (!is_free) { - /* - * In use blocks cannot be more than NUM_TINY_SLOTS quanta large. - */ - prev_free = 0; - if (msize > NUM_TINY_SLOTS) { - TINY_CHECK_FAIL("*** invariant broken for %p this tiny msize=%d - size is too large\n", (void *)ptr, msize); - return 0; - } - /* move to next block */ - ptr += TINY_BYTES_FOR_MSIZE(msize); - } else { -#if !CONFIG_RELAXED_INVARIANT_CHECKS - /* - * Free blocks must have been coalesced, we cannot have a free block following another - * free block. - */ - if (prev_free) { - TINY_CHECK_FAIL("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", (void *)ptr, msize); - return 0; - } -#endif // CONFIG_RELAXED_INVARIANT_CHECKS - prev_free = 1; - /* - * Check the integrity of this block's entry in its freelist. - */ - free_head = (tiny_free_list_t *)ptr; - previous = free_list_unchecksum_ptr(rack, &free_head->previous); - next = free_list_unchecksum_ptr(rack, &free_head->next); - if (previous && !tiny_meta_header_is_free(previous)) { - TINY_CHECK_FAIL("*** invariant broken for %p (previous %p is not a free pointer)\n", (void *)ptr, previous); - return 0; - } - if (next && !tiny_meta_header_is_free(next)) { - TINY_CHECK_FAIL("*** invariant broken for %p (next in free list %p is not a free pointer)\n", (void *)ptr, next); - return 0; - } - /* - * Check the free block's trailing size value. - */ - follower = FOLLOWING_TINY_PTR(ptr, msize); - if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) { - TINY_CHECK_FAIL("*** invariant broken for tiny free %p followed by %p in region [%p-%p] " - "(end marker incorrect) should be %d; in fact %d\n", - (void *)ptr, follower, TINY_REGION_ADDRESS(region), (void *)region_end, - msize, get_tiny_previous_free_msize(follower)); - return 0; - } - /* move to next block */ - ptr = (uintptr_t)follower; - } - } - /* - * Ensure that we scanned the entire region - */ - if (ptr != region_end) { - TINY_CHECK_FAIL("*** invariant broken for region end %p - %p\n", (void *)ptr, (void *)region_end); - return 0; - } - /* - * Check the trailing block's integrity. 
- */ - if (region == tiny_mag_ptr->mag_last_region) { - if (tiny_mag_ptr->mag_bytes_free_at_end) { - msize = get_tiny_meta_header((void *)ptr, &is_free); - if (is_free || (msize != 1)) { - TINY_CHECK_FAIL("*** invariant broken for blocker block %p - %d %d\n", (void *)ptr, msize, is_free); - return 0; - } - } - } - return 1; -} - -kern_return_t -tiny_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - szone_t *szone, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - size_t num_regions; - size_t index; - region_t *regions; - vm_range_t buffer[MAX_RECORDER_BUFFER]; - unsigned count = 0; - kern_return_t err; - region_t region; - vm_range_t range; - vm_range_t admin_range; - vm_range_t ptr_range; - unsigned char *mapped_region; - uint32_t *block_header; - uint32_t *in_use; - unsigned block_index; - unsigned block_limit; - boolean_t is_free; - msize_t msize; - void *mapped_ptr; - unsigned bit; - magazine_t *tiny_mag_base = NULL; - - region_hash_generation_t *trg_ptr; - err = reader(task, (vm_address_t)szone->tiny_rack.region_generation, sizeof(region_hash_generation_t), (void **)&trg_ptr); - if (err) { - return err; - } - - num_regions = trg_ptr->num_regions_allocated; - err = reader(task, (vm_address_t)trg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions); - if (err) { - return err; - } - - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - // Map in all active magazines. Do this outside the iteration over regions. - err = reader(task, (vm_address_t)(szone->tiny_rack.magazines), szone->tiny_rack.num_magazines * sizeof(magazine_t), - (void **)&tiny_mag_base); - if (err) { - return err; - } - } - - for (index = 0; index < num_regions; ++index) { - region = regions[index]; - if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { - range.address = (vm_address_t)TINY_REGION_ADDRESS(region); - range.size = (vm_size_t)TINY_REGION_SIZE; - if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { - admin_range.address = range.address + TINY_METADATA_START; - admin_range.size = TINY_METADATA_SIZE; - recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); - } - if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { - ptr_range.address = range.address; - ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM; - recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); - } - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - err = reader(task, range.address, range.size, (void **)&mapped_region); - if (err) { - return err; - } - - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region); - magazine_t *tiny_mag_ptr = tiny_mag_base + mag_index; - - int cached_free_blocks = 0; -#if CONFIG_TINY_CACHE - // Each magazine could have a pointer to a cached free block from - // this region. Count the magazines that have such a pointer.
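- // Every remote access in this enumerator goes through the caller-supplied
- // memory_reader_t rather than dereferencing task addresses directly. A
- // minimal in-process reader looks roughly like this (illustrative sketch;
- // an out-of-process client would map the range with mach_vm_read() instead):
- //
- //     static kern_return_t
- //     local_memory_reader(task_t task, vm_address_t remote, vm_size_t size,
- //             void **local)
- //     {
- //         *local = (void *)remote; // same address space: no copy required
- //         return KERN_SUCCESS;
- //     }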
- for (mag_index = 0; mag_index < szone->tiny_rack.num_magazines; mag_index++) { - if ((void *)range.address == (tiny_mag_base + mag_index)->mag_last_free_rgn) { - cached_free_blocks++; - } - } -#endif // CONFIG_TINY_CACHE - - block_header = (uint32_t *)(mapped_region + TINY_METADATA_START + sizeof(region_trailer_t)); - in_use = TINY_INUSE_FOR_HEADER(block_header); - block_index = 0; - block_limit = NUM_TINY_BLOCKS; - if (region == tiny_mag_ptr->mag_last_region) { - block_index += TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start); - block_limit -= TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end); - } - - for (; block_index < block_limit; block_index += msize) { - vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index); - is_free = !BITARRAY_BIT(in_use, block_index); - if (is_free) { - mapped_ptr = mapped_region + block_offset; - - // mapped_region, the address at which 'range' in 'task' has been - // mapped into our process, is not necessarily aligned to - // TINY_BLOCKS_ALIGN. - // - // Since the code in get_tiny_free_size() assumes the pointer came - // from a properly aligned tiny region, and mapped_region is not - // necessarily aligned, then do the size calculation directly. - // If the next bit is set in the header bitmap, then the size is one - // quantum. Otherwise, read the size field. - if (!BITARRAY_BIT(block_header, (block_index + 1))) { - msize = TINY_FREE_SIZE(mapped_ptr); - } else { - msize = 1; - } - } else { -#if CONFIG_TINY_CACHE - // If there are still magazines that have cached free - // blocks in this region, check whether this is one of - // them and don't return the block pointer if it is. - vm_address_t ptr = range.address + block_offset; - boolean_t block_cached = false; - if (cached_free_blocks) { - for (mag_index = 0; mag_index < szone->tiny_rack.num_magazines; mag_index++) { - if ((void *)ptr == (tiny_mag_base + mag_index)->mag_last_free) { - block_cached = true; - cached_free_blocks--; - msize = (tiny_mag_base + mag_index)->mag_last_free_msize; - break; - } - } - } - if (block_cached) { - if (!msize) { - return KERN_FAILURE; // Something's amiss. Avoid looping at this block_index. - } - continue; - } -#endif // CONFIG_TINY_CACHE - msize = 1; - bit = block_index + 1; - while (!BITARRAY_BIT(block_header, bit)) { - bit++; - msize++; - } - buffer[count].address = range.address + block_offset; - buffer[count].size = TINY_BYTES_FOR_MSIZE(msize); - count++; - if (count >= MAX_RECORDER_BUFFER) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - - if (!msize) { - return KERN_FAILURE; // Something's amiss. Avoid looping at this block_index. - } - } - if (count) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - } - return 0; -} - -void * -tiny_malloc_from_free_list(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize) -{ - tiny_free_list_t *ptr; - msize_t this_msize; - grain_t slot = tiny_slot_from_msize(msize); - free_list_t *free_list = tiny_mag_ptr->mag_free_list; - free_list_t *the_slot = free_list + slot; - tiny_free_list_t *next; - free_list_t *limit; -#if defined(__LP64__) - uint64_t bitmap; -#else - uint32_t bitmap; -#endif - msize_t leftover_msize; - tiny_free_list_t *leftover_ptr; - - // Assumes we've locked the region - CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__); - - // Look for an exact match by checking the freelist for this msize.
- // - ptr = the_slot->p; - if (ptr) { - next = free_list_unchecksum_ptr(rack, &ptr->next); - if (next) { - next->previous = ptr->previous; - } else { - BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); - } - the_slot->p = next; - this_msize = msize; -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize); - } -#endif - tiny_update_region_free_list_for_remove(slot, ptr, next); - - goto return_tiny_alloc; - } - - // Mask off the bits representing slots holding free blocks smaller than the - // size we need. If there are no larger free blocks, try allocating from - // the free space at the end of the tiny region. -#if defined(__LP64__) - bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~((1ULL << slot) - 1); -#else - bitmap = tiny_mag_ptr->mag_bitmap[0] & ~((1 << slot) - 1); -#endif - if (!bitmap) { - goto try_tiny_malloc_from_end; - } - - slot = BITMAPV_CTZ(bitmap); - limit = free_list + NUM_TINY_SLOTS; - free_list += slot; - - if (free_list < limit) { - ptr = free_list->p; - if (ptr) { - next = free_list_unchecksum_ptr(rack, &ptr->next); - free_list->p = next; - if (next) { - next->previous = ptr->previous; - } else { - BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); - } - this_msize = get_tiny_free_size(ptr); - tiny_update_region_free_list_for_remove(slot, ptr, next); - goto add_leftover_and_proceed; - } -#if DEBUG_MALLOC - malloc_report(ASL_LEVEL_ERR, "in tiny_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot); -#endif - } - - // We are now looking at the last slot, which contains blocks equal to, or - // due to coalescing of free blocks, larger than NUM_TINY_SLOTS * tiny quantum size. - // If the last freelist is not empty, and the head contains a block that is - // larger than our request, then the remainder is put back on the free list. 
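- // The bitmap probe above, in isolation -- a standalone sketch (hypothetical
- // helper name; BITMAPV_CTZ amounts to a count-trailing-zeros operation):
- //
- //     // First slot >= min_slot with a non-empty free list, or -1 if none.
- //     static int first_usable_slot(uint32_t bitmap, int min_slot)
- //     {
- //         uint32_t masked = bitmap & ~((1U << min_slot) - 1); // drop smaller slots
- //         return masked ? __builtin_ctz(masked) : -1;
- //     }
- //
- // e.g. bitmap = 0x94 (slots 2, 4 and 7 non-empty) with min_slot = 3 masks
- // to 0x90, and the count-trailing-zeros picks slot 4.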
- ptr = limit->p; - if (ptr) { - this_msize = get_tiny_free_size(ptr); - next = free_list_unchecksum_ptr(rack, &ptr->next); - if (this_msize - msize > NUM_TINY_SLOTS) { - // the leftover will go back to the free list, so we optimize by - // modifying the free list rather than a pop and push of the head - leftover_msize = this_msize - msize; - leftover_ptr = (tiny_free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); - limit->p = leftover_ptr; - if (next) { - next->previous.u = free_list_checksum_ptr(rack, leftover_ptr); - } - leftover_ptr->previous = ptr->previous; - leftover_ptr->next = ptr->next; - set_tiny_meta_header_free(leftover_ptr, leftover_msize); -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, - "in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); - } -#endif - this_msize = msize; - tiny_update_region_free_list_for_remove(NUM_TINY_SLOTS, ptr, leftover_ptr); - - goto return_tiny_alloc; - } - if (next) { - next->previous = ptr->previous; - } - limit->p = next; - tiny_update_region_free_list_for_remove(slot, ptr, next); - goto add_leftover_and_proceed; - /* NOTREACHED */ - } - -try_tiny_malloc_from_end: - // Let's see if we can use tiny_mag_ptr->mag_bytes_free_at_end - if (tiny_mag_ptr->mag_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) { - ptr = (tiny_free_list_t *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end); - tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize); - if (tiny_mag_ptr->mag_bytes_free_at_end) { - // let's add an in use block after ptr to serve as boundary - set_tiny_meta_header_in_use_1((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); - } - this_msize = msize; -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize); - } -#endif - goto return_tiny_alloc; - } -#if CONFIG_ASLR_INTERNAL - // Try from start if nothing left at end - if (tiny_mag_ptr->mag_bytes_free_at_start >= TINY_BYTES_FOR_MSIZE(msize)) { - ptr = (tiny_free_list_t *)(TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region) + tiny_mag_ptr->mag_bytes_free_at_start - - TINY_BYTES_FOR_MSIZE(msize)); - tiny_mag_ptr->mag_bytes_free_at_start -= TINY_BYTES_FOR_MSIZE(msize); - if (tiny_mag_ptr->mag_bytes_free_at_start) { - // let's add an in use block before ptr to serve as boundary - set_tiny_meta_header_in_use_1((unsigned char *)ptr - TINY_QUANTUM); - } - this_msize = msize; -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), from start ptr=%p, msize=%d\n", ptr, msize); - } -#endif - goto return_tiny_alloc; - } -#endif - return NULL; - -add_leftover_and_proceed: - if (!this_msize || (this_msize > msize)) { - leftover_msize = this_msize - msize; - leftover_ptr = (tiny_free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); - } -#endif - tiny_free_list_add_ptr(rack, tiny_mag_ptr, leftover_ptr, leftover_msize); - this_msize = msize; - } - -return_tiny_alloc: - tiny_mag_ptr->mag_num_objects++; - tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(this_msize); - - // Check that the region cookie is intact and update the region's bytes in use count - region_t *region = TINY_REGION_FOR_PTR(ptr); - region_trailer_t *trailer 
= REGION_TRAILER_FOR_TINY_REGION(region); - region_check_cookie(region, trailer); - size_t bytes_used = trailer->bytes_used + TINY_BYTES_FOR_MSIZE(this_msize); - trailer->bytes_used = (unsigned int)bytes_used; - trailer->objects_in_use++; - - // Emptiness discriminant - if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) { - /* After this allocation the region is still sparse, so it must have been even more so before - * the allocation. That implies the region is already correctly marked. Do nothing. */ - } else { - /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the - * recirculation candidates list. */ - trailer->recirc_suitable = FALSE; - } -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); - } -#endif - if (this_msize > 1) { - set_tiny_meta_header_in_use(ptr, this_msize); - } else { - set_tiny_meta_header_in_use_1(ptr); - } - return ptr; -} - -void * -tiny_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested) -{ - void *ptr; - mag_index_t mag_index = tiny_mag_get_thread_index() % rack->num_magazines; - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - - MALLOC_TRACE(TRACE_tiny_malloc, (uintptr_t)rack, TINY_BYTES_FOR_MSIZE(msize), (uintptr_t)tiny_mag_ptr, cleared_requested); - -#if DEBUG_MALLOC - if (DEPOT_MAGAZINE_INDEX == mag_index) { - malloc_zone_error(rack->debug_flags, true, "malloc called for magazine index -1\n"); - return (NULL); - } - - if (!msize) { - malloc_zone_error(rack->debug_flags, true, "invariant broken (!msize) in allocation (region)\n"); - return (NULL); - } -#endif - - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - -#if CONFIG_TINY_CACHE - ptr = tiny_mag_ptr->mag_last_free; - - if (tiny_mag_ptr->mag_last_free_msize == msize) { - // we have a winner - tiny_mag_ptr->mag_last_free = NULL; - tiny_mag_ptr->mag_last_free_msize = 0; - tiny_mag_ptr->mag_last_free_rgn = NULL; - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); - } -#if DEBUG_MALLOC - if (LOG(szone, ptr)) { - malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize); - } -#endif - return ptr; - } -#endif /* CONFIG_TINY_CACHE */ - - while (1) { - ptr = tiny_malloc_from_free_list(rack, tiny_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - -#if CONFIG_RECIRC_DEPOT - if (tiny_get_region_from_depot(rack, tiny_mag_ptr, mag_index, msize)) { - ptr = tiny_malloc_from_free_list(rack, tiny_mag_ptr, mag_index, msize); - if (ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - if (cleared_requested) { - memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); - } - return ptr; - } - } -#endif // CONFIG_RECIRC_DEPOT - - // The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc(). - // The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first - // thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section. - // Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the - // allocation. 
After some time the magazine is resupplied, the original thread leaves with its allocation, - and retrying threads succeed in the code just above. - if (!tiny_mag_ptr->alloc_underway) { - void *fresh_region; - - // time to create a new region (do this outside the magazine lock) - tiny_mag_ptr->alloc_underway = TRUE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - fresh_region = mvm_allocate_pages_securely(TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, VM_MEMORY_MALLOC_TINY, rack->debug_flags); - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - - // DTrace USDT Probe - MAGMALLOC_ALLOCREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, fresh_region, TINY_REGION_SIZE); - - if (!fresh_region) { // out of memory! - tiny_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return NULL; - } - - region_set_cookie(REGION_TRAILER_FOR_TINY_REGION(fresh_region)); - ptr = tiny_malloc_from_region_no_lock(rack, tiny_mag_ptr, mag_index, msize, fresh_region); - - // we don't clear because this freshly allocated space is pristine - tiny_mag_ptr->alloc_underway = FALSE; - OSMemoryBarrier(); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return ptr; - } else { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - yield(); - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - } - } - /* NOTREACHED */ -} - -size_t -tiny_size(rack_t *rack, const void *ptr) -{ - if (tiny_region_for_ptr_no_lock(rack, ptr)) { - if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { - return 0; - } - - boolean_t is_free; - msize_t msize = get_tiny_meta_header(ptr, &is_free); - if (is_free) { - return 0; - } - -#if CONFIG_TINY_CACHE - { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); - if (DEPOT_MAGAZINE_INDEX != mag_index) { - magazine_t *tiny_mag_ptr = &rack->magazines[mag_index]; - - if (msize < TINY_QUANTUM && ptr == tiny_mag_ptr->mag_last_free) { - return 0; - } - } else { - for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) { - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - - if (msize < TINY_QUANTUM && ptr == tiny_mag_ptr->mag_last_free) { - return 0; - } - } - } - } -#endif - return TINY_BYTES_FOR_MSIZE(msize); - } - - return 0; -} - -static MALLOC_NOINLINE void -free_tiny_botch(rack_t *rack, tiny_free_list_t *ptr) -{ - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - malloc_zone_error(rack->debug_flags, true, "Double free of object %p\n", ptr); -} - -void -free_tiny(rack_t *rack, void *ptr, region_t tiny_region, size_t known_size, - boolean_t partial_free) -{ - msize_t msize; - boolean_t is_free; - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region); - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - - MALLOC_TRACE(TRACE_tiny_free, (uintptr_t)rack, (uintptr_t)ptr, (uintptr_t)tiny_mag_ptr, known_size); - - // ptr is known to be in tiny_region - if (known_size) { - msize = TINY_MSIZE_FOR_BYTES(known_size + TINY_QUANTUM - 1); - } else { - msize = get_tiny_meta_header(ptr, &is_free); - if (is_free) { - free_tiny_botch(rack, ptr); - return; - } - } -#if DEBUG_MALLOC - if (!msize) { - malloc_report(ASL_LEVEL_ERR, "*** free_tiny() block in use is too large: %p\n", ptr); - return; - } -#endif - - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - -#if CONFIG_TINY_CACHE - // Depot does not participate in CONFIG_TINY_CACHE since it can't be directly malloc()'d -
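- // The block below is a one-slot deferred-free cache: the most recently freed
- // tiny block is parked in mag_last_free, and only the block it evicts gets
- // pushed onto the real free lists. The core idea, as a simplified standalone
- // sketch (hypothetical names):
- //
- //     typedef struct {
- //         void *last_free;       // most recently freed block, or NULL
- //         size_t last_free_size; // its size in bytes
- //     } last_free_cache_t;
- //
- //     // Swap the incoming block with the cached one; the caller then frees
- //     // whatever comes back out (NULL when the cache was empty).
- //     static void *cache_swap(last_free_cache_t *c, void *ptr, size_t size,
- //             size_t *evicted_size)
- //     {
- //         void *evicted = c->last_free;
- //         *evicted_size = c->last_free_size;
- //         c->last_free = ptr;
- //         c->last_free_size = size;
- //         return evicted;
- //     }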
if (DEPOT_MAGAZINE_INDEX != mag_index && !partial_free) { - if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits - void *ptr2 = tiny_mag_ptr->mag_last_free; // Might be NULL - msize_t msize2 = tiny_mag_ptr->mag_last_free_msize; - region_t rgn2 = tiny_mag_ptr->mag_last_free_rgn; - - /* check that we don't already have this pointer in the cache */ - if (ptr == ptr2) { - free_tiny_botch(rack, ptr); - return; - } - - if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize) { - memset(ptr, SCRABBLE_BYTE, TINY_BYTES_FOR_MSIZE(msize)); - } - - tiny_mag_ptr->mag_last_free = ptr; - tiny_mag_ptr->mag_last_free_msize = msize; - tiny_mag_ptr->mag_last_free_rgn = tiny_region; - - if (!ptr2) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - CHECK(szone, __PRETTY_FUNCTION__); - return; - } - - msize = msize2; - ptr = ptr2; - tiny_region = rgn2; - } - } -#endif /* CONFIG_TINY_CACHE */ - - // Now in the time it took to acquire the lock, the region may have migrated - // from one magazine to another. I.e. trailer->mag_index is volatile. - // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock) - // is stale. If so, keep on tryin' ... - region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny_region); - mag_index_t refreshed_index; - - while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - mag_index = refreshed_index; - tiny_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - } - - if (tiny_free_no_lock(rack, tiny_mag_ptr, mag_index, tiny_region, ptr, - msize, partial_free)) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - } - - CHECK(szone, __PRETTY_FUNCTION__); -} - -unsigned -tiny_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) -{ - msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); - unsigned found = 0; - mag_index_t mag_index = tiny_mag_get_thread_index() % szone->tiny_rack.num_magazines; - magazine_t *tiny_mag_ptr = &(szone->tiny_rack.magazines[mag_index]); - - // make sure to return objects at least one quantum in size - if (!msize) { - msize = 1; - } - - CHECK(szone, __PRETTY_FUNCTION__); - - // We must lock the zone now, since tiny_malloc_from_free_list assumes that - // the caller has done so. - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - - // with the zone locked, allocate objects from the free list until all - // sufficiently large objects have been exhausted, or we have met our quota - // of objects to allocate. - while (found < count) { - void *ptr = tiny_malloc_from_free_list(&szone->tiny_rack, tiny_mag_ptr, mag_index, msize); - if (!ptr) { - break; - } - - *results++ = ptr; - found++; - } - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return found; -} - -void -tiny_batch_free(szone_t *szone, void **to_be_freed, unsigned count) -{ - unsigned cc = 0; - void *ptr; - region_t tiny_region = NULL; - boolean_t is_free; - msize_t msize; - magazine_t *tiny_mag_ptr = NULL; - mag_index_t mag_index = -1; - - // frees all the pointers in to_be_freed - // note that to_be_freed may be overwritten during the process - if (!count) { - return; - } - - CHECK(szone, __PRETTY_FUNCTION__); - while (cc < count) { - ptr = to_be_freed[cc]; - if (ptr) { - if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) { // region same as last iteration? 
- if (tiny_mag_ptr) { // non-NULL iff magazine lock taken - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - tiny_mag_ptr = NULL; - } - - tiny_region = tiny_region_for_ptr_no_lock(&szone->tiny_rack, ptr); - - if (tiny_region) { - tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines, - REGION_TRAILER_FOR_TINY_REGION(tiny_region), - MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region)); - mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region); - } - } - if (tiny_region) { - // this is a tiny pointer - if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { - break; // pointer to metadata; let the standard free deal with it - } - msize = get_tiny_meta_header(ptr, &is_free); - if (is_free) { - break; // a double free; let the standard free deal with it - } - if (!tiny_free_no_lock(&szone->tiny_rack, tiny_mag_ptr, mag_index, tiny_region, ptr, msize, false)) { - // Arrange to re-acquire magazine lock - tiny_mag_ptr = NULL; - tiny_region = NULL; - } - to_be_freed[cc] = NULL; - } else { - // No region in this zone claims ptr; let the standard free deal with it - break; - } - } - cc++; - } - - if (tiny_mag_ptr) { - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - tiny_mag_ptr = NULL; - } -} - - -void -print_tiny_free_list(task_t task, memory_reader_t reader, - print_task_printer_t printer, rack_t *rack) -{ - tiny_free_list_t *ptr; - _SIMPLE_STRING b = _simple_salloc(); - mag_index_t mag_index; - - if (b) { - rack_t *mapped_rack; - magazine_t *mapped_magazines; - if (reader(task, (vm_address_t)rack, sizeof(struct rack_s), - (void **)&mapped_rack)) { - printer("Failed to map tiny rack\n"); - return; - } - _simple_sappend(b, "Tiny free sizes:\n"); - if (reader(task, (vm_address_t)mapped_rack->magazines, - mapped_rack->num_magazines * sizeof(magazine_t), - (void **)&mapped_magazines)) { - printer("Failed to map tiny rack magazines\n"); - return; - } - - for (mag_index = -1; mag_index < mapped_rack->num_magazines; mag_index++) { - grain_t slot = 0; - if (mag_index == -1) { - _simple_sprintf(b, "\tRecirc depot: "); - } else { - _simple_sprintf(b, "\tMagazine %d: ", mag_index); - } - while (slot <= NUM_TINY_SLOTS) { - ptr = mapped_magazines[mag_index].mag_free_list[slot].p; - if (ptr) { - _simple_sprintf(b, "%s%y[%d]; ", - (slot == NUM_TINY_SLOTS) ? 
">=" : "", - (slot + 1) * TINY_QUANTUM, - free_list_count(task, reader, printer, mapped_rack, - (free_list_t){ .p = ptr })); - } - slot++; - } - _simple_sappend(b, "\n"); - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -void -print_tiny_region(task_t task, memory_reader_t reader, - print_task_printer_t printer, int level, region_t region, - size_t bytes_at_start, size_t bytes_at_end) -{ - unsigned counts[1024]; - unsigned in_use = 0; - uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region); - uintptr_t current = start + bytes_at_start; - uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end; - uintptr_t mapped_start; - boolean_t is_free; - msize_t msize; - unsigned ci; - _SIMPLE_STRING b; - uintptr_t pgTot = 0; - - if (reader(task, (vm_address_t)start, TINY_REGION_SIZE, - (void **)&mapped_start)) { - printer("Failed to map tiny region at %p\n", start); - return; - } - off_t start_offset = mapped_start - start; - region_t mapped_region = (region_t)mapped_start; - - if (region == HASHRING_REGION_DEALLOCATED) { - if ((b = _simple_salloc()) != NULL) { - _simple_sprintf(b, "Tiny region [unknown address] was returned to the OS\n"); - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } - return; - } - - memset(counts, 0, sizeof(counts)); - while (current < limit) { - msize = get_tiny_meta_header_offset((void *)current, start_offset, &is_free); - if (is_free && !msize && (current == start)) { - // first block is all free - uintptr_t pgLo = round_page_quanta(start + sizeof(tiny_free_list_t) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_quanta(start + TINY_REGION_SIZE - sizeof(msize_t)); - - if (pgLo < pgHi) { - pgTot += (pgHi - pgLo); - } - break; - } - if (!msize) { - printer("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize); - break; - } - if (!is_free) { - // block in use - if (msize > NUM_TINY_SLOTS) { - printer("*** error at %p msize for in_use is %d\n", (void *)current, msize); - } - if (msize < 1024) { - counts[msize]++; - } - in_use++; - } else { - uintptr_t pgLo = round_page_quanta(current + sizeof(tiny_free_list_t) + sizeof(msize_t)); - uintptr_t pgHi = trunc_page_quanta(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); - - if (pgLo < pgHi) { - pgTot += (pgHi - pgLo); - } - } - current += TINY_BYTES_FOR_MSIZE(msize); - } - if ((b = _simple_salloc()) != NULL) { - mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region); - _simple_sprintf(b, "Tiny region [%p-%p, %y] \t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE); - if (mag_index == DEPOT_MAGAZINE_INDEX) { - _simple_sprintf(b, "Recirc depot \t"); - } else { - _simple_sprintf(b, "Magazine=%d \t", mag_index); - } - _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly (%d%%) \t", - in_use, BYTES_USED_FOR_TINY_REGION(mapped_region), - (int)(100.0F * BYTES_USED_FOR_TINY_REGION(mapped_region))/TINY_REGION_SIZE); - if (bytes_at_end || bytes_at_start) { - _simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start); - } - if (mag_index == DEPOT_MAGAZINE_INDEX) { - _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot); - } else { - _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot); -#if CONFIG_RECIRC_DEPOT - _simple_sprintf(b, tiny_region_below_recirc_threshold(mapped_region) ? 
- "\tEmpty enough to be moved to recirc depot" : - "\tNot empty enough to be moved to recirc depot"); -#endif // CONFIG_RECIRC_DEPOT - } - if (level >= MALLOC_VERBOSE_PRINT_LEVEL && in_use) { - _simple_sappend(b, "\n\tSizes in use: "); - for (ci = 0; ci < 1024; ci++) { - if (counts[ci]) { - _simple_sprintf(b, "%y[%d] ", TINY_BYTES_FOR_MSIZE(ci), counts[ci]); - } - } - } - printer("%s\n", _simple_string(b)); - _simple_sfree(b); - } -} - -static char *tiny_freelist_fail_msg = "check: tiny free list incorrect "; - -#define TINY_FREELIST_FAIL(fmt, ...) \ - malloc_zone_check_fail(tiny_freelist_fail_msg, \ - " (slot=%u), counter=%d\n" fmt, slot, counter, __VA_ARGS__); - -boolean_t -tiny_free_list_check(rack_t *rack, grain_t slot, unsigned counter) -{ - mag_index_t mag_index; - - for (mag_index = -1; mag_index < rack->num_magazines; mag_index++) { - magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]); - SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr); - - unsigned count = 0; - tiny_free_list_t *ptr = rack->magazines[mag_index].mag_free_list[slot].p; - boolean_t is_free; - tiny_free_list_t *previous = NULL; - - while (ptr) { - is_free = tiny_meta_header_is_free(ptr); - if (!is_free) { - TINY_FREELIST_FAIL("*** in-use ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; - } - if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { - TINY_FREELIST_FAIL("*** unaligned ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; - } - if (!tiny_region_for_ptr_no_lock(rack, ptr)) { - TINY_FREELIST_FAIL("*** ptr not in szone slot=%d count=%u ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; - } - if (free_list_unchecksum_ptr(rack, &ptr->previous) != previous) { - TINY_FREELIST_FAIL("*** previous incorrectly set slot=%u count=%d ptr=%p\n", slot, count, ptr); - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - return 0; - } - previous = ptr; - ptr = free_list_unchecksum_ptr(rack, &ptr->next); - count++; - } - - SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); - } - return 1; -} diff --git a/src/libmalloc/src/magazine_zone.h b/src/libmalloc/src/magazine_zone.h deleted file mode 100644 index 89fd52522..000000000 --- a/src/libmalloc/src/magazine_zone.h +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __MAGAZINE_ZONE_H -#define __MAGAZINE_ZONE_H - -/********************* DEFINITIONS ************************/ - -// Out-of-band free list entry. 
Out-of-band free list entries are used -// in specific cases where a free-list entry is the *only* data on a given page, -// and the presence of that entry causes the page to stay dirty. -// -// `ptr` is a 16-bit, quantum-sized index and can be packed, as it references a -// block address inside the current region. `next` and `prev` have to be -// pointer-sized references, as these values can point to entries outside the current -// region, so it's not safe to compact them. -typedef struct { - uintptr_t prev; - uintptr_t next; - uint16_t ptr; -} MALLOC_PACKED oob_free_entry_s, *oob_free_entry_t; - -// In-place free list entry. Unlike the out-of-band entry, the in-place entries -// are stored at the start of the range that has been freed. -typedef struct _inplace_free_entry_s *inplace_free_entry_t; - -typedef struct { - void *ptr; - uint8_t checksum; -} inplace_linkage_s; - -typedef union { - inplace_free_entry_t p; - uintptr_t u; -} inplace_union; - -typedef struct _inplace_free_entry_s { - inplace_union previous; - inplace_union next; -} inplace_free_entry_s, *inplace_free_entry_t; - -#ifdef __LP64__ -MALLOC_STATIC_ASSERT(sizeof(inplace_free_entry_s) == 16, "inplace free list must be 16-bytes long"); -#else -MALLOC_STATIC_ASSERT(sizeof(inplace_free_entry_s) == 8, "inplace free list must be 8-bytes long"); -#endif - -typedef struct _small_inplace_free_entry_s { - inplace_linkage_s previous; - inplace_linkage_s next; -} small_inplace_free_entry_s, *small_inplace_free_entry_t; - -typedef struct _medium_inplace_free_entry_s { - inplace_linkage_s previous; - inplace_linkage_s next; -} medium_inplace_free_entry_s, *medium_inplace_free_entry_t; - -typedef union { - small_inplace_free_entry_t small_inplace; - medium_inplace_free_entry_t medium_inplace; - inplace_free_entry_t inplace; - oob_free_entry_t oob; - void *p; -} free_list_t; - -typedef struct { - inplace_union previous; - inplace_union next; -} tiny_free_list_t; - -typedef unsigned int grain_t; // N.B. wide enough to index all free slots - -#define CHECK_REGIONS (1 << 31) -#define DISABLE_ASLR (1 << 30) - -#define MAX_RECORDER_BUFFER 256 - -/********************* DEFINITIONS for tiny ************************/ - -/* - * Memory in the Tiny range is allocated from regions (heaps) pointed to by the - * szone's hashed_regions pointer. - * - * Each region is laid out as a heap, followed by a header block, all within - * a 1MB (2^20) block. This means there are 64504 16-byte blocks and the header - * is 16138 bytes, making the total 1048458 bytes, leaving 118 bytes unused. - * - * The header block is arranged as in struct tiny_region defined just below, and - * consists of two bitfields (or bit arrays) interleaved 32 bits by 32 bits. - * - * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding - * TINY_QUANTUM block within the heap. - * - * The bitfields are used to encode the state of memory within the heap. The header bit indicates - * that the corresponding quantum is the first quantum in a block (either in use or free). The - * in-use bit is set for the header if the block has been handed out (allocated). If the header - * bit is not set, the in-use bit is invalid. - * - * The szone maintains an array of NUM_TINY_SLOTS freelists, each of which is used to hold - * free objects of the corresponding quantum size.
In the tiny region, the free - * objects for each region are arranged so that they are grouped together in their - * per-slot freelists and the groups are ordered roughly in the order of regions - * as they appear in the magazine's region list. This approach helps to reduce - * fragmentation. Not guaranteeing strictly the same ordering as the regions - * helps reduce the CPU time required to reduce fragmentation. - * - * A free block is laid out depending on its size, in order to fit all free - * blocks in 16 bytes, on both 32 and 64 bit platforms. One-quantum blocks do - * not store their size in the block, instead relying on the header information - * to determine their size. Blocks of two or more quanta have room to store - * their size in the block, and store it both after the 'next' pointer, and in - * the last 2 bytes of the block. - * - * 1-quantum block - * Offset (32-bit mode) (64-bit mode) - * 0x0 0x0 : previous - * 0x4 0x08 : next - * end end - * - * >1-quantum block - * Offset (32-bit mode) (64-bit mode) - * 0x0 0x0 : previous - * 0x4 0x08 : next - * 0x8 0x10 : size (in quantum counts) - * end - 2 end - 2 : size (in quantum counts) - * end end - * - * All fields are pointer-sized, except for the size which is an unsigned short. - * - */ - -#define FOLLOWING_TINY_PTR(ptr, msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM)) - -#define TINY_BLOCKS_ALIGN (SHIFT_TINY_CEIL_BLOCKS + SHIFT_TINY_QUANTUM) // 20 - -#define TINY_ENTROPY_BITS 15 -#define TINY_ENTROPY_MASK ((1 << TINY_ENTROPY_BITS) - 1) - -/* - * Avoid having so much entropy that the end of a valid tiny allocation - * might overrun the end of the tiny region. - */ -#if TINY_ENTROPY_MASK + NUM_TINY_SLOTS > NUM_TINY_BLOCKS -#error Too many entropy bits for tiny region requested -#endif - -/* - * Enough room for the data, followed by the bit arrays (2-bits per block) - * plus rounding to the nearest page. - */ -#define CEIL_NUM_TINY_BLOCKS_WORDS (((NUM_TINY_BLOCKS + 31) & ~31) >> 5) -#define TINY_METADATA_SIZE (sizeof(region_trailer_t) + sizeof(tiny_header_inuse_pair_t) * CEIL_NUM_TINY_BLOCKS_WORDS + (sizeof(region_free_blocks_t) * NUM_TINY_SLOTS)) -#define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + TINY_METADATA_SIZE + PAGE_MAX_SIZE - 1) & ~(PAGE_MAX_SIZE - 1)) - -#define TINY_METADATA_START (NUM_TINY_BLOCKS * TINY_QUANTUM) - -/* - * Beginning and end pointers for a region's heap. - */ -#define TINY_REGION_ADDRESS(region) ((void *)(region)) -#define TINY_REGION_END(region) ((void *)(((uintptr_t)(region)) + (NUM_TINY_BLOCKS * TINY_QUANTUM))) - -/* - * Locate the heap base for a pointer known to be within a tiny region. - */ -#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1))) - -/* - * Convert between byte and msize units.
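- * For example, with TINY_QUANTUM = 16 (SHIFT_TINY_QUANTUM = 4), an msize of 3
- * spans TINY_BYTES_FOR_MSIZE(3) = 3 << 4 = 48 bytes, and
- * TINY_MSIZE_FOR_BYTES(48) = 48 >> 4 = 3 quanta.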
- */ -#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM) -#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM) - -#if MALLOC_TARGET_64BIT -#define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[8]) -#else // MALLOC_TARGET_64BIT -#define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[4]) -#endif // MALLOC_TARGET_64BIT -#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1] - -/* - * Layout of a tiny region - */ -typedef uint32_t tiny_block_t[4]; // assert(TINY_QUANTUM == sizeof(tiny_block_t)) - -#define TINY_REGION_PAD (TINY_REGION_SIZE - (NUM_TINY_BLOCKS * sizeof(tiny_block_t)) - TINY_METADATA_SIZE) - -typedef struct tiny_header_inuse_pair { - uint32_t header; - uint32_t inuse; -} tiny_header_inuse_pair_t; - -typedef struct { - // Block indices are +1 so that 0 represents no free block. - uint16_t first_block; - uint16_t last_block; -} region_free_blocks_t; - -typedef struct region_trailer { - uint32_t region_cookie; - volatile int32_t pinned_to_depot; - struct region_trailer *prev; - struct region_trailer *next; - boolean_t recirc_suitable; - unsigned bytes_used; - unsigned objects_in_use; // Used only by tiny allocator. - mag_index_t mag_index; -} region_trailer_t; - -typedef struct tiny_region { - tiny_block_t blocks[NUM_TINY_BLOCKS]; - - region_trailer_t trailer; - - // The interleaved bit arrays comprising the header and inuse bitfields. - // The unused bits of each component in the last pair will be initialized to sentinel values. - tiny_header_inuse_pair_t pairs[CEIL_NUM_TINY_BLOCKS_WORDS]; - - // Indices of the first and last free block in this region. Value is the - // block index + 1 so that 0 indicates no free block in this region for the - // corresponding slot. - region_free_blocks_t free_blocks_by_slot[NUM_TINY_SLOTS]; - - uint8_t pad[TINY_REGION_PAD]; -} * tiny_region_t; - -// The layout described above should result in a tiny_region_t being 1MB. -MALLOC_STATIC_ASSERT(TINY_REGION_SIZE == (1024 * 1024), "incorrect TINY_REGION_SIZE"); -MALLOC_STATIC_ASSERT(sizeof(struct tiny_region) == TINY_REGION_SIZE, "incorrect tiny_region_size"); - -/* - * Per-region meta data for tiny allocator - */ -#define REGION_TRAILER_FOR_TINY_REGION(r) (&(((tiny_region_t)(r))->trailer)) -#define MAGAZINE_INDEX_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->mag_index) -#define BYTES_USED_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->bytes_used) -#define OBJECTS_IN_USE_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->objects_in_use) - -/* - * Locate the block header for a pointer known to be within a tiny region. - */ -#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)&(((tiny_region_t)TINY_REGION_FOR_PTR(_p))->pairs)) - -/* - * Locate the inuse map for a given block header pointer. - */ -#define TINY_INUSE_FOR_HEADER(_h) ((void *)&(((tiny_header_inuse_pair_t *)(_h))->inuse)) - -/* - * Compute the bitmap index for a pointer known to be within a tiny region. - */ -#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_CEIL_BLOCKS - 1)) - -/* - * Get the pointer for a given index in a region. - */ -#define TINY_PTR_FOR_INDEX(index, region) (region_t)((void *)(((uintptr_t)(region)) + ((index) << SHIFT_TINY_QUANTUM))) - -/* - * Offset back to an szone_t given prior knowledge that this rack_t - * is contained within an szone_t. - * - * Note: the only place this is used, the dtrace probes, only occurs - * when the rack has been set up inside a scalable zone. 
Should - * this ever be used somewhere that this does not hold true - * (say, the test cases) then the pointer returned will be junk. - */ -#define TINY_SZONE_FROM_RACK(_r) \ - (szone_t *)((uintptr_t)(_r) - offsetof(struct szone_s, tiny_rack)) - - -#if !CONFIG_TINY_CACHE -#warning CONFIG_TINY_CACHE turned off -#endif - -#define TINY_REGION_PAYLOAD_BYTES (NUM_TINY_BLOCKS * TINY_QUANTUM) - -/********************* DEFINITIONS for small ************************/ - -/* - * Memory in the small range is allocated from regions (heaps) pointed to by the szone's hashed_regions - * pointer. - * - * Each region is laid out as a heap, followed by the metadata array, all within an 8MB (2^23) block. - * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap. There are - * 16319 512-blocks and the array is 16319*2 bytes, which totals 8387966, leaving 642 bytes unused. - * Once the region trailer is accounted for, there is room for 61 out-of-band free list entries in - * the remaining padding (or 6, if the region was split into 16320 blocks, not 16319). - * - * The 16-bit shorts in the region are used for allocation metadata. The MSB bit marks a block as - * either free, or not. The remaining 15-bits give the size of the allocation, defined in "msize", the - * quantum-shifted size of the allocation. - * - * The metadata table either: - * - * 1. Stores the allocation size in the first short for the block, with the MSB cleared to indicate - * that the block is allocated and in-use, or, - * - * 2. Stores the free-allocation size in the first and last shorts for the block, with the MSB set - * in both places to indicate that the block is freed. (Storing the range in last block allows - * for coalescing of adjacent free entries). - * - * 3. Zero, or "middle", meaning that this block in the region is not the start or end of an - * allocated block. - * - * The small zone represents the free list in one of two ways: - * - * 1. In-line free list entries. These are stored at the starting address of the just-freed memory - * and both the previous and next pointer are checksummed to attempt to detect use-after-free - * writes. - * - * An in-line free list entry is laid out as: - * |prev (uintptr_t)|checksum (uint8_t)|next (uintptr_t)|checksum (uint8_t) - * - * 2. Out-of-band free list entries. These utilitise the remaining padding in the 8mb region that - * follows the blocks, metadata and region trailer. Out-of-band entries are used *iff* the - * freed address lies on a page boundary and the freed region spans more than a page. If we were - * to store the free list entry in-line in that memory, it would keep the entire page dirty, - * so an out-of-band entry is used. - * - * An out-of-band free list entry is laid out as: - * |prev (uintptr_t)|next (uintptr_t)|ptr (uint16_t)| - * - * The szone maintains an array of 32 freelists, each of which is used to hold free objects - * of the corresponding quantum size. - */ - -#define SMALL_IS_FREE (1 << 15) -#define FOLLOWING_SMALL_PTR(ptr, msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM)) - -/* - * SMALL_IS_OOB is used to mark the MSB of OOB free list entries to show that they are in use, and - * distinguish them from their initial, empty, state. - */ -#define SMALL_IS_OOB (1 << 15) - -#define SMALL_ENTROPY_BITS 13 -#define SMALL_ENTROPY_MASK ((1 << SMALL_ENTROPY_BITS) - 1) - -/* - * Avoid having so much entropy that the end of a valid small allocation - * might overrun the end of the small region. 
- */ -#if SMALL_ENTROPY_MASK + NUM_SMALL_SLOTS > NUM_SMALL_BLOCKS -#error Too many entropy bits for small region requested -#endif - -#define SMALL_METADATA_SIZE (sizeof(region_trailer_t) + NUM_SMALL_BLOCKS * sizeof(msize_t)) -#define SMALL_REGION_SIZE ((NUM_SMALL_BLOCKS * SMALL_QUANTUM + SMALL_METADATA_SIZE + PAGE_MAX_SIZE - 1) & ~(PAGE_MAX_SIZE - 1)) - -#define SMALL_METADATA_START (NUM_SMALL_BLOCKS * SMALL_QUANTUM) - -/* - * Beginning and end pointers for a region's heap. - */ -#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region) -#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (NUM_SMALL_BLOCKS * SMALL_QUANTUM)) - -/* - * Locate the heap base for a pointer known to be within a small region. - */ -#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1))) -#define SMALL_OFFSET_FOR_PTR(_p) ((uintptr_t)(_p) & ((1 << SMALL_BLOCKS_ALIGN) - 1)) - -/* - * Convert between byte and msize units. - */ -#define SMALL_BYTES_FOR_MSIZE(_m) ((uint32_t)(_m) << SHIFT_SMALL_QUANTUM) -#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM) - -#define SMALL_PREVIOUS_MSIZE(ptr) (*SMALL_METADATA_FOR_PTR(ptr - 1) & ~SMALL_IS_FREE) - -/* - * Convert from msize unit to free list slot. - */ -#define SMALL_FREE_SLOT_COUNT(_r) \ - (NUM_SMALL_SLOTS + 1) -#define SMALL_FREE_SLOT_FOR_MSIZE(_r, _m) \ - (((_m) <= SMALL_FREE_SLOT_COUNT(_r)) ? ((_m) - 1) : (SMALL_FREE_SLOT_COUNT(_r) - 1)) -/* compare with MAGAZINE_FREELIST_BITMAP_WORDS */ -#define SMALL_FREELIST_BITMAP_WORDS(_r) ((SMALL_FREE_SLOT_COUNT(_r) + 31) >> 5) - -/* - * Offset back to an szone_t given prior knowledge that this rack_t - * is contained within an szone_t. - * - * Note: the only place this is used, the dtrace probes, only occurs - * when the rack has been set up inside a scalable zone. Should - * this ever be used somewhere that this does not hold true - * (say, the test cases) then the pointer returned will be junk. - */ -#define SMALL_SZONE_FROM_RACK(_r) \ - (szone_t *)((uintptr_t)(_r) - offsetof(struct szone_s, small_rack)) - -/* - * Layout of a small region - */ -typedef uint32_t small_block_t[SMALL_QUANTUM / sizeof(uint32_t)]; -#define SMALL_HEAP_SIZE (NUM_SMALL_BLOCKS * sizeof(small_block_t)) -#define SMALL_OOB_COUNT ((SMALL_REGION_SIZE - SMALL_HEAP_SIZE - SMALL_METADATA_SIZE) / sizeof(oob_free_entry_s)) -#define SMALL_OOB_SIZE (SMALL_OOB_COUNT * sizeof(oob_free_entry_s)) -#define SMALL_REGION_PAD (SMALL_REGION_SIZE - SMALL_HEAP_SIZE - SMALL_METADATA_SIZE - SMALL_OOB_SIZE) - -typedef struct small_region { - small_block_t blocks[NUM_SMALL_BLOCKS]; - region_trailer_t trailer; - msize_t small_meta_words[NUM_SMALL_BLOCKS]; - oob_free_entry_s small_oob_free_entries[SMALL_OOB_COUNT]; - uint8_t pad[SMALL_REGION_PAD]; -} * small_region_t; - -// The layout described above should result in a small_region_t being 8MB. -MALLOC_STATIC_ASSERT(SMALL_REGION_SIZE == (8 * 1024 * 1024), "incorrect SMALL_REGION_SIZE"); -MALLOC_STATIC_ASSERT(sizeof(struct small_region) == SMALL_REGION_SIZE, "incorrect small_region_size"); - -/* - * Per-region meta data for small allocator - */ -#define REGION_TRAILER_FOR_SMALL_REGION(r) (&(((small_region_t)(r))->trailer)) -#define MAGAZINE_INDEX_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->mag_index) -#define BYTES_USED_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->bytes_used) - -/* - * Locate the metadata base for a pointer known to be within a small region. 
- */
-#define SMALL_META_HEADER_FOR_PTR(_p) (((small_region_t)SMALL_REGION_FOR_PTR(_p))->small_meta_words)
-
-/*
- * Compute the metadata index for a pointer known to be within a small region.
- */
-#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_CEIL_BLOCKS - 1))
-
-/*
- * Find the metadata word for a pointer known to be within a small region.
- */
-#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))
-
-/*
- * Determine whether a pointer known to be within a small region points to memory which is free.
- */
-#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)
-
-/*
- * Extract the msize value for a pointer known to be within a small region.
- */
-#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
-
-#if !CONFIG_SMALL_CACHE
-#warning CONFIG_SMALL_CACHE turned off
-#endif
-
-#define SMALL_REGION_PAYLOAD_BYTES (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
-
-/********************* DEFINITIONS for medium ************************/
-
-/*
- * Memory in the medium range is allocated from regions (heaps) pointed to by the szone's hashed_regions
- * pointer.
- *
- * Each region is laid out as a heap, followed by the metadata arrays, all within a 128MB (2^27) block
- * (the size the static assert below requires). The metadata is arranged as arrays of shorts, one entry
- * for each MEDIUM_QUANTUM (32KB block) in the heap, so each of the two arrays (allocation state and
- * madvise state) is NUM_MEDIUM_BLOCKS*2 bytes; the remainder of the block holds the region trailer,
- * the out-of-band free list entries and padding.
- *
- * The 16-bit shorts in the region are used for allocation metadata. The MSB marks a block as
- * either free or in use. The remaining 15 bits give the size of the allocation, defined in "msize", the
- * quantum-shifted size of the allocation.
- *
- * The metadata table either:
- *
- * 1. Stores the allocation size in the first short for the block, with the MSB cleared to indicate
- * that the block is allocated and in-use, or,
- *
- * 2. Stores the free-allocation size in the first and last shorts for the block, with the MSB set
- * in both places to indicate that the block is freed. (Storing the range in the last block allows
- * for coalescing of adjacent free entries).
- *
- * 3. Zero, or "middle", meaning that this block in the region is not the start or end of an
- * allocated block.
- *
- * The medium zone represents the free list in one of two ways:
- *
- * 1. In-line free list entries. These are stored at the starting address of the just-freed memory
- * and both the previous and next pointer are checksummed to attempt to detect use-after-free
- * writes.
- *
- * An in-line free list entry is laid out as:
- * |prev (uintptr_t)|checksum (uint8_t)|next (uintptr_t)|checksum (uint8_t)|
- *
- * 2. Out-of-band free list entries. These utilise the remaining padding in the region that
- * follows the blocks, metadata and region trailer. Out-of-band entries are used *iff* the
- * freed address lies on a page boundary and the freed region spans more than a page. If we were
- * to store the free list entry in-line in that memory, it would keep the entire page dirty,
- * so an out-of-band entry is used.
- *
- * An out-of-band free list entry is laid out as:
- * |prev (uintptr_t)|next (uintptr_t)|ptr (uint16_t)|
- *
- * The szone maintains an array of 256 freelists, each of which is used to hold free objects
- * of the corresponding quantum size.
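- * For example, a 96KB allocation spans three 32KB quanta, so its msize is 3:
- * the block's first metadata short holds 0x0003 while it is in use, and the
- * first and last shorts hold 0x8003 (msize with the free bit set) once freed.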
- */
-
-#define MEDIUM_IS_FREE (1 << 15)
-#define MEDIUM_IS_ADVISED (1 << 15)
-#define FOLLOWING_MEDIUM_PTR(ptr, msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_MEDIUM_QUANTUM))
-#define MEDIUM_MAX_MSIZE ((uint16_t)(NUM_MEDIUM_BLOCKS >> SHIFT_MEDIUM_QUANTUM) \
- & ~(uint16_t)MEDIUM_IS_FREE)
-
-// Ensure that we don't overflow the number of blocks that msize can
-// represent (without running into the free bit).
-MALLOC_STATIC_ASSERT(NUM_MEDIUM_BLOCKS <= (uint16_t)(~MEDIUM_IS_FREE),
- "NUM_MEDIUM_BLOCKS should fit into a msize_t");
-
-/*
- * MEDIUM_IS_OOB is used to mark the MSB of OOB free list entries to show that they are in use, and
- * distinguish them from their initial, empty, state.
- */
-#define MEDIUM_IS_OOB (1 << 15)
-
-#define MEDIUM_ENTROPY_BITS 11
-#define MEDIUM_ENTROPY_MASK ((1 << MEDIUM_ENTROPY_BITS) - 1)
-
-/*
- * Avoid having so much entropy that the end of a valid medium allocation
- * might overrun the end of the medium region.
- */
-#if MEDIUM_ENTROPY_MASK + NUM_MEDIUM_SLOTS > NUM_MEDIUM_BLOCKS
-#error Too many entropy bits for medium region requested
-#endif
-
-#define MEDIUM_METADATA_SIZE (sizeof(region_trailer_t) + \
- (NUM_MEDIUM_BLOCKS * sizeof(msize_t)) + \
- (NUM_MEDIUM_BLOCKS * sizeof(msize_t)))
-// Note: The other instances of x_REGION_SIZE use PAGE_MAX_SIZE as the rounding
-// and truncating constant, but because medium's quantum size is larger than a
-// page, the quantum size is used here instead.
-#define MEDIUM_REGION_SIZE ((NUM_MEDIUM_BLOCKS * MEDIUM_QUANTUM + \
- MEDIUM_METADATA_SIZE + MEDIUM_QUANTUM - 1) & ~(MEDIUM_QUANTUM - 1))
-
-#define MEDIUM_METADATA_START (NUM_MEDIUM_BLOCKS * MEDIUM_QUANTUM)
-
-/*
- * Beginning and end pointers for a region's heap.
- */
-#define MEDIUM_REGION_ADDRESS(region) ((unsigned char *)region)
-#define MEDIUM_REGION_END(region) (MEDIUM_REGION_ADDRESS(region) + (NUM_MEDIUM_BLOCKS * MEDIUM_QUANTUM))
-
-/*
- * Locate the heap base for a pointer known to be within a medium region.
- */
-#define MEDIUM_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1ull << MEDIUM_BLOCKS_ALIGN) - 1)))
-#define MEDIUM_OFFSET_FOR_PTR(_p) ((uintptr_t)(_p) & ((1ull << MEDIUM_BLOCKS_ALIGN) - 1))
-
-/*
- * Convert between byte and msize units.
- */
-#define MEDIUM_BYTES_FOR_MSIZE(_m) ((uint32_t)(_m) << SHIFT_MEDIUM_QUANTUM)
-#define MEDIUM_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_MEDIUM_QUANTUM)
-
-#define MEDIUM_PREVIOUS_MSIZE(ptr) (*MEDIUM_METADATA_FOR_PTR(ptr - 1) & ~MEDIUM_IS_FREE)
-
-/*
- * Convert from msize unit to free list slot.
- */
-#define MEDIUM_FREE_SLOT_COUNT(_r) (NUM_MEDIUM_SLOTS + 1)
-#define MEDIUM_FREE_SLOT_FOR_MSIZE(_r, _m) \
- (((_m) <= MEDIUM_FREE_SLOT_COUNT(_r)) ? ((_m) - 1) : (MEDIUM_FREE_SLOT_COUNT(_r) - 1))
-/* compare with MAGAZINE_FREELIST_BITMAP_WORDS */
-#define MEDIUM_FREELIST_BITMAP_WORDS(_r) ((MEDIUM_FREE_SLOT_COUNT(_r) + 31) >> 5)
-
-/*
- * Offset back to an szone_t given prior knowledge that this rack_t
- * is contained within an szone_t.
- *
- * Note: the only place this is used, the dtrace probes, only occurs
- * when the rack has been set up inside a scalable zone. Should
- * this ever be used somewhere that this does not hold true
- * (say, the test cases) then the pointer returned will be junk.
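- * (The subtraction below is the usual container_of() idiom: the rack's
- * address minus offsetof(struct szone_s, medium_rack) yields the address of
- * the enclosing szone_t.)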
- */
-#define MEDIUM_SZONE_FROM_RACK(_r) \
- (szone_t *)((uintptr_t)(_r) - offsetof(struct szone_s, medium_rack))
-
-/*
- * Layout of a medium region
- */
-typedef uint32_t medium_block_t[MEDIUM_QUANTUM / sizeof(uint32_t)];
-#define MEDIUM_HEAP_SIZE (NUM_MEDIUM_BLOCKS * sizeof(medium_block_t))
-#define MEDIUM_OOB_COUNT ((MEDIUM_REGION_SIZE - MEDIUM_HEAP_SIZE - \
- MEDIUM_METADATA_SIZE) / sizeof(oob_free_entry_s))
-#define MEDIUM_OOB_SIZE (MEDIUM_OOB_COUNT * sizeof(oob_free_entry_s))
-#define MEDIUM_REGION_PAD (MEDIUM_REGION_SIZE - MEDIUM_HEAP_SIZE - \
- MEDIUM_METADATA_SIZE - MEDIUM_OOB_SIZE)
-
-typedef struct medium_region {
- medium_block_t blocks[NUM_MEDIUM_BLOCKS];
- region_trailer_t trailer;
- msize_t medium_meta_words[NUM_MEDIUM_BLOCKS];
- msize_t medium_madvise_words[NUM_MEDIUM_BLOCKS];
- oob_free_entry_s medium_oob_free_entries[MEDIUM_OOB_COUNT];
- uint8_t pad[MEDIUM_REGION_PAD];
-} * medium_region_t;
-
-// The layout described above should result in a medium_region_t being 128MB.
-MALLOC_STATIC_ASSERT(sizeof(struct medium_region) == 128 * 1024 * 1024,
- "incorrect medium_region_size");
-
-/*
- * Per-region meta data for medium allocator
- */
-#define REGION_TRAILER_FOR_MEDIUM_REGION(r) (&(((medium_region_t)(r))->trailer))
-#define MAGAZINE_INDEX_FOR_MEDIUM_REGION(r) (REGION_TRAILER_FOR_MEDIUM_REGION(r)->mag_index)
-#define BYTES_USED_FOR_MEDIUM_REGION(r) (REGION_TRAILER_FOR_MEDIUM_REGION(r)->bytes_used)
-
-/*
- * Locate the metadata base for a pointer known to be within a medium region.
- */
-#define MEDIUM_META_HEADER_FOR_PTR(_p) (((medium_region_t)MEDIUM_REGION_FOR_PTR(_p))->medium_meta_words)
-#define MEDIUM_MADVISE_HEADER_FOR_PTR(_p) (((medium_region_t)MEDIUM_REGION_FOR_PTR(_p))->medium_madvise_words)
-
-/*
- * Compute the metadata index for a pointer known to be within a medium region.
- */
-#define MEDIUM_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_MEDIUM_QUANTUM) & (NUM_MEDIUM_CEIL_BLOCKS - 1))
-#define MEDIUM_PTR_FOR_META_INDEX(_region, _i) ((uintptr_t)(_region) + MEDIUM_BYTES_FOR_MSIZE(_i))
-
-/*
- * Find the metadata word for a pointer known to be within a medium region.
- */
-#define MEDIUM_METADATA_FOR_PTR(_p) (MEDIUM_META_HEADER_FOR_PTR(_p) + MEDIUM_META_INDEX_FOR_PTR(_p))
-
-/*
- * Determine whether a pointer known to be within a medium region points to memory which is free.
- */
-#define MEDIUM_PTR_IS_FREE(_p) (*MEDIUM_METADATA_FOR_PTR(_p) & MEDIUM_IS_FREE)
-
-/*
- * Extract the msize value for a pointer known to be within a medium region.
- */
-#define MEDIUM_PTR_SIZE(_p) (*MEDIUM_METADATA_FOR_PTR(_p) & ~MEDIUM_IS_FREE)
-
-#if !CONFIG_MEDIUM_CACHE
-#warning CONFIG_MEDIUM_CACHE turned off
-#endif
-
-#define MEDIUM_REGION_PAYLOAD_BYTES (NUM_MEDIUM_BLOCKS * MEDIUM_QUANTUM)
-
-/************************* DEFINITIONS for large ****************************/
-
-
-typedef struct large_entry_s {
- vm_address_t address;
- vm_size_t size;
- boolean_t did_madvise_reusable;
-} large_entry_t;
-
-#if !CONFIG_LARGE_CACHE && DEBUG_MALLOC
-#warning CONFIG_LARGE_CACHE turned off
-#endif
-
-#if CONFIG_MEDIUM_ALLOCATOR
-#define LARGE_THRESHOLD(szone) ((szone)->is_medium_engaged ? \
- (MEDIUM_LIMIT_THRESHOLD) : (SMALL_LIMIT_THRESHOLD))
-#else // CONFIG_MEDIUM_ALLOCATOR
-#define LARGE_THRESHOLD(szone) (SMALL_LIMIT_THRESHOLD)
-#endif // CONFIG_MEDIUM_ALLOCATOR
-
-
-/*******************************************************************************
- * Per-processor magazine for tiny and small allocators
- ******************************************************************************/
-
-typedef struct magazine_s { // vm_allocate()'d, so the array of magazines is page-aligned to begin with.
- // Take magazine_lock first, Depot lock when needed for recirc, then szone->{tiny,small}_regions_lock when needed for alloc
- _malloc_lock_s magazine_lock MALLOC_CACHE_ALIGN;
- // Protection for the critical section that does allocate_pages outside the magazine_lock
- volatile boolean_t alloc_underway;
-
- // One element deep "death row", optimizes malloc/free/malloc for identical size.
- void *mag_last_free;
- msize_t mag_last_free_msize; // msize for mag_last_free
-#if MALLOC_TARGET_64BIT
- uint32_t _pad;
-#endif
- region_t mag_last_free_rgn; // holds the region for mag_last_free
-
- free_list_t mag_free_list[MAGAZINE_FREELIST_SLOTS];
- uint32_t mag_bitmap[MAGAZINE_FREELIST_BITMAP_WORDS];
-
- // the first and last free region in the last block are treated as big blocks in use that are not accounted for
- size_t mag_bytes_free_at_end;
- size_t mag_bytes_free_at_start;
- region_t mag_last_region; // Valid iff mag_bytes_free_at_end || mag_bytes_free_at_start > 0
-
- // bean counting ...
- size_t mag_num_bytes_in_objects;
- size_t num_bytes_in_magazine;
- unsigned mag_num_objects;
-
- // recirculation list -- invariant: all regions owned by this magazine that meet the emptiness criteria
- // are located nearer to the head of the list than any region that doesn't satisfy that criteria.
- // Doubly linked list for efficient extraction.
- unsigned recirculation_entries;
- region_trailer_t *firstNode;
- region_trailer_t *lastNode;
-
-#if MALLOC_TARGET_64BIT
- uintptr_t pad[320 - 14 - MAGAZINE_FREELIST_SLOTS -
- (MAGAZINE_FREELIST_BITMAP_WORDS + 1) / 2];
-#else
- uintptr_t pad[320 - 16 - MAGAZINE_FREELIST_SLOTS -
- MAGAZINE_FREELIST_BITMAP_WORDS];
-#endif
-
-} magazine_t;
-
-#if MALLOC_TARGET_64BIT
-MALLOC_STATIC_ASSERT(sizeof(magazine_t) == 2560, "Incorrect padding in magazine_t");
-#else
-MALLOC_STATIC_ASSERT(sizeof(magazine_t) == 1280, "Incorrect padding in magazine_t");
-#endif
-
-#define TINY_MAX_MAGAZINES 64 /* MUST BE A POWER OF 2! */
-#define TINY_MAGAZINE_PAGED_SIZE \
- (((sizeof(magazine_t) * (TINY_MAX_MAGAZINES + 1)) + vm_page_quanta_size - 1) & \
- ~(vm_page_quanta_size - 1)) /* + 1 for the Depot */
-
-#define SMALL_MAX_MAGAZINES 64 /* MUST BE A POWER OF 2! */
-#define SMALL_MAGAZINE_PAGED_SIZE \
- (((sizeof(magazine_t) * (SMALL_MAX_MAGAZINES + 1)) + vm_page_quanta_size - 1) & \
- ~(vm_page_quanta_size - 1)) /* + 1 for the Depot */
-
-#define DEPOT_MAGAZINE_INDEX -1
-
-/****************************** zone itself ***********************************/
-
-/*
- * Note that objects whose addresses are held in pointers here must be pursued
- * individually in the {tiny,small}_in_use_enumeration() routines. See for
- * example the treatment of region_hash_generation and tiny_magazines below.
- */
-
-typedef struct szone_s { // vm_allocate()'d, so page-aligned to begin with.
- malloc_zone_t basic_zone; // first page will be given read-only protection - uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)]; - - unsigned long cpu_id_key; // unused - // remainder of structure is R/W (contains no function pointers) - unsigned debug_flags; - void *log_address; - - /* Allocation racks per allocator type. */ - struct rack_s tiny_rack; - struct rack_s small_rack; - struct rack_s medium_rack; - - /* large objects: all the rest */ - _malloc_lock_s large_szone_lock MALLOC_CACHE_ALIGN; // One customer at a time for large - unsigned num_large_objects_in_use; - unsigned num_large_entries; - large_entry_t *large_entries; // hashed by location; null entries don't count - size_t num_bytes_in_large_objects; - -#if CONFIG_LARGE_CACHE - int large_entry_cache_oldest; - int large_entry_cache_newest; - large_entry_t large_entry_cache[LARGE_ENTRY_CACHE_SIZE_HIGH]; // "death row" for large malloc/free - int large_cache_depth; - size_t large_cache_entry_limit; - boolean_t large_legacy_reset_mprotect; - size_t large_entry_cache_reserve_bytes; - size_t large_entry_cache_reserve_limit; - size_t large_entry_cache_bytes; // total size of death row, bytes -#endif - - /* flag and limits pertaining to altered malloc behavior for systems with - * large amounts of physical memory */ - bool is_medium_engaged; - - /* security cookie */ - uintptr_t cookie; - - /* The purgeable zone constructed by create_purgeable_zone() would like to hand off tiny and small - * allocations to the default scalable zone. Record the latter as the "helper" zone here. */ - struct szone_s *helper_zone; - - boolean_t flotsam_enabled; -} szone_t; - -#define SZONE_PAGED_SIZE round_page_quanta((sizeof(szone_t))) - -#endif // __MAGAZINE_ZONE_H diff --git a/src/libmalloc/src/magmallocProvider.d b/src/libmalloc/src/magmallocProvider.d deleted file mode 100644 index ceb6c73d9..000000000 --- a/src/libmalloc/src/magmallocProvider.d +++ /dev/null @@ -1,18 +0,0 @@ -provider magmalloc { - probe refreshIndex(void *, int, int); - probe depotRegion(void *, int, void *, int, int); - probe recircRegion(void *, int, void *, int, int); - probe allocRegion(void *, int, void *, int); - probe deallocRegion(void *, void *, int); - probe madvfreeRegion(void *, void *, void *, int); - probe pressureReliefBegin(void *, char *, int); - probe pressureReliefEnd(void *, char *, int, int); - probe mallocErrorBreak(); -}; - -#pragma D attributes Evolving/Evolving/ISA provider magmalloc provider -#pragma D attributes Private/Private/Unknown provider magmalloc module -#pragma D attributes Private/Private/Unknown provider magmalloc function -#pragma D attributes Evolving/Evolving/ISA provider magmalloc name -#pragma D attributes Evolving/Evolving/ISA provider magmalloc args - diff --git a/src/libmalloc/src/malloc.c b/src/libmalloc/src/malloc.c deleted file mode 100644 index f229d806d..000000000 --- a/src/libmalloc/src/malloc.c +++ /dev/null @@ -1,2780 +0,0 @@ -/* - * Copyright (c) 1999, 2000, 2003, 2005, 2008, 2012 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -#if TARGET_OS_IPHONE -// malloc_report(ASL_LEVEL_INFO...) on iOS doesn't show up in the Xcode Console log of the device, -// but ASL_LEVEL_NOTICE does. So raising the log level is helpful. -#undef ASL_LEVEL_INFO -#define ASL_LEVEL_INFO ASL_LEVEL_NOTICE -#endif // TARGET_OS_IPHONE - -#define USE_SLEEP_RATHER_THAN_ABORT 0 - -static _malloc_lock_s _malloc_lock = _MALLOC_LOCK_INIT; -#define MALLOC_LOCK() _malloc_lock_lock(&_malloc_lock) -#define MALLOC_TRY_LOCK() _malloc_lock_trylock(&_malloc_lock) -#define MALLOC_UNLOCK() _malloc_lock_unlock(&_malloc_lock) -#define MALLOC_REINIT_LOCK() _malloc_lock_init(&_malloc_lock) - -/* The following variables are exported for the benefit of performance tools - * - * It should always be safe to first read malloc_num_zones, then read - * malloc_zones without taking the lock, if only iteration is required and - * provided that when malloc_destroy_zone is called all prior operations on that - * zone are complete and no further calls referencing that zone can be made. - */ -int32_t malloc_num_zones = 0; -int32_t malloc_num_zones_allocated = 0; -malloc_zone_t **malloc_zones = 0; -malloc_logger_t *malloc_logger = NULL; -static malloc_zone_t *initial_default_zone = NULL; - -unsigned malloc_debug_flags = 0; -boolean_t malloc_tracing_enabled = false; - -unsigned malloc_check_start = 0; // 0 means don't check -unsigned malloc_check_counter = 0; -unsigned malloc_check_each = 1000; - -static int malloc_check_sleep = 100; // default 100 second sleep -static int malloc_check_abort = 0; // default is to sleep, not abort - -static os_once_t _malloc_initialize_pred; - -static -struct msl { - void *dylib; - - void (*handle_memory_event) (unsigned long event); - boolean_t (*stack_logging_locked) (void); - void (*fork_prepare) (void); - void (*fork_parent) (void); - void (*fork_child) (void); - - - // TODO delete these ones - kern_return_t (*get_frames_for_address)(task_t task, - mach_vm_address_t address, - mach_vm_address_t *stack_frames_buffer, - uint32_t max_stack_frames, - uint32_t *count); - - uint64_t (*stackid_for_vm_region) (task_t task, mach_vm_address_t address); - - kern_return_t (*get_frames_for_stackid) (task_t task, - uint64_t stack_identifier, - mach_vm_address_t *stack_frames_buffer, - uint32_t max_stack_frames, - uint32_t *count, - bool *last_frame_is_threadid); - - - kern_return_t (*uniquing_table_read_stack) (struct backtrace_uniquing_table *uniquing_table, - uint64_t stackid, - mach_vm_address_t *out_frames_buffer, - uint32_t *out_frames_count, - uint32_t max_frames); -} msl = {}; - -/* - * Counters that coordinate zone destruction (in malloc_zone_unregister) with - * find_registered_zone (here abbreviated as FRZ). 
- */ -static int32_t volatile counterAlice = 0, counterBob = 0; -static int32_t volatile * volatile pFRZCounterLive = &counterAlice; -static int32_t volatile * volatile pFRZCounterDrain = &counterBob; - -unsigned int _os_cpu_number_override = -1; - -static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline)); - -#define MALLOC_LOG_TYPE_ALLOCATE stack_logging_type_alloc -#define MALLOC_LOG_TYPE_DEALLOCATE stack_logging_type_dealloc -#define MALLOC_LOG_TYPE_HAS_ZONE stack_logging_flag_zone -#define MALLOC_LOG_TYPE_CLEARED stack_logging_flag_cleared - -#define DEFAULT_MALLOC_ZONE_STRING "DefaultMallocZone" -#define DEFAULT_PUREGEABLE_ZONE_STRING "DefaultPurgeableMallocZone" -#define MALLOC_HELPER_ZONE_STRING "MallocHelperZone" - -MALLOC_NOEXPORT -unsigned int phys_ncpus; - -MALLOC_NOEXPORT -unsigned int logical_ncpus; - -MALLOC_NOEXPORT -unsigned int hyper_shift; - -// Boot argument for max magazine control -static const char max_magazines_boot_arg[] = "malloc_max_magazines"; - -static const char large_expanded_cache_threshold_boot_arg[] = "malloc_large_expanded_cache_threshold"; - -#if CONFIG_MEDIUM_ALLOCATOR -static const char medium_enabled_boot_arg[] = "malloc_medium_zone"; -static const char max_medium_magazines_boot_arg[] = "malloc_max_medium_magazines"; -static const char medium_activation_threshold_boot_arg[] = "malloc_medium_activation_threshold"; -#endif // CONFIG_MEDIUM_ALLOCATOR - -/********* Utilities ************/ -static bool _malloc_entropy_initialized; - -#if !TARGET_OS_DRIVERKIT -#include - -typedef void * (*dlopen_t) (const char * __path, int __mode); -typedef void * (*dlsym_t) (void * __handle, const char * __symbol); - -static dlopen_t _dlopen = NULL; -static dlsym_t _dlsym = NULL; -#else -#define _dlopen(...) NULL -#define _dlsym(...) NULL -#endif // TARGET_OS_DRIVERKIT - -void __malloc_init(const char *apple[]); - -static int -__entropy_from_kernel(const char *str) -{ - unsigned long long val; - char tmp[20], *p; - int idx = 0; - - /* Skip over key to the first value */ - str = strchr(str, '='); - if (str == NULL) { - return 0; - } - str++; - - while (str && idx < sizeof(malloc_entropy) / sizeof(malloc_entropy[0])) { - strlcpy(tmp, str, 20); - p = strchr(tmp, ','); - if (p) { - *p = '\0'; - } - val = strtoull_l(tmp, NULL, 0, NULL); - malloc_entropy[idx] = (uint64_t)val; - idx++; - if ((str = strchr(str, ',')) != NULL) { - str++; - } - } - return idx; -} - -static void -__malloc_init_from_bootargs(const char *bootargs) -{ - // The maximum number of magazines can be set either via a - // boot argument or from the environment. Get the boot argument value - // here and store it. We can't bounds check it until we have phys_ncpus, - // which happens later in _malloc_initialize(), along with handling - // of the environment value setting. 
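/*
 * Illustrative sketch (editor's addition, not part of the deleted file): the
 * boot-argument handling below relies on a key=value scan of the
 * kern.bootargs string. The helper name bootarg_value_for_key() is invented
 * for this example; the real lookup is malloc_common_value_for_key_copy()
 * in malloc_common.c.
 */
#include <string.h>

static const char *
bootarg_value_for_key(const char *bootargs, const char *key,
		char *buf, size_t buflen)
{
	size_t keylen = strlen(key);
	const char *p = bootargs;
	while ((p = strstr(p, key)) != NULL) {
		// Require a whole-word match immediately followed by '='.
		if ((p == bootargs || p[-1] == ' ') && p[keylen] == '=') {
			const char *v = p + keylen + 1;
			size_t n = strcspn(v, " ");	// value runs to the next space
			if (n >= buflen) {
				n = buflen - 1;		// truncate to the caller's buffer
			}
			memcpy(buf, v, n);
			buf[n] = '\0';
			return buf;
		}
		p += keylen;				// skip this partial match
	}
	return NULL;					// key absent
}
// Usage: with bootargs "debug=0x1 malloc_max_magazines=4" and key
// "malloc_max_magazines", the function yields the string "4".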
char value_buf[256];
- const char *flag = malloc_common_value_for_key_copy(bootargs,
- max_magazines_boot_arg, value_buf, sizeof(value_buf));
- if (flag) {
- const char *endp;
- long value = malloc_common_convert_to_long(flag, &endp);
- if (!*endp && value >= 0) {
- max_magazines = (unsigned int)value;
- } else {
- malloc_report(ASL_LEVEL_ERR,
- "malloc_max_magazines must be positive - ignored.\n");
- }
- }
-
- flag = malloc_common_value_for_key_copy(bootargs,
- large_expanded_cache_threshold_boot_arg, value_buf, sizeof(value_buf));
- if (flag) {
- const char *endp;
- long value = malloc_common_convert_to_long(flag, &endp);
- if (!*endp && value >= 0) {
- magazine_large_expanded_cache_threshold = (unsigned int)value;
- } else {
- malloc_report(ASL_LEVEL_ERR,
- "malloc_large_expanded_cache_threshold must be positive - ignored.\n");
- }
- }
-
-#if CONFIG_MEDIUM_ALLOCATOR
- flag = malloc_common_value_for_key_copy(bootargs, medium_enabled_boot_arg,
- value_buf, sizeof(value_buf));
- if (flag) {
- const char *endp;
- long value = malloc_common_convert_to_long(flag, &endp);
- if (!*endp) {
- magazine_medium_enabled = (value != 0);
- }
- }
-
- flag = malloc_common_value_for_key_copy(bootargs,
- medium_activation_threshold_boot_arg, value_buf, sizeof(value_buf));
- if (flag) {
- const char *endp;
- long value = malloc_common_convert_to_long(flag, &endp);
- if (!*endp && value >= 0) {
- magazine_medium_active_threshold = (uint64_t)value;
- } else {
- malloc_report(ASL_LEVEL_ERR,
- "malloc_medium_activation_threshold must be positive - ignored.\n");
- }
- }
-
- flag = malloc_common_value_for_key_copy(bootargs,
- max_medium_magazines_boot_arg, value_buf, sizeof(value_buf));
- if (flag) {
- const char *endp;
- long value = malloc_common_convert_to_long(flag, &endp);
- if (!*endp && value >= 0) {
- max_medium_magazines = (int)value;
- } else {
- malloc_report(ASL_LEVEL_ERR,
- "malloc_max_medium_magazines must be positive - ignored.\n");
- }
- }
-#endif // CONFIG_MEDIUM_ALLOCATOR
-}
-
-/* TODO: Investigate adding _malloc_initialize() into this libSystem initializer */
-void
-__malloc_init(const char *apple[])
-{
- // We could try to be clever and cater for arbitrary length bootarg
- // strings, but it's probably not worth it, especially as we would need
- // to temporarily allocate at least a page of memory to read the bootargs
- // into.
- char bootargs[1024] = { '\0' };
- size_t len = sizeof(bootargs) - 1;
- if (!sysctlbyname("kern.bootargs", bootargs, &len, NULL, 0) && len > 0) {
- bootargs[len] = '\0'; // NUL-terminate at the length sysctl reported; indexing len + 1 could run one past the buffer
- }
-
-#if CONFIG_NANOZONE
- // TODO: envp should be passed down from Libsystem
- const char **envp = (const char **)*_NSGetEnviron();
- nano_common_init(envp, apple, bootargs);
-#endif
-
- const char **p;
- for (p = apple; p && *p; p++) {
- if (strstr(*p, "malloc_entropy") == *p) {
- int count = __entropy_from_kernel(*p);
- bzero((void *)*p, strlen(*p));
-
- if (sizeof(malloc_entropy) / sizeof(malloc_entropy[0]) == count) {
- _malloc_entropy_initialized = true;
- }
- break;
- }
- }
- if (!_malloc_entropy_initialized) {
- getentropy((void*)malloc_entropy, sizeof(malloc_entropy));
- _malloc_entropy_initialized = true;
- }
-
- __malloc_init_from_bootargs(bootargs);
- mvm_aslr_init();
-}
-
-MALLOC_NOEXPORT malloc_zone_t* lite_zone = NULL;
-
-MALLOC_ALWAYS_INLINE
-static inline malloc_zone_t *
-runtime_default_zone() {
- return (lite_zone) ?
lite_zone : inline_malloc_default_zone(); -} - -static size_t -default_zone_size(malloc_zone_t *zone, const void *ptr) -{ - zone = runtime_default_zone(); - - return zone->size(zone, ptr); -} - -static void * -default_zone_malloc(malloc_zone_t *zone, size_t size) -{ - zone = runtime_default_zone(); - - return zone->malloc(zone, size); -} - -static void * -default_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) -{ - zone = runtime_default_zone(); - - return zone->calloc(zone, num_items, size); -} - -static void * -default_zone_valloc(malloc_zone_t *zone, size_t size) -{ - zone = runtime_default_zone(); - - return zone->valloc(zone, size); -} - -static void -default_zone_free(malloc_zone_t *zone, void *ptr) -{ - zone = runtime_default_zone(); - - return zone->free(zone, ptr); -} - -static void * -default_zone_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) -{ - zone = runtime_default_zone(); - - return zone->realloc(zone, ptr, new_size); -} - -static void -default_zone_destroy(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->destroy(zone); -} - -static unsigned -default_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned count) -{ - zone = runtime_default_zone(); - - return zone->batch_malloc(zone, size, results, count); -} - -static void -default_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned count) -{ - zone = runtime_default_zone(); - - return zone->batch_free(zone, to_be_freed, count); -} - -static void * -default_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - zone = runtime_default_zone(); - - return zone->memalign(zone, alignment, size); -} - -static void -default_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - zone = runtime_default_zone(); - - return zone->free_definite_size(zone, ptr, size); -} - -static size_t -default_zone_pressure_relief(malloc_zone_t *zone, size_t goal) -{ - zone = runtime_default_zone(); - - return zone->pressure_relief(zone, goal); -} - -static boolean_t -default_zone_malloc_claimed_address(malloc_zone_t *zone, void *ptr) -{ - zone = runtime_default_zone(); - - return malloc_zone_claimed_address(zone, ptr); -} - -static kern_return_t -default_zone_ptr_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - vm_address_t zone_address, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - malloc_zone_t *zone = runtime_default_zone(); - - return zone->introspect->enumerator(task, context, type_mask, (vm_address_t) zone, reader, recorder); -} - -static size_t -default_zone_good_size(malloc_zone_t *zone, size_t size) -{ - zone = runtime_default_zone(); - - return zone->introspect->good_size(zone, size); -} - -static boolean_t -default_zone_check(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->introspect->check(zone); -} - -static void -default_zone_print(malloc_zone_t *zone, boolean_t verbose) -{ - zone = runtime_default_zone(); - - return (void)zone->introspect->print(zone, verbose); -} - -static void -default_zone_log(malloc_zone_t *zone, void *log_address) -{ - zone = runtime_default_zone(); - - return zone->introspect->log(zone, log_address); -} - -static void -default_zone_force_lock(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->introspect->force_lock(zone); -} - -static void -default_zone_force_unlock(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->introspect->force_unlock(zone); -} - -static void 
-default_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) -{ - zone = runtime_default_zone(); - - return zone->introspect->statistics(zone, stats); -} - -static boolean_t -default_zone_locked(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->introspect->zone_locked(zone); -} - -static void -default_zone_reinit_lock(malloc_zone_t *zone) -{ - zone = runtime_default_zone(); - - return zone->introspect->reinit_lock(zone); -} - -static struct malloc_introspection_t default_zone_introspect = { - default_zone_ptr_in_use_enumerator, - default_zone_good_size, - default_zone_check, - default_zone_print, - default_zone_log, - default_zone_force_lock, - default_zone_force_unlock, - default_zone_statistics, - default_zone_locked, - NULL, - NULL, - NULL, - NULL, - default_zone_reinit_lock -}; - -typedef struct { - malloc_zone_t malloc_zone; - uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)]; -} virtual_default_zone_t; - -static virtual_default_zone_t virtual_default_zone -__attribute__((section("__DATA,__v_zone"))) -__attribute__((aligned(PAGE_MAX_SIZE))) = { - NULL, - NULL, - default_zone_size, - default_zone_malloc, - default_zone_calloc, - default_zone_valloc, - default_zone_free, - default_zone_realloc, - default_zone_destroy, - DEFAULT_MALLOC_ZONE_STRING, - default_zone_batch_malloc, - default_zone_batch_free, - &default_zone_introspect, - 10, - default_zone_memalign, - default_zone_free_definite_size, - default_zone_pressure_relief, - default_zone_malloc_claimed_address, -}; - -static malloc_zone_t *default_zone = &virtual_default_zone.malloc_zone; - -MALLOC_NOEXPORT -/*static*/ boolean_t -has_default_zone0(void) -{ - if (!malloc_zones) { - return false; - } - - return initial_default_zone == malloc_zones[0]; -} - -static inline malloc_zone_t *find_registered_zone(const void *, size_t *) __attribute__((always_inline)); -static inline malloc_zone_t * -find_registered_zone(const void *ptr, size_t *returned_size) -{ - // Returns a zone which contains ptr, else NULL - - if (0 == malloc_num_zones) { - if (returned_size) { - *returned_size = 0; - } - return NULL; - } - - // first look in the lite zone - if (lite_zone) { - malloc_zone_t *zone = lite_zone; - size_t size = zone->size(zone, ptr); - if (size) { // Claimed by this zone? - if (returned_size) { - *returned_size = size; - } - // Return the virtual default zone instead of the lite zone - see - return default_zone; - } - } - - // The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered. - // So don't advance the FRZ counter yet. - malloc_zone_t *zone = malloc_zones[0]; - size_t size = zone->size(zone, ptr); - if (size) { // Claimed by this zone? - if (returned_size) { - *returned_size = size; - } - - // Asan and others replace the zone at position 0 with their own zone. - // In that case just return that zone as they need this information. - // Otherwise return the virtual default zone, not the actual zone in position 0. 
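/*
 * Illustrative sketch (editor's addition, not part of the deleted file): the
 * FRZ reader/drain counter handshake used by the lock-free scan below and by
 * malloc_zone_unregister(), reduced to its core. Readers bump whichever
 * counter is currently "live"; the unregistering thread swaps the live/drain
 * roles and waits for the old counter to reach zero before any zone pointer
 * becomes invalid. All names below are invented for the example.
 */
#include <stdatomic.h>

static atomic_int frz_counter_a, frz_counter_b;
static _Atomic(atomic_int *) frz_live = &frz_counter_a;
static _Atomic(atomic_int *) frz_drain = &frz_counter_b;

static atomic_int *
frz_reader_enter(void)
{
	atomic_int *c = atomic_load(&frz_live);	// capture the counter of the moment
	atomic_fetch_add(c, 1);			// this thread is now inside FRZ
	return c;
}

static void
frz_reader_exit(atomic_int *c)
{
	atomic_fetch_sub(c, 1);			// leaving FRZ
}

static void
frz_wait_for_readers(void)			// run by the unregistering thread
{
	// Exchange the roles of the counters, then let the old live counter
	// drain: once it hits zero, no reader still holds a pointer captured
	// before the swap.
	atomic_int *old = atomic_load(&frz_live);
	atomic_store(&frz_live, atomic_load(&frz_drain));
	atomic_store(&frz_drain, old);
	while (atomic_load(old) != 0) {
		// the real code yields the CPU here
	}
}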
- if (!has_default_zone0()) { - return zone; - } else { - return default_zone; - } - } - - int32_t volatile *pFRZCounter = pFRZCounterLive; // Capture pointer to the counter of the moment - OSAtomicIncrement32Barrier(pFRZCounter); // Advance this counter -- our thread is in FRZ - - unsigned index; - int32_t limit = *(int32_t volatile *)&malloc_num_zones; - malloc_zone_t **zones = &malloc_zones[1]; - - // From this point on, FRZ is accessing the malloc_zones[] array without locking - // in order to avoid contention on common operations (such as non-default-zone free()). - // In order to ensure that this is actually safe to do, register/unregister take care - // to: - // - // 1. Register ensures that newly inserted pointers in malloc_zones[] are visible - // when malloc_num_zones is incremented. At the moment, we're relying on that store - // ordering to work without taking additional steps here to ensure load memory - // ordering. - // - // 2. Unregister waits for all readers in FRZ to complete their iteration before it - // returns from the unregister call (during which, even unregistered zone pointers - // are still valid). It also ensures that all the pointers in the zones array are - // valid until it returns, so that a stale value in limit is not dangerous. - - for (index = 1; index < limit; ++index, ++zones) { - zone = *zones; - size = zone->size(zone, ptr); - if (size) { // Claimed by this zone? - goto out; - } - } - // Unclaimed by any zone. - zone = NULL; - size = 0; -out: - if (returned_size) { - *returned_size = size; - } - OSAtomicDecrement32Barrier(pFRZCounter); // our thread is leaving FRZ - return zone; -} - -void -malloc_error_break(void) -{ - // Provides a non-inlined place for various malloc error procedures to call - // that will be called after an error message appears. It does not make - // sense for developers to call this function, so it is marked - // hidden to prevent it from becoming API. - MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe -} - -int -malloc_gdb_po_unsafe(void) -{ - // In order to implement "po" other data formatters in gdb, the debugger - // calls functions that call malloc. The debugger will only run one thread - // of the program in this case, so if another thread is holding a zone lock, - // gdb may deadlock in this case. - // - // Iterate over the zones in malloc_zones, and call "trylock" on the zone - // lock. If trylock succeeds, unlock it, otherwise return "locked". Returns - // 0 == safe, 1 == locked/unsafe. - - if (msl.stack_logging_locked && msl.stack_logging_locked()) { - return 1; - } - - malloc_zone_t **zones = malloc_zones; - unsigned i, e = malloc_num_zones; - - for (i = 0; i != e; ++i) { - malloc_zone_t *zone = zones[i]; - - // Version must be >= 5 to look at the new introspection field. - if (zone->version < 5) { - continue; - } - - if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone)) { - return 1; - } - } - return 0; -} - -/********* Creation and destruction ************/ - -static void set_flags_from_environment(void); - -MALLOC_NOEXPORT void -malloc_zone_register_while_locked(malloc_zone_t *zone) -{ - size_t protect_size; - unsigned i; - - /* scan the list of zones, to see if this zone is already registered. If - * so, print an error message and return. 
 */
- for (i = 0; i != malloc_num_zones; ++i) {
- if (zone == malloc_zones[i]) {
- malloc_report(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
- return;
- }
- }
-
- if (malloc_num_zones == malloc_num_zones_allocated) {
- size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
- mach_vm_size_t alloc_size = round_page(malloc_zones_size + vm_page_size);
- mach_vm_address_t vm_addr;
- int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC);
-
- vm_addr = vm_page_size;
- kern_return_t kr = mach_vm_allocate(mach_task_self(), &vm_addr, alloc_size, alloc_flags);
- if (kr) {
- malloc_report(ASL_LEVEL_ERR, "malloc_zone_register allocation failed: %d\n", kr);
- return;
- }
-
- malloc_zone_t **new_zones = (malloc_zone_t **)vm_addr;
- /* If there were previously allocated malloc zones, we need to copy them
- * out of the previous array and into the new zones array */
- if (malloc_zones) {
- memcpy(new_zones, malloc_zones, malloc_zones_size);
- vm_addr = (mach_vm_address_t)malloc_zones;
- mach_vm_size_t dealloc_size = round_page(malloc_zones_size);
- mach_vm_deallocate(mach_task_self(), vm_addr, dealloc_size);
- }
-
- /* Update the malloc_zones pointer, which we leak if it was previously
- * allocated, and the number of zones allocated */
- protect_size = (size_t)alloc_size;
- malloc_zones = new_zones;
- malloc_num_zones_allocated = (int32_t)(alloc_size / sizeof(malloc_zone_t *));
- } else {
- /* If we don't need to reallocate zones, we need to briefly change the
- * page protection of the malloc zones to allow writes */
- protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
- mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
- }
-
- /* This store-increment needs to be visible in the correct
- * order to any threads in find_registered_zone, such that if the incremented value
- * in malloc_num_zones is visible then the pointer write before it must also be visible.
- *
- * While we could be slightly more efficient here with atomic ops, the cleanest way to
- * ensure the proper store-release operation is performed is to use OSAtomic*Barrier
- * to update malloc_num_zones.
- */
- malloc_zones[malloc_num_zones] = zone;
- OSAtomicIncrement32Barrier(&malloc_num_zones);
-
- /* Finally, now that the zone is registered, disallow write access to the
- * malloc_zones array */
- mprotect(malloc_zones, protect_size, PROT_READ);
- //malloc_report(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones,
- // malloc_num_zones, protect_size);
-}
-
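/*
 * Illustrative sketch (editor's addition, not part of the deleted file): the
 * publication ordering malloc_zone_register_while_locked() depends on, in
 * miniature. The pointer store must become visible before the incremented
 * count; OSAtomicIncrement32Barrier provides that above, and the C11
 * release/acquire pairing shown here is the portable equivalent. All names
 * below are invented for the example.
 */
#include <stdatomic.h>
#include <stddef.h>

#define EXAMPLE_MAX_ZONES 16
static void *example_zones[EXAMPLE_MAX_ZONES];
static atomic_int example_num_zones;

static void
example_publish_zone(void *z)		// caller holds the registration lock
{
	int n = atomic_load_explicit(&example_num_zones, memory_order_relaxed);
	example_zones[n] = z;		// store the new pointer first...
	atomic_store_explicit(&example_num_zones, n + 1,
			memory_order_release);	// ...then publish the count
}

static void *
example_lookup_zone(int i)		// lock-free reader
{
	// The acquire load pairs with the release store: any reader that
	// observes i < n also observes the pointer stored before the count.
	int n = atomic_load_explicit(&example_num_zones, memory_order_acquire);
	return (i < n) ? example_zones[i] : NULL;
}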
-// To be used in _malloc_initialize_once() only, call that function instead.
-static void
-_malloc_initialize(void *context __unused)
-{
- MALLOC_LOCK();
- unsigned n;
- malloc_zone_t *zone = NULL;
-
- if (!_malloc_entropy_initialized) {
- // Lazy initialization may occur before __malloc_init (rdar://27075409)
- // TODO: make this a fatal error
- malloc_report(ASL_LEVEL_ERR, "*** malloc was initialized without entropy\n");
- }
-
- phys_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS;
- logical_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS;
-
- if (0 != (logical_ncpus % phys_ncpus)) {
- MALLOC_REPORT_FATAL_ERROR(logical_ncpus % phys_ncpus,
- "logical_ncpus %% phys_ncpus != 0\n");
- }
-
- switch (logical_ncpus / phys_ncpus) {
- case 1:
- hyper_shift = 0;
- break;
- case 2:
- hyper_shift = 1;
- break;
- case 4:
- hyper_shift = 2;
- break;
- default:
- MALLOC_REPORT_FATAL_ERROR(logical_ncpus / phys_ncpus, "logical_ncpus / phys_ncpus not 1, 2, or 4");
- }
-
- // max_magazines may already be set from a boot argument. Make sure that it
- // is bounded by the number of CPUs.
- if (max_magazines) {
- max_magazines = MIN(max_magazines, logical_ncpus);
- } else {
- max_magazines = logical_ncpus;
- }
-
- // similarly, cap medium magazines at logical_ncpus but don't cap it by
- // the max magazines if it has been set explicitly
- if (max_medium_magazines) {
- max_medium_magazines = MIN(max_medium_magazines, logical_ncpus);
- } else {
- max_medium_magazines = max_magazines;
- }
-
- set_flags_from_environment(); // will only set flags up to two times
- n = malloc_num_zones;
-
-#if CONFIG_NANOZONE
- nano_common_configure();
-
- malloc_zone_t *helper_zone = create_scalable_zone(0, malloc_debug_flags);
-
- if (_malloc_engaged_nano == NANO_V2) {
- zone = nanov2_create_zone(helper_zone, malloc_debug_flags);
- } else if (_malloc_engaged_nano == NANO_V1) {
- zone = nano_create_zone(helper_zone, malloc_debug_flags);
- }
-
- if (zone) {
- malloc_zone_register_while_locked(zone);
- malloc_zone_register_while_locked(helper_zone);
-
- // Must call malloc_set_zone_name() *after* helper and nano are hooked together.
- malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING); - malloc_set_zone_name(helper_zone, MALLOC_HELPER_ZONE_STRING); - } else { - zone = helper_zone; - malloc_zone_register_while_locked(zone); - malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING); - } -#else - zone = create_scalable_zone(0, malloc_debug_flags); - malloc_zone_register_while_locked(zone); - malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING); -#endif - - initial_default_zone = zone; - - if (n != 0) { // make the default first, for efficiency - unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *); - malloc_zone_t *hold = malloc_zones[0]; - - if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) { - malloc_set_zone_name(hold, NULL); - } - - mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE); - malloc_zones[0] = malloc_zones[n]; - malloc_zones[n] = hold; - mprotect(malloc_zones, protect_size, PROT_READ); - } - - // malloc_report(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones); - // malloc_report(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, - // (unsigned)&malloc_num_zones); - MALLOC_UNLOCK(); -} - -MALLOC_ALWAYS_INLINE -static inline void -_malloc_initialize_once(void) -{ - os_once(&_malloc_initialize_pred, NULL, _malloc_initialize); -} - -static inline malloc_zone_t * -inline_malloc_default_zone(void) -{ - _malloc_initialize_once(); - // malloc_report(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone); - return malloc_zones[0]; -} - -malloc_zone_t * -malloc_default_zone(void) -{ - return default_zone; -} - -static inline malloc_zone_t *inline_malloc_default_scalable_zone(void) __attribute__((always_inline)); -static inline malloc_zone_t * -inline_malloc_default_scalable_zone(void) -{ - unsigned index; - - _malloc_initialize_once(); - // malloc_report(ASL_LEVEL_INFO, "In inline_malloc_default_scalable_zone with %d %d\n", malloc_num_zones, - // malloc_has_debug_zone); - - MALLOC_LOCK(); -#if CONFIG_NANOZONE - for (index = 0; index < malloc_num_zones; ++index) { - malloc_zone_t *z = malloc_zones[index]; - - if (z->zone_name && strcmp(z->zone_name, MALLOC_HELPER_ZONE_STRING) == 0) { - MALLOC_UNLOCK(); - return z; - } - } -#endif - for (index = 0; index < malloc_num_zones; ++index) { - malloc_zone_t *z = malloc_zones[index]; - - if (z->zone_name && strcmp(z->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) { - MALLOC_UNLOCK(); - return z; - } - } - MALLOC_UNLOCK(); - - malloc_report(ASL_LEVEL_ERR, "*** malloc_default_scalable_zone() failed to find 'DefaultMallocZone'\n"); - return NULL; // FIXME: abort() instead? -} - -static void * -legacy_zeroing_large_malloc(malloc_zone_t *zone, size_t size) -{ - if (size > LEGACY_ZEROING_THRESHOLD) { - // Leopard and earlier returned a ZFOD range, so clear to zero always, - // ham-handedly touching in each page - return default_zone_calloc(zone, 1, size); - } else { - return default_zone_malloc(zone, size); - } -} - -static void * -legacy_zeroing_large_valloc(malloc_zone_t *zone, size_t size) -{ - void *p = default_zone_valloc(zone, size); - - // Leopard and earlier returned a ZFOD range, so ... - memset(p, 0, size); // Clear to zero always, ham-handedly touching in each page - return p; -} - -void -zeroify_scalable_zone(malloc_zone_t *zone) -{ - // this checkfix should replace the default zone's - // allocation routines with the zeroing versions. 
Instead of getting in hot
- // water with the wrong zone, ensure that we're mutating the zone we expect.
- //
- // Additionally, the default_zone is no longer PROT_READ, so the two mprotect
- // calls that were here are no longer needed.
- if (zone == default_zone) {
- zone->malloc = (void *)legacy_zeroing_large_malloc;
- zone->valloc = (void *)legacy_zeroing_large_valloc;
- }
-}
-
-/*
- * Returns the version of the Nano allocator that's in use, or 0 if not.
- */
-int
-malloc_engaged_nano(void)
-{
-#if CONFIG_NANOZONE
- return _malloc_engaged_nano;
-#else
- return 0;
-#endif
-}
-
-malloc_zone_t *
-malloc_default_purgeable_zone(void)
-{
- static malloc_zone_t *dpz;
-
- if (!dpz) {
- //
- // PR_7288598: Must pass a *scalable* zone (szone) as the helper for create_purgeable_zone().
- // Take care that the zone so obtained is not subject to interposing.
- //
- malloc_zone_t *tmp = create_purgeable_zone(0, inline_malloc_default_scalable_zone(), malloc_debug_flags);
- malloc_zone_register(tmp);
- malloc_set_zone_name(tmp, DEFAULT_PUREGEABLE_ZONE_STRING);
- if (!OSAtomicCompareAndSwapPtrBarrier(NULL, tmp, (void**)&dpz)) {
- malloc_destroy_zone(tmp);
- }
- }
- return dpz;
-}
-
-static void
-set_flags_from_environment(void)
-{
- const char *flag;
- const char **env = (const char **)*_NSGetEnviron();
- const char **p;
- const char *c;
-
-#if __LP64__
- malloc_debug_flags = MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
-#else
- int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
- if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126) /* Lion or greater */) {
- malloc_debug_flags = 0;
- } else {
- malloc_debug_flags = MALLOC_ABORT_ON_CORRUPTION;
- }
-#endif
- /*
- * Given that all of these environment variables start with "Malloc", we optimize by first
- * scanning the environment quickly, thereby avoiding repeated calls to getenv().
- * If we are setuid/setgid, these flags are ignored to prevent a malicious invoker from
- * changing our behaviour.
- */
- for (p = env; (c = *p) != NULL; ++p) {
-#if RDAR_48993662
- if (!strncmp(c, "Malloc", 6) || !strncmp(c, "_Malloc", 7)) {
-#else // RDAR_48993662
- if (!strncmp(c, "Malloc", 6)) {
-#endif // RDAR_48993662
- if (issetugid()) {
- return;
- }
- break;
- }
- }
-
- /*
- * Deny certain flags for entitled processes rdar://problem/13521742
- * MallocLogFile & MallocCorruptionAbort
- * as these provide the ability to turn *off* aborting in error cases.
- */
- bool restricted = dyld_process_is_restricted();
- malloc_print_configure(restricted);
-
- if (c == NULL) {
- return;
- }
-
- if (getenv("MallocGuardEdges")) {
- malloc_debug_flags |= MALLOC_ADD_GUARD_PAGES;
- malloc_report(ASL_LEVEL_INFO, "protecting edges\n");
- if (getenv("MallocDoNotProtectPrelude")) {
- malloc_debug_flags |= MALLOC_DONT_PROTECT_PRELUDE;
- malloc_report(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
- }
- if (getenv("MallocDoNotProtectPostlude")) {
- malloc_debug_flags |= MALLOC_DONT_PROTECT_POSTLUDE;
- malloc_report(ASL_LEVEL_INFO, "... 
but not protecting postlude guard page\n"); - } - } - - if (getenv("MallocScribble")) { - malloc_debug_flags |= MALLOC_DO_SCRIBBLE; - malloc_report(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n"); - } - if (getenv("MallocErrorAbort")) { - malloc_debug_flags |= MALLOC_ABORT_ON_ERROR; - malloc_report(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n"); - } - if (getenv("MallocTracing")) { - malloc_tracing_enabled = true; - } - -#if __LP64__ -/* initialization above forces MALLOC_ABORT_ON_CORRUPTION of 64-bit processes */ -#else - flag = getenv("MallocCorruptionAbort"); - if (!restricted && flag && (flag[0] == '0')) { // Set from an environment variable in 32-bit processes - malloc_debug_flags &= ~MALLOC_ABORT_ON_CORRUPTION; - } else if (flag) { - malloc_debug_flags |= MALLOC_ABORT_ON_CORRUPTION; - } -#endif - flag = getenv("MallocCheckHeapStart"); - if (flag) { - malloc_check_start = (unsigned)strtoul(flag, NULL, 0); - if (malloc_check_start == 0) { - malloc_check_start = 1; - } - if (malloc_check_start == -1) { - malloc_check_start = 1; - } - flag = getenv("MallocCheckHeapEach"); - if (flag) { - malloc_check_each = (unsigned)strtoul(flag, NULL, 0); - if (malloc_check_each == 0) { - malloc_check_each = 1; - } - if (malloc_check_each == -1) { - malloc_check_each = 1; - } - } - malloc_report(ASL_LEVEL_INFO, "checks heap after operation #%d and each %d operations\n", malloc_check_start, malloc_check_each); - flag = getenv("MallocCheckHeapAbort"); - if (flag) { - malloc_check_abort = (unsigned)strtol(flag, NULL, 0); - } - if (malloc_check_abort) { - malloc_report(ASL_LEVEL_INFO, "will abort on heap corruption\n"); - } else { - flag = getenv("MallocCheckHeapSleep"); - if (flag) { - malloc_check_sleep = (unsigned)strtol(flag, NULL, 0); - } - if (malloc_check_sleep > 0) { - malloc_report(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep); - } else if (malloc_check_sleep < 0) { - malloc_report(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep); - } else { - malloc_report(ASL_LEVEL_INFO, "no sleep on heap corruption\n"); - } - } - } - - flag = getenv("MallocMaxMagazines"); -#if RDAR_48993662 - if (!flag) { - flag = getenv("_MallocMaxMagazines"); - } -#endif // RDAR_48993662 - if (flag) { - int value = (unsigned)strtol(flag, NULL, 0); - if (value == 0) { - malloc_report(ASL_LEVEL_INFO, "Maximum magazines defaulted to %d\n", max_magazines); - } else if (value < 0) { - malloc_report(ASL_LEVEL_ERR, "Maximum magazines must be positive - ignored.\n"); - } else if (value > logical_ncpus) { - max_magazines = logical_ncpus; - malloc_report(ASL_LEVEL_INFO, "Maximum magazines limited to number of logical CPUs (%d)\n", max_magazines); - } else { - max_magazines = value; - malloc_report(ASL_LEVEL_INFO, "Maximum magazines set to %d\n", max_magazines); - } - } - - flag = getenv("MallocLargeExpandedCacheThreshold"); - if (flag) { - uint64_t value = (uint64_t)strtoull(flag, NULL, 0); - if (value == 0) { - malloc_report(ASL_LEVEL_INFO, "Large expanded cache threshold defaulted to %lly\n", magazine_large_expanded_cache_threshold); - } else if (value < 0) { - malloc_report(ASL_LEVEL_ERR, "MallocLargeExpandedCacheThreshold must be positive - ignored.\n"); - } else { - magazine_large_expanded_cache_threshold = value; - malloc_report(ASL_LEVEL_INFO, "Large expanded cache threshold set to %lly\n", magazine_large_expanded_cache_threshold); - } - } - -#if CONFIG_MEDIUM_ALLOCATOR - flag = getenv("MallocMediumZone"); - if 
(flag) { - int value = (unsigned)strtol(flag, NULL, 0); - if (value == 0) { - magazine_medium_enabled = false; - } else if (value == 1) { - magazine_medium_enabled = true; - } - } - - flag = getenv("MallocMediumActivationThreshold"); - if (flag) { - uint64_t value = (uint64_t)strtoull(flag, NULL, 0); - if (value == 0) { - malloc_report(ASL_LEVEL_INFO, "Medium activation threshold defaulted to %lly\n", magazine_medium_active_threshold); - } else if (value < 0) { - malloc_report(ASL_LEVEL_ERR, "MallocMediumActivationThreshold must be positive - ignored.\n"); - } else { - magazine_medium_active_threshold = value; - malloc_report(ASL_LEVEL_INFO, "Medium activation threshold set to %lly\n", magazine_medium_active_threshold); - } - } - - flag = getenv("MallocMaxMediumMagazines"); -#if RDAR_48993662 - if (!flag) { - flag = getenv("_MallocMaxMediumMagazines"); - } -#endif // RDAR_48993662 - if (flag) { - int value = (unsigned)strtol(flag, NULL, 0); - if (value == 0) { - malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines defaulted to %d\n", max_magazines); - } else if (value < 0) { - malloc_report(ASL_LEVEL_ERR, "Maximum medium magazines must be positive - ignored.\n"); - } else if (value > logical_ncpus) { - max_medium_magazines = logical_ncpus; - malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines limited to number of logical CPUs (%d)\n", max_medium_magazines); - } else { - max_medium_magazines = value; - malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines set to %d\n", max_medium_magazines); - } - } -#endif // CONFIG_MEDIUM_ALLOCATOR - -#if CONFIG_RECIRC_DEPOT - flag = getenv("MallocRecircRetainedRegions"); - if (flag) { - int value = (int)strtol(flag, NULL, 0); - if (value > 0) { - recirc_retained_regions = value; - } else { - malloc_report(ASL_LEVEL_ERR, "MallocRecircRetainedRegions must be positive - ignored.\n"); - } - } -#endif // CONFIG_RECIRC_DEPOT - if (getenv("MallocHelp")) { - malloc_report(ASL_LEVEL_INFO, - "environment variables that can be set for debug:\n" - "- MallocLogFile to create/append messages to file instead of stderr\n" - "- MallocGuardEdges to add 2 guard pages for each large block\n" - "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n" - "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n" - "- MallocStackLogging to record all stacks. Tools like leaks can then be applied\n" - "- MallocStackLoggingNoCompact to record all stacks. 
Needed for malloc_history\n" - "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n" - "- MallocScribble to detect writing on free blocks and missing initializers:\n" - " 0x55 is written upon free and 0xaa is written on allocation\n" - "- MallocCheckHeapStart to start checking the heap after operations\n" - "- MallocCheckHeapEach to repeat the checking of the heap after operations\n" - "- MallocCheckHeapSleep to sleep seconds on heap corruption\n" - "- MallocCheckHeapAbort to abort on heap corruption if is non-zero\n" - "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n" - " MallocCorruptionAbort is always set on 64-bit processes\n" - "- MallocErrorAbort to abort on any malloc error, including out of memory\n"\ - "- MallocTracing to emit kdebug trace points on malloc entry points\n"\ - "- MallocHelp - this help!\n"); - } -} - -malloc_zone_t * -malloc_create_zone(vm_size_t start_size, unsigned flags) -{ - malloc_zone_t *zone; - - /* start_size doesn't actually appear to be used, but we test anyway. */ - if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) { - return NULL; - } - _malloc_initialize_once(); - zone = create_scalable_zone(start_size, flags | malloc_debug_flags); - malloc_zone_register(zone); - return zone; -} - -/* - * For use by CheckFix: establish a new default zone whose behavior is, apart from - * the use of death-row and per-CPU magazines, that of Leopard. - */ -void -malloc_create_legacy_default_zone(void) -{ - malloc_zone_t *zone; - int i; - - _malloc_initialize_once(); - zone = create_legacy_scalable_zone(0, malloc_debug_flags); - - MALLOC_LOCK(); - malloc_zone_register_while_locked(zone); - - // - // Establish the legacy scalable zone just created as the default zone. - // - malloc_zone_t *hold = malloc_zones[0]; - if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) { - malloc_set_zone_name(hold, NULL); - } - malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING); - - unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *); - mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE); - - // assert(zone == malloc_zones[malloc_num_zones - 1]; - for (i = malloc_num_zones - 1; i > 0; --i) { - malloc_zones[i] = malloc_zones[i - 1]; - } - malloc_zones[0] = zone; - - mprotect(malloc_zones, protect_size, PROT_READ); - MALLOC_UNLOCK(); -} - -void -malloc_destroy_zone(malloc_zone_t *zone) -{ - malloc_set_zone_name(zone, NULL); // Deallocate zone name wherever it may reside PR_7701095 - malloc_zone_unregister(zone); - zone->destroy(zone); -} - -static vm_address_t *frames = NULL; -static unsigned num_frames; - -MALLOC_NOINLINE -void -malloc_zone_check_fail(const char *msg, const char *fmt, ...) -{ - _SIMPLE_STRING b = _simple_salloc(); - if (b) { - _simple_sprintf(b, "*** MallocCheckHeap: FAILED check at operation #%d\n", malloc_check_counter - 1); - } else { - malloc_report(MALLOC_REPORT_NOLOG, "*** MallocCheckHeap: FAILED check at operation #%d\n", malloc_check_counter - 1); - } - if (frames) { - unsigned index = 1; - if (b) { - _simple_sappend(b, "Stack for last operation where the malloc check succeeded: "); - while (index < num_frames) - _simple_sprintf(b, "%p ", (void*)frames[index++]); - malloc_report(MALLOC_REPORT_NOLOG, "%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b)); - } else { - /* - * Should only get here if vm_allocate() can't get a single page of - * memory, implying _simple_asl_log() would also fail. 
So we just - * print to the file descriptor. - */ - malloc_report(MALLOC_REPORT_NOLOG, "Stack for last operation where the malloc check succeeded: "); - while (index < num_frames) { - malloc_report(MALLOC_REPORT_NOLOG, "%p ", (void *)frames[index++]); - } - malloc_report(MALLOC_REPORT_NOLOG, "\n(Use 'atos' for a symbolic stack)\n"); - } - } - if (malloc_check_each > 1) { - unsigned recomm_each = (malloc_check_each > 10) ? malloc_check_each / 10 : 1; - unsigned recomm_start = - (malloc_check_counter > malloc_check_each + 1) ? malloc_check_counter - 1 - malloc_check_each : 1; - malloc_report(MALLOC_REPORT_NOLOG, - "*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", - recomm_start, recomm_each); - } - - if (b) { - _simple_sfree(b); - } - - // Use malloc_vreport() to: - // * report the error - // * call malloc_error_break() for a breakpoint - // * sleep or stop for debug - // * set the crash message and crash if malloc_check_abort is set. - unsigned sleep_time = 0; - uint32_t report_flags = ASL_LEVEL_ERR | MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG; - if (malloc_check_abort) { - report_flags |= MALLOC_REPORT_CRASH; - } else { - if (malloc_check_sleep > 0) { - malloc_report(ASL_LEVEL_NOTICE, "*** Will sleep for %d seconds to leave time to attach\n", malloc_check_sleep); - sleep_time = malloc_check_sleep; - } else if (malloc_check_sleep < 0) { - malloc_report(ASL_LEVEL_NOTICE, "*** Will sleep once for %d seconds to leave time to attach\n", -malloc_check_sleep); - sleep_time = -malloc_check_sleep; - malloc_check_sleep = 0; - } - } - va_list ap; - va_start(ap, fmt); - malloc_vreport(report_flags, sleep_time, msg, NULL, fmt, ap); - va_end(ap); -} - -/********* Block creation and manipulation ************/ - -static void -internal_check(void) -{ - if (malloc_zone_check(NULL)) { - if (!frames) { - vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1); - } - thread_stack_pcs(frames, (unsigned)(vm_page_size / sizeof(vm_address_t) - 1), &num_frames); - } - malloc_check_start += malloc_check_each; -} - -void * -malloc_zone_malloc(malloc_zone_t *zone, size_t size) -{ - MALLOC_TRACE(TRACE_malloc | DBG_FUNC_START, (uintptr_t)zone, size, 0, 0); - - void *ptr; - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - if (size > MALLOC_ABSOLUTE_MAX_SIZE) { - return NULL; - } - - ptr = zone->malloc(zone, size); // if lite zone is passed in then we still call the lite methods - - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0); - } - - MALLOC_TRACE(TRACE_malloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0); - return ptr; -} - -void * -malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) -{ - MALLOC_TRACE(TRACE_calloc | DBG_FUNC_START, (uintptr_t)zone, num_items, size, 0); - - void *ptr; - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - - ptr = zone->calloc(zone, num_items, size); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, - (uintptr_t)(num_items * size), 0, (uintptr_t)ptr, 0); - } - - MALLOC_TRACE(TRACE_calloc | DBG_FUNC_END, (uintptr_t)zone, num_items, size, (uintptr_t)ptr); - return ptr; -} - -void * -malloc_zone_valloc(malloc_zone_t *zone, size_t size) -{ - MALLOC_TRACE(TRACE_valloc | DBG_FUNC_START, (uintptr_t)zone, size, 
0, 0); - - void *ptr; - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - if (size > MALLOC_ABSOLUTE_MAX_SIZE) { - return NULL; - } - - ptr = zone->valloc(zone, size); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0); - } - - MALLOC_TRACE(TRACE_valloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0); - return ptr; -} - -void * -malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - MALLOC_TRACE(TRACE_realloc | DBG_FUNC_START, (uintptr_t)zone, (uintptr_t)ptr, size, 0); - - void *new_ptr; - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - if (size > MALLOC_ABSOLUTE_MAX_SIZE) { - return NULL; - } - - new_ptr = zone->realloc(zone, ptr, size); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, - (uintptr_t)ptr, (uintptr_t)size, (uintptr_t)new_ptr, 0); - } - MALLOC_TRACE(TRACE_realloc | DBG_FUNC_END, (uintptr_t)zone, (uintptr_t)ptr, size, (uintptr_t)new_ptr); - return new_ptr; -} - -void -malloc_zone_free(malloc_zone_t *zone, void *ptr) -{ - MALLOC_TRACE(TRACE_free, (uintptr_t)zone, (uintptr_t)ptr, (ptr) ? *(uintptr_t*)ptr : 0, 0); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0); - } - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - - zone->free(zone, ptr); -} - -static void -malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - MALLOC_TRACE(TRACE_free, (uintptr_t)zone, (uintptr_t)ptr, size, (ptr && size) ? *(uintptr_t*)ptr : 0); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0); - } - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - - zone->free_definite_size(zone, ptr, size); -} - -malloc_zone_t * -malloc_zone_from_ptr(const void *ptr) -{ - if (!ptr) { - return NULL; - } else { - return find_registered_zone(ptr, NULL); - } -} - -void * -malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - MALLOC_TRACE(TRACE_memalign | DBG_FUNC_START, (uintptr_t)zone, alignment, size, 0); - - void *ptr; - if (zone->version < 5) { // Version must be >= 5 to look at the new memalign field. - return NULL; - } - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - if (size > MALLOC_ABSOLUTE_MAX_SIZE) { - return NULL; - } - if (alignment < sizeof(void *) || // excludes 0 == alignment - 0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two. - return NULL; - } - - if (!(zone->memalign)) { - return NULL; - } - ptr = zone->memalign(zone, alignment, size); - - if (malloc_logger) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0); - } - - MALLOC_TRACE(TRACE_memalign | DBG_FUNC_END, (uintptr_t)zone, alignment, size, (uintptr_t)ptr); - return ptr; -} - -boolean_t -malloc_zone_claimed_address(malloc_zone_t *zone, void *ptr) -{ - if (!ptr) { - // NULL is not a member of any zone. 
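- // Answering here also saves a pointless dispatch into the zone for a pointer that no zone could ever have returned.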
- return false; - } - - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - - if (zone->version < 10 || !zone->claimed_address) { - // For zones that have not implemented claimed_address, we always have - // to return true to avoid a false negative. - return true; - } - - return zone->claimed_address(zone, ptr); -} - -/********* Functions for zone implementors ************/ - -void -malloc_zone_register(malloc_zone_t *zone) -{ - MALLOC_LOCK(); - malloc_zone_register_while_locked(zone); - MALLOC_UNLOCK(); -} - -void -malloc_zone_unregister(malloc_zone_t *z) -{ - unsigned index; - - if (malloc_num_zones == 0) { - return; - } - - MALLOC_LOCK(); - for (index = 0; index < malloc_num_zones; ++index) { - if (z != malloc_zones[index]) { - continue; - } - - // Modify the page to allow write access, so that we can update the - // malloc_zones array. - size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *); - mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE); - - // If we found a match, replace it with the entry at the end of the list, shrink the list, - // and leave the end of the list intact to avoid racing with find_registered_zone(). - - malloc_zones[index] = malloc_zones[malloc_num_zones - 1]; - --malloc_num_zones; - - mprotect(malloc_zones, protect_size, PROT_READ); - - // Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently - // executing *inside* find_registered_zone is swapped with the counter drained to zero last time through. - // The former is then allowed to drain to zero while this thread yields. - int32_t volatile *p = pFRZCounterLive; - pFRZCounterLive = pFRZCounterDrain; - pFRZCounterDrain = p; - OSMemoryBarrier(); // Full memory barrier - - while (0 != *pFRZCounterDrain) { - yield(); - } - - MALLOC_UNLOCK(); - - return; - } - MALLOC_UNLOCK(); - malloc_report(ASL_LEVEL_ERR, "*** malloc_zone_unregister() failed for %p\n", z); -} - -void -malloc_set_zone_name(malloc_zone_t *z, const char *name) -{ - char *newName; - - mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE); - if (z->zone_name) { - free((char *)z->zone_name); - z->zone_name = NULL; - } - if (name) { - size_t buflen = strlen(name) + 1; - newName = malloc_zone_malloc(z, buflen); - if (newName) { - strlcpy(newName, name, buflen); - z->zone_name = (const char *)newName; - } else { - z->zone_name = NULL; - } - } - mprotect(z, sizeof(malloc_zone_t), PROT_READ); -} - -const char * -malloc_get_zone_name(malloc_zone_t *zone) -{ - return zone->zone_name; -} - - -/********* Generic ANSI callouts ************/ - -void * -malloc(size_t size) -{ - void *retval; - retval = malloc_zone_malloc(default_zone, size); - if (retval == NULL) { - errno = ENOMEM; - } - return retval; -} - -void * -aligned_alloc(size_t alignment, size_t size) -{ - if (alignment < sizeof(void *) || !powerof2(alignment) || /* those are implementation requirements */ - (size & (alignment - 1)) != 0) { /* C11 requires size to be a multiple of alignment */ - errno = EINVAL; - return NULL; - } - - void *retval = malloc_zone_memalign(default_zone, alignment, size); - if (retval == NULL) { - errno = ENOMEM; - } - return retval; -} - -void * -calloc(size_t num_items, size_t size) -{ - void *retval; - retval = malloc_zone_calloc(default_zone, num_items, size); - if (retval == NULL) { - errno = ENOMEM; - } - return retval; -} - -void -free(void *ptr) -{ - malloc_zone_t *zone; - size_t size; - if (!ptr) { - return; - } - - zone = 
find_registered_zone(ptr, &size); - if (!zone) { - int flags = MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG; - if ((malloc_debug_flags & (MALLOC_ABORT_ON_CORRUPTION | MALLOC_ABORT_ON_ERROR))) { - flags = MALLOC_REPORT_CRASH | MALLOC_REPORT_NOLOG; - } - malloc_report(flags, - "*** error for object %p: pointer being freed was not allocated\n", ptr); - } else if (zone->version >= 6 && zone->free_definite_size) { - malloc_zone_free_definite_size(zone, ptr, size); - } else { - malloc_zone_free(zone, ptr); - } -} - -void * -realloc(void *in_ptr, size_t new_size) -{ - void *retval = NULL; - void *old_ptr; - malloc_zone_t *zone; - - // SUSv3: "If size is 0 and ptr is not a null pointer, the object - // pointed to is freed. If the space cannot be allocated, the object - // shall remain unchanged." Also "If size is 0, either a null pointer - // or a unique pointer that can be successfully passed to free() shall - // be returned." We choose to allocate a minimum size object by calling - // malloc_zone_malloc with zero size, which matches "If ptr is a null - // pointer, realloc() shall be equivalent to malloc() for the specified - // size." So we only free the original memory if the allocation succeeds. - old_ptr = (new_size == 0) ? NULL : in_ptr; - if (!old_ptr) { - retval = malloc_zone_malloc(default_zone, new_size); - } else { - zone = find_registered_zone(old_ptr, NULL); - if (!zone) { - int flags = MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG; - if (malloc_debug_flags & (MALLOC_ABORT_ON_CORRUPTION | MALLOC_ABORT_ON_ERROR)) { - flags = MALLOC_REPORT_CRASH | MALLOC_REPORT_NOLOG; - } - malloc_report(flags, "*** error for object %p: pointer being realloc'd was not allocated\n", in_ptr); - } else { - retval = malloc_zone_realloc(zone, old_ptr, new_size); - } - } - - if (retval == NULL) { - errno = ENOMEM; - } else if (new_size == 0) { - free(in_ptr); - } - return retval; -} - -void * -valloc(size_t size) -{ - void *retval; - malloc_zone_t *zone = default_zone; - retval = malloc_zone_valloc(zone, size); - if (retval == NULL) { - errno = ENOMEM; - } - return retval; -} - -extern void -vfree(void *ptr) -{ - free(ptr); -} - -size_t -malloc_size(const void *ptr) -{ - size_t size = 0; - - if (!ptr) { - return size; - } - - (void)find_registered_zone(ptr, &size); - return size; -} - -size_t -malloc_good_size(size_t size) -{ - malloc_zone_t *zone = default_zone; - return zone->introspect->good_size(zone, size); -} - -/* - * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment, - * and shall return a pointer to the allocated memory in memptr. - * The value of alignment shall be a multiple of sizeof( void *), that is also a power of two. - * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment. - * - * Upon successful completion, posix_memalign() shall return zero; otherwise, - * an error number shall be returned to indicate the error. - * - * The posix_memalign() function shall fail if: - * EINVAL - * The value of the alignment parameter is not a power of two multiple of sizeof( void *). - * ENOMEM - * There is insufficient memory available with the requested alignment. - */ - -int -posix_memalign(void **memptr, size_t alignment, size_t size) -{ - void *retval; - - /* POSIX is silent on NULL == memptr !?! 
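- * We assume memptr is valid and do not check it. An illustrative call - * (hypothetical sizes, not part of the original source): - * void *p; - * int rc = posix_memalign(&p, 64, 1024); // returns 0, EINVAL or ENOMEM - * if (rc == 0) { free(p); }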
*/ - - retval = malloc_zone_memalign(default_zone, alignment, size); - if (retval == NULL) { - // To avoid testing the alignment constraints redundantly, we'll rely on the - // test made in malloc_zone_memalign to vet each request. Only if that test fails - // and returns NULL, do we arrive here to detect the bogus alignment and give the - // required EINVAL return. - if (alignment < sizeof(void *) || // excludes 0 == alignment - 0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two. - return EINVAL; - } - return ENOMEM; - } else { - *memptr = retval; // Set iff allocation succeeded - return 0; - } -} - -boolean_t -malloc_claimed_address(void *ptr) -{ - // We need to check with each registered zone whether it claims "ptr". - // Use logic similar to that in find_registered_zone(). - if (malloc_num_zones == 0) { - return false; - } - - // Start with the lite zone, if it's in use. - if (lite_zone && malloc_zone_claimed_address(lite_zone, ptr)) { - return true; - } - - // Next, try the default zone, which is always present. - if (malloc_zone_claimed_address(malloc_zones[0], ptr)) { - return true; - } - - // Try all the other zones. Increment the FRZ barrier so that we can - // walk the zones array without a lock (see find_registered_zone() for - // the details). - int32_t volatile *pFRZCounter = pFRZCounterLive; - OSAtomicIncrement32Barrier(pFRZCounter); - - int32_t limit = *(int32_t volatile *)&malloc_num_zones; - malloc_zone_t **zones = &malloc_zones[1]; - boolean_t result = false; - for (unsigned index = 1; index < limit; ++index, ++zones) { - malloc_zone_t *zone = *zones; - if (malloc_zone_claimed_address(zone, ptr)) { - result = true; - break; - } - } - - OSAtomicDecrement32Barrier(pFRZCounter); - return result; -} - -void * -reallocarray(void * in_ptr, size_t nmemb, size_t size){ - size_t alloc_size; - if (os_mul_overflow(nmemb, size, &alloc_size)){ - errno = ENOMEM; - return NULL; - } - return realloc(in_ptr, alloc_size); -} - -void * -reallocarrayf(void * in_ptr, size_t nmemb, size_t size){ - size_t alloc_size; - if (os_mul_overflow(nmemb, size, &alloc_size)){ - errno = ENOMEM; - return NULL; - } - return reallocf(in_ptr, alloc_size); -} - -static malloc_zone_t * -find_registered_purgeable_zone(void *ptr) -{ - if (!ptr) { - return NULL; - } - - /* - * Look for a zone which contains ptr. If that zone does not have the purgeable malloc flag - * set, or the allocation is too small, do nothing. Otherwise, set the allocation volatile. - * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones - * and only search those. - */ - size_t size = 0; - malloc_zone_t *zone = find_registered_zone(ptr, &size); - - /* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined - * binary compatibility impact of changing the introspect struct yet. */ - if (!zone) { - return NULL; - } - - /* Check to make sure pointer is page aligned and size is multiple of page size */ - if ((size < vm_page_size) || ((size % vm_page_size) != 0)) { - return NULL; - } - - return zone; -} - -void -malloc_make_purgeable(void *ptr) -{ - malloc_zone_t *zone = find_registered_purgeable_zone(ptr); - if (!zone) { - return; - } - - int state = VM_PURGABLE_VOLATILE; - vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state); - return; -} - -/* Returns true if ptr is valid. Ignore the return value from vm_purgeable_control and only report - * state. 
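- * Callers that need the contents intact should check for EFAULT below, which - * reports that the kernel emptied the region while it was volatile.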
*/ -int -malloc_make_nonpurgeable(void *ptr) -{ - malloc_zone_t *zone = find_registered_purgeable_zone(ptr); - if (!zone) { - return 0; - } - - int state = VM_PURGABLE_NONVOLATILE; - vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state); - - if (state == VM_PURGABLE_EMPTY) { - return EFAULT; - } - - return 0; -} - -void -malloc_enter_process_memory_limit_warn_mode(void) -{ - // -} - - - -// Note that malloc_memory_event_handler is not thread-safe, and we are relying on the callers of this for synchronization -void -malloc_memory_event_handler(unsigned long event) -{ - if (event & NOTE_MEMORYSTATUS_PRESSURE_WARN) { - malloc_zone_pressure_relief(0, 0); - } - - if ((event & NOTE_MEMORYSTATUS_MSL_STATUS) != 0 && (event & ~NOTE_MEMORYSTATUS_MSL_STATUS) == 0) { - malloc_register_stack_logger(); - } - -#if ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING - if (event & (NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL)) { - malloc_register_stack_logger(); - } -#endif // ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING - - if (msl.handle_memory_event) { - // Let MSL see the event. - msl.handle_memory_event(event); - } -} - -size_t -malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal) -{ - if (!zone) { - unsigned index = 0; - size_t total = 0; - - // Take lock to defend against malloc_destroy_zone() - MALLOC_LOCK(); - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - if (zone->version < 8) { - continue; - } - if (NULL == zone->pressure_relief) { - continue; - } - if (0 == goal) { /* Greedy */ - total += zone->pressure_relief(zone, 0); - } else if (goal > total) { - total += zone->pressure_relief(zone, goal - total); - } else { /* total >= goal */ - break; - } - } - MALLOC_UNLOCK(); - return total; - } else { - // Assumes zone is not destroyed for the duration of this call - if (zone->version < 8) { - return 0; - } - if (NULL == zone->pressure_relief) { - return 0; - } - return zone->pressure_relief(zone, goal); - } -} - -/********* Batch methods ************/ - -unsigned -malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) -{ - if (!zone->batch_malloc) { - return 0; - } - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - unsigned batched = zone->batch_malloc(zone, size, results, num_requested); - - if (malloc_logger) { - unsigned index = 0; - while (index < batched) { - malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, - (uintptr_t)results[index], 0); - index++; - } - } - return batched; -} - -void -malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) -{ - if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) { - internal_check(); - } - if (malloc_logger) { - unsigned index = 0; - while (index < num) { - malloc_logger( - MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0); - index++; - } - } - - if (zone->batch_free) { - zone->batch_free(zone, to_be_freed, num); - } else { - void (*free_fun)(malloc_zone_t *, void *) = zone->free; - - while (num--) { - void *ptr = *to_be_freed++; - free_fun(zone, ptr); - } - } -} - -/********* Functions for performance tools ************/ - -kern_return_t -malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) -{ - // Note that the 2 following addresses are 
not correct if the address of the target is different from your own. This notably - // occurs if the address of System.framework is slid (e.g. different than at B & I ) - vm_address_t remote_malloc_zones = (vm_address_t)&malloc_zones; - vm_address_t remote_malloc_num_zones = (vm_address_t)&malloc_num_zones; - kern_return_t err; - vm_address_t zones_address; - vm_address_t *zones_address_ref; - unsigned num_zones; - unsigned *num_zones_ref; - if (!reader) { - reader = _malloc_default_reader; - } - // printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones); - err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref); - // printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading zones_address at %p\n", (void *)remote_malloc_zones); - return err; - } - zones_address = *zones_address_ref; - // printf("Reading num_zones at address %p\n", remote_malloc_num_zones); - err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading num_zones at %p\n", (void *)remote_malloc_num_zones); - return err; - } - num_zones = *num_zones_ref; - // printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones); - *count = num_zones; - // printf("malloc_get_all_zones successfully found %d zones\n", num_zones); - err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading zones at %p\n", &zones_address); - return err; - } - // printf("malloc_get_all_zones successfully read %d zones\n", num_zones); - return err; -} - -/********* Debug helpers ************/ - -void -malloc_zone_print_ptr_info(void *ptr) -{ - malloc_zone_t *zone; - if (!ptr) { - return; - } - zone = malloc_zone_from_ptr(ptr); - if (zone) { - printf("ptr %p in registered zone %p\n", ptr, zone); - } else { - printf("ptr %p not in heap\n", ptr); - } -} - -boolean_t -malloc_zone_check(malloc_zone_t *zone) -{ - boolean_t ok = 1; - if (!zone) { - unsigned index = 0; - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - if (!zone->introspect->check(zone)) { - ok = 0; - } - } - } else { - ok = zone->introspect->check(zone); - } - return ok; -} - -void -malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) -{ - if (!zone) { - unsigned index = 0; - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - zone->introspect->print(zone, verbose); - } - } else { - zone->introspect->print(zone, verbose); - } -} - -void -malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) -{ - if (!zone) { - memset(stats, 0, sizeof(*stats)); - unsigned index = 0; - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - malloc_statistics_t this_stats; - zone->introspect->statistics(zone, &this_stats); - stats->blocks_in_use += this_stats.blocks_in_use; - stats->size_in_use += this_stats.size_in_use; - stats->max_size_in_use += this_stats.max_size_in_use; - stats->size_allocated += this_stats.size_allocated; - } - } else { - zone->introspect->statistics(zone, stats); - } -} - -void -malloc_zone_log(malloc_zone_t *zone, void *address) -{ - if (!zone) { - unsigned index = 0; - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - zone->introspect->log(zone, address); - } - } else { - zone->introspect->log(zone, 
address); - } -} - -/********* Misc other entry points ************/ - -void -mag_set_thread_index(unsigned int index) -{ - _os_cpu_number_override = index; -#if CONFIG_NANOZONE - nano_common_cpu_number_override_set(); -#endif // CONFIG_NANOZONE -} - -static void -DefaultMallocError(int x) -{ -#if USE_SLEEP_RATHER_THAN_ABORT - malloc_report(ASL_LEVEL_ERR, "*** error %d\n", x); - sleep(3600); -#else - _SIMPLE_STRING b = _simple_salloc(); - if (b) { - _simple_sprintf(b, "*** error %d", x); - malloc_report(MALLOC_REPORT_NOLOG, "%s\n", _simple_string(b)); - _os_set_crash_log_message_dynamic(_simple_string(b)); - } else { - malloc_report(MALLOC_REPORT_NOLOG, "*** error %d\n", x); - _os_set_crash_log_message("*** DefaultMallocError called"); - } - abort(); -#endif -} - -void (*malloc_error(void (*func)(int)))(int) -{ - return DefaultMallocError; -} - -static void -_malloc_lock_all(void (*callout)(void)) -{ - unsigned index = 0; - MALLOC_LOCK(); - while (index < malloc_num_zones) { - malloc_zone_t *zone = malloc_zones[index++]; - zone->introspect->force_lock(zone); - } - if (callout) { - callout(); - } -} - -static void -_malloc_unlock_all(void (*callout)(void)) -{ - unsigned index = 0; - if (callout) { - callout(); - } - while (index < malloc_num_zones) { - malloc_zone_t *zone = malloc_zones[index++]; - zone->introspect->force_unlock(zone); - } - MALLOC_UNLOCK(); -} - -static void -_malloc_reinit_lock_all(void (*callout)(void)) -{ - unsigned index = 0; - if (callout) { - callout(); - } - while (index < malloc_num_zones) { - malloc_zone_t *zone = malloc_zones[index++]; - if (zone->version < 9) { // Version must be >= 9 to look at reinit_lock - zone->introspect->force_unlock(zone); - } else { - zone->introspect->reinit_lock(zone); - } - } - MALLOC_REINIT_LOCK(); -} - - -// Called prior to fork() to guarantee that malloc is not in any critical -// sections during the fork(); prevent any locks from being held by non- -// surviving threads after the fork. -void -_malloc_fork_prepare(void) -{ - return _malloc_lock_all(msl.fork_prepare); -} - -// Called in the parent process after fork() to resume normal operation. -void -_malloc_fork_parent(void) -{ - return _malloc_unlock_all(msl.fork_parent); -} - -// Called in the child process after fork() to resume normal operation. -void -_malloc_fork_child(void) -{ -#if CONFIG_NANOZONE - if (_malloc_initialize_pred) { - if (_malloc_engaged_nano == NANO_V2) { - nanov2_forked_zone((nanozonev2_t *)inline_malloc_default_zone()); - } else if (_malloc_engaged_nano == NANO_V1) { - nano_forked_zone((nanozone_t *)inline_malloc_default_zone()); - } - } -#endif - return _malloc_reinit_lock_all(msl.fork_child); -} - -/* - * A Glibc-like mstats() interface. - * - * Note that this interface really isn't very good, as it doesn't understand - * that we may have multiple allocators running at once. We just massage - * the result from malloc_zone_statistics in any case. - */ -struct mstats -mstats(void) -{ - malloc_statistics_t s; - struct mstats m; - - malloc_zone_statistics(NULL, &s); - m.bytes_total = s.size_allocated; - m.chunks_used = s.blocks_in_use; - m.bytes_used = s.size_in_use; - m.chunks_free = 0; - m.bytes_free = m.bytes_total - m.bytes_used; /* isn't this somewhat obvious? */ - - return (m); -} - -boolean_t -malloc_zone_enable_discharge_checking(malloc_zone_t *zone) -{ - if (zone->version < 7) { // Version must be >= 7 to look at the new discharge checking fields. 
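- // Older zones were built with a shorter introspection struct, so these - // fields may not exist at all; fail closed rather than read past the end.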
- return FALSE; - } - if (NULL == zone->introspect->enable_discharge_checking) { - return FALSE; - } - return zone->introspect->enable_discharge_checking(zone); -} - -void -malloc_zone_disable_discharge_checking(malloc_zone_t *zone) -{ - if (zone->version < 7) { // Version must be >= 7 to look at the new discharge checking fields. - return; - } - if (NULL == zone->introspect->disable_discharge_checking) { - return; - } - zone->introspect->disable_discharge_checking(zone); -} - -void -malloc_zone_discharge(malloc_zone_t *zone, void *memory) -{ - if (NULL == zone) { - zone = malloc_zone_from_ptr(memory); - } - if (NULL == zone) { - return; - } - if (zone->version < 7) { // Version must be >= 7 to look at the new discharge checking fields. - return; - } - if (NULL == zone->introspect->discharge) { - return; - } - zone->introspect->discharge(zone, memory); -} - -void -malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info)) -{ - if (!zone) { - unsigned index = 0; - while (index < malloc_num_zones) { - zone = malloc_zones[index++]; - if (zone->version < 7) { - continue; - } - if (NULL == zone->introspect->enumerate_discharged_pointers) { - continue; - } - zone->introspect->enumerate_discharged_pointers(zone, report_discharged); - } - } else { - if (zone->version < 7) { - return; - } - if (NULL == zone->introspect->enumerate_discharged_pointers) { - return; - } - zone->introspect->enumerate_discharged_pointers(zone, report_discharged); - } -} - -/***************** OBSOLETE ENTRY POINTS ********************/ - -#if PHASE_OUT_OLD_MALLOC -#error PHASE OUT THE FOLLOWING FUNCTIONS -#endif - -void -set_malloc_singlethreaded(boolean_t single) -{ - static boolean_t warned = 0; - if (!warned) { -#if PHASE_OUT_OLD_MALLOC - malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single); -#endif - warned = 1; - } -} - -void -malloc_singlethreaded(void) -{ - static boolean_t warned = 0; - if (!warned) { - malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: malloc_singlethreaded()\n"); - warned = 1; - } -} - -int -malloc_debug(int level) -{ - malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: malloc_debug()\n"); - return 0; -} - -#pragma mark - -#pragma mark Malloc Stack Logging - - -/* this is called from libsystem during initialization. 
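- * Libsystem hands over the dlopen/dlsym pointers it resolved during - * bootstrap; we stash them and, if any MallocStackLogging* variable is - * present in the environment, load the MSL framework before the first - * allocation needs to be recorded.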
*/ -void -__stack_logging_early_finished(const struct _malloc_functions *funcs) -{ -#if !TARGET_OS_DRIVERKIT - _dlopen = funcs->dlopen; - _dlsym = funcs->dlsym; -#endif - const char **env = (const char**) *_NSGetEnviron(); - for (const char **e = env; *e; e++) { - if (0==strncmp(*e, "MallocStackLogging", 18)) { - malloc_register_stack_logger(); - void (*msl_set_flags_from_environment) (const char **env); - msl_set_flags_from_environment = _dlsym(msl.dylib, "msl_set_flags_from_environment"); - if (msl_set_flags_from_environment) { - msl_set_flags_from_environment(env); - } - break; - } - } - if (msl.dylib) { - void (*initialize) () = _dlsym(msl.dylib, "msl_initialize"); - if (initialize) { - initialize(); - } - } -} - - -static os_once_t _register_msl_dylib_pred; - -static void -register_msl_dylib(void *dylib) -{ - if (!dylib) { - return; - } - msl.dylib = dylib; - msl.handle_memory_event = _dlsym(dylib, "msl_handle_memory_event"); - msl.stack_logging_locked = _dlsym(dylib, "msl_stack_logging_locked"); - msl.fork_prepare = _dlsym(dylib, "msl_fork_prepare"); - msl.fork_child = _dlsym(dylib, "msl_fork_child"); - msl.fork_parent = _dlsym(dylib, "msl_fork_parent"); - - // TODO delete these ones - msl.get_frames_for_address = _dlsym(dylib, "msl_get_frames_for_address"); - msl.stackid_for_vm_region = _dlsym(dylib, "msl_stackid_for_vm_region"); - msl.get_frames_for_stackid = _dlsym(dylib, "msl_get_frames_for_stackid"); - msl.uniquing_table_read_stack = _dlsym(dylib, "msl_uniquing_table_read_stack"); - - void (*msl_copy_msl_lite_hooks) (struct _malloc_msl_lite_hooks_s *hooksp, size_t size); - msl_copy_msl_lite_hooks = _dlsym(dylib, "msl_copy_msl_lite_hooks"); - if (msl_copy_msl_lite_hooks) { - set_msl_lite_hooks(msl_copy_msl_lite_hooks); - } -} - -MALLOC_EXPORT -boolean_t -malloc_register_stack_logger(void) -{ - if (msl.dylib != NULL) { - return true; - } - void *dylib = _dlopen("/System/Library/PrivateFrameworks/Alternate/MallocStackLogging.framework/MallocStackLogging", RTLD_GLOBAL); - if (dylib == NULL) { - dylib = _dlopen("/System/Library/PrivateFrameworks/MallocStackLogging.framework/MallocStackLogging", RTLD_GLOBAL); - } - os_once(&_register_msl_dylib_pred, dylib, register_msl_dylib); - if (!msl.dylib) { - malloc_report(ASL_LEVEL_WARNING, "failed to load MallocStackLogging.framework\n"); - } - return msl.dylib == dylib; -} - -/* Symbolication.framework looks up this symbol by name inside libsystem_malloc.dylib. */ -uint64_t __mach_stack_logging_shared_memory_address = 0; - - -#pragma mark - -#pragma mark Malloc Stack Logging - Legacy stubs - -/* - * legacy API for MallocStackLogging. - * - * TODO, deprecate this, move clients off it and delete it. Clients should move - * to MallocStackLogging.framework for these APIs. 
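- * Each stub below follows the same basic pattern: make sure the MSL dylib is - * registered, dlsym the corresponding msl_* entry point, and forward to it, - * returning a conservative failure value (false, NULL or KERN_FAILURE) when - * the framework or the symbol is unavailable.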
- */ - -MALLOC_EXPORT -boolean_t -turn_on_stack_logging(stack_logging_mode_type mode) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return false; - } - boolean_t (*msl_turn_on_stack_logging) (stack_logging_mode_type mode); - msl_turn_on_stack_logging = _dlsym(msl.dylib, "msl_turn_on_stack_logging"); - if (!msl_turn_on_stack_logging) { - return false; - } - return msl_turn_on_stack_logging(mode); -} - -MALLOC_EXPORT -void turn_off_stack_logging(void) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return; - } - void (*msl_turn_off_stack_logging) (); - msl_turn_off_stack_logging = _dlsym(msl.dylib, "msl_turn_off_stack_logging"); - if (msl_turn_off_stack_logging) { - msl_turn_off_stack_logging(); - } -} - -kern_return_t -__mach_stack_logging_start_reading(task_t task, vm_address_t shared_memory_address, boolean_t *uses_lite_mode) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return KERN_FAILURE; - } - kern_return_t (*f) (task_t task, vm_address_t shared_memory_address, boolean_t *uses_lite_mode); - f = _dlsym(msl.dylib, "msl_start_reading"); - if (!f) { - return KERN_FAILURE; - } - return f(task, shared_memory_address, uses_lite_mode); -} - -kern_return_t -__mach_stack_logging_stop_reading(task_t task) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return KERN_FAILURE; - } - kern_return_t (*f) (task_t task); - f = _dlsym(msl.dylib, "msl_stop_reading"); - if (!f) { - return KERN_FAILURE; - } - return f(task); -} - -kern_return_t -__mach_stack_logging_get_frames(task_t task, - mach_vm_address_t address, - mach_vm_address_t *stack_frames_buffer, - uint32_t max_stack_frames, - uint32_t *count) -{ - malloc_register_stack_logger(); - if (!msl.get_frames_for_address) { - return KERN_FAILURE; - } - return msl.get_frames_for_address(task, address, stack_frames_buffer, max_stack_frames, count); -} - -uint64_t -__mach_stack_logging_stackid_for_vm_region(task_t task, mach_vm_address_t address) -{ - malloc_register_stack_logger(); - if (!msl.stackid_for_vm_region) { - return -1ull; - } - return msl.stackid_for_vm_region(task, address); -} - - -kern_return_t -__mach_stack_logging_frames_for_uniqued_stack(task_t task, - uint64_t stack_identifier, - mach_vm_address_t *stack_frames_buffer, - uint32_t max_stack_frames, - uint32_t *count) -{ - malloc_register_stack_logger(); - if (!msl.get_frames_for_stackid) { - return KERN_FAILURE; - } - return msl.get_frames_for_stackid(task, stack_identifier, stack_frames_buffer, max_stack_frames, count, NULL); -} - -kern_return_t -__mach_stack_logging_get_frames_for_stackid(task_t task, - uint64_t stack_identifier, - mach_vm_address_t *stack_frames_buffer, - uint32_t max_stack_frames, - uint32_t *count, - bool *last_frame_is_threadid) -{ - malloc_register_stack_logger(); - if (!msl.get_frames_for_stackid) { - return KERN_FAILURE; - } - return msl.get_frames_for_stackid(task, stack_identifier, stack_frames_buffer, max_stack_frames, count, last_frame_is_threadid); -} - -kern_return_t -__mach_stack_logging_uniquing_table_read_stack(struct backtrace_uniquing_table *uniquing_table, - uint64_t stackid, - mach_vm_address_t *out_frames_buffer, - uint32_t *out_frames_count, - uint32_t max_frames) -{ - malloc_register_stack_logger(); - if (!msl.uniquing_table_read_stack) { - return KERN_FAILURE; - } - return msl.uniquing_table_read_stack(uniquing_table, stackid, out_frames_buffer, out_frames_count, max_frames); -} - -kern_return_t -__mach_stack_logging_enumerate_records(task_t task, - mach_vm_address_t address, - void 
enumerator(mach_stack_logging_record_t, void *), - void *context) -{ - malloc_register_stack_logger(); - kern_return_t (*f) (task_t task, - mach_vm_address_t address, - void enumerator(mach_stack_logging_record_t, void *), - void *context); - if (!msl.dylib) { - return KERN_FAILURE; - } - f = _dlsym(msl.dylib, "msl_disk_stack_logs_enumerate_from_task"); - if (!f) { - return KERN_FAILURE; - } - return f(task, address, enumerator, context); -} - -struct backtrace_uniquing_table * -__mach_stack_logging_copy_uniquing_table(task_t task) -{ - malloc_register_stack_logger(); - struct backtrace_uniquing_table * (*f) (task_t task); - if (!msl.dylib) { - return NULL; - } - f = _dlsym(msl.dylib, "msl_uniquing_table_copy_from_task"); - if (!f) { - return NULL; - } - return f(task); -} - -struct backtrace_uniquing_table * -__mach_stack_logging_uniquing_table_copy_from_serialized(void *buffer, size_t size) -{ - malloc_register_stack_logger(); - struct backtrace_uniquing_table * (*f) (void *buffer, size_t size); - if (!msl.dylib) { - return NULL; - } - f = _dlsym(msl.dylib, "msl_uniquing_table_copy_from_serialized"); - if (!f) { - return NULL; - } - return f(buffer, size); -} - -void -__mach_stack_logging_uniquing_table_release(struct backtrace_uniquing_table *table) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return; - } - void (*f) (struct backtrace_uniquing_table *table); - f = _dlsym(msl.dylib, "msl_uniquing_table_release"); - if (f) { - f(table); - } -} - -void -__mach_stack_logging_uniquing_table_retain(struct backtrace_uniquing_table *table) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return; - } - void (*f) (struct backtrace_uniquing_table *table); - f = _dlsym(msl.dylib, "msl_uniquing_table_retain"); - if (f) { - f(table); - } -} - -extern -size_t -__mach_stack_logging_uniquing_table_sizeof(struct backtrace_uniquing_table *table) -{ - malloc_register_stack_logger(); - size_t (*f) (struct backtrace_uniquing_table *table); - f = _dlsym(msl.dylib, "msl_uniquing_table_retain"); - return f(table); -} - -void * -__mach_stack_logging_uniquing_table_serialize(struct backtrace_uniquing_table *table, mach_vm_size_t *size) -{ - malloc_register_stack_logger(); - if (!msl.dylib) { - return NULL; - } - void * (*f) (struct backtrace_uniquing_table *table, mach_vm_size_t *size); - f = _dlsym(msl.dylib, "msl_uniquing_table_serialize"); - if (!f) { - return NULL; - } - return f(table, size); -} - -kern_return_t -__mach_stack_logging_set_file_path(task_t task, char* file_path) -{ - return KERN_SUCCESS; -} - -/* WeChat references this, only god knows why. This symbol does nothing. */ -int stack_logging_enable_logging = 0; - -/* vim: set noet:ts=4:sw=4:cindent: */ diff --git a/src/libmalloc/src/malloc_common.c b/src/libmalloc/src/malloc_common.c deleted file mode 100644 index e7337e2d1..000000000 --- a/src/libmalloc/src/malloc_common.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#include "internal.h" - -#pragma mark - -#pragma mark Utility Functions - -// libplatform does not have strstr() and we don't want to add any new -// dependencies on libc, so we have to implement a version of strstr() -// here. Fortunately, as it's only used to look for boot arguments, it does not -// have to be efficient. We can also assume that the source string is -// nul-terminated. Eventually, we will move the function to a more central -// location and use it to replace other uses of strstr(). -const char * -malloc_common_strstr(const char *src, const char *target, size_t target_len) -{ - const char *next = src; - while (*next) { - if (!strncmp(next, target, target_len)) { - return next; - } - next++; - } - return NULL; -} - -// Converts a string to a long. If a non-numeric value is found, the -// return value is whatever has been accumulated so far. end_ptr always points -// to the character that caused the conversion to stop. We can't use strtol() -// etc because that would add a new dependency on libc. Eventually, this -// function could be made generally available within the library and used to -// replace the existing calls to strtol(). Currently only handles non-negative -// numbers and does not detect overflow. -long -malloc_common_convert_to_long(const char *ptr, const char **end_ptr) -{ - long value = 0; - while (*ptr) { - char c = *ptr; - if (c < '0' || c > '9') { - break; - } - value = value * 10 + (c - '0'); - ptr++; - } - *end_ptr = ptr; - return value; -} - -// Looks for a sequence of the form "key=value" in the string 'src' and -// returns the location of the first character of 'value', or NULL if not -// found. No spaces are permitted around the "=". -const char * -malloc_common_value_for_key(const char *src, const char *key) -{ - const char *ptr = src; - size_t keylen = strlen(key); - while ((ptr = malloc_common_strstr(ptr, key, keylen)) != NULL) { - ptr += keylen; - if (*ptr == '=') { - return ptr + 1; - } - } - return NULL; -} - -// Looks for a sequence of the form "key=value" in the string 'src' and -// returns the location of the first character of 'value'. No spaces are -// permitted around the "=". The value is copied to 'bufp', up to the first -// whitespace or nul character and bounded by maxlen, and nul-terminated. -// Returns bufp if the key was found, NULL if not. 
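- // Illustrative use with a made-up boot-args string (not from the original - // source): - // char buf[8]; - // if (malloc_common_value_for_key_copy("debug=1 npages=16", "npages", - // buf, sizeof(buf))) { - // // buf now holds "16" - // }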
-const char * -malloc_common_value_for_key_copy(const char *src, const char *key, - char *bufp, size_t maxlen) -{ - const char *ptr = malloc_common_value_for_key(src, key); - if (ptr) { - char *to = bufp; - while (maxlen > 1) { // Always leave room for a '\0' - char c = *ptr++; - if (c == '\0' || c == ' ' || c == '\t' || c == '\n') { - break; - } - *to++ = c; - maxlen--; - } - *to = '\0'; // Always nul-terminate - return bufp; - } - return NULL; -} - - diff --git a/src/libmalloc/src/malloc_common.h b/src/libmalloc/src/malloc_common.h deleted file mode 100644 index e8b745950..000000000 --- a/src/libmalloc/src/malloc_common.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#ifndef __MALLOC_COMMON_H -#define __MALLOC_COMMON_H - -MALLOC_NOEXPORT -const char * -malloc_common_strstr(const char *src, const char *target, size_t target_len); - -MALLOC_NOEXPORT -long -malloc_common_convert_to_long(const char *ptr, const char **end_ptr); - -MALLOC_NOEXPORT -const char * -malloc_common_value_for_key(const char *src, const char *key); - -MALLOC_NOEXPORT -const char * -malloc_common_value_for_key_copy(const char *src, const char *key, - char *bufp, size_t maxlen); - -#endif // __MALLOC_COMMON_H diff --git a/src/libmalloc/src/malloc_printf.c b/src/libmalloc/src/malloc_printf.c deleted file mode 100644 index 9f3b8a570..000000000 --- a/src/libmalloc/src/malloc_printf.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ -#include "internal.h" - -/* global flag to suppress ASL logging e.g. 
for syslogd */ -int _malloc_no_asl_log = 0; - -typedef enum { - DEBUG_WRITE_NONE, - DEBUG_WRITE_ON_CRASH, - DEBUG_WRITE_ALWAYS, -} write_debug_mode_t; - -static const char Malloc_Facility[] = "com.apple.Libsystem.malloc"; -static int malloc_debug_file = STDERR_FILENO; -static write_debug_mode_t debug_mode = DEBUG_WRITE_NONE; -static boolean_t malloc_error_stop; // Stop when reporting error. -static boolean_t malloc_error_sleep; // Sleep after reporting error. -static const int default_sleep_time = 3600; - -// Gets the default time to sleep for when reporting an error. Returns 0 -// (meaning do not sleep) if malloc_error_sleep is 0 (that is, if sleeping on -// error is not configured). -MALLOC_INLINE MALLOC_ALWAYS_INLINE -static unsigned _malloc_default_debug_sleep_time() -{ - return malloc_error_sleep ? default_sleep_time : 0; -} - -#define WRITE_TO_DEBUG_FILE(flags) \ - ((debug_mode == DEBUG_WRITE_ALWAYS) || \ - (debug_mode == DEBUG_WRITE_ON_CRASH && (flags & MALLOC_REPORT_CRASH))) -#define MALLOC_REPORT_LEVEL_MASK 0x0f - -#pragma mark - -#pragma mark Configuration - -void -malloc_print_configure(bool restricted) -{ - char *flag = getenv("MallocDebugReport"); - if (flag) { - if (!strcmp(flag, "stderr")) { - debug_mode = DEBUG_WRITE_ALWAYS; - } else if (!strcmp(flag, "crash")) { - debug_mode = DEBUG_WRITE_ON_CRASH; - } else if (!strcmp(flag, "none")) { - debug_mode = DEBUG_WRITE_NONE; - } else { - debug_mode = DEBUG_WRITE_ALWAYS; - malloc_printf("Unrecognized value for MallocDebugReport (%s) - using 'stderr'\n", flag); - } - } else { - // Default is to write to stderr only if it's a tty. - if (isatty(STDERR_FILENO)) { - debug_mode = DEBUG_WRITE_ALWAYS; - } - } - if (getenv("MallocErrorStop")) { - malloc_error_stop = TRUE; - } - if (getenv("MallocErrorSleep")) { - malloc_error_sleep = TRUE; - } -} - -#pragma mark - -#pragma mark Low level debug output - -/* - * The functions that follow use _simple_*printf. They deal with a - * subset of printf format specifiers and do not call malloc internally. 
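- * Staying off malloc here matters: these routines run while malloc itself is - * reporting an error, possibly with zone locks held, so allocating could - * deadlock or recurse into the very failure being reported.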
- */ -static void -_malloc_put(uint32_t flags, const char *msg) -{ - _SIMPLE_STRING b; - if ((b = _simple_salloc()) == NULL) { - if (WRITE_TO_DEBUG_FILE(flags)) { - if (!(flags & MALLOC_REPORT_NOPREFIX)) { - void *self = _os_tsd_get_direct(__TSD_THREAD_SELF); - _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), self); - } - write(malloc_debug_file, msg, strlen(msg)); - } - return; - } - if (!(flags & MALLOC_REPORT_NOPREFIX)) { - void *self = _os_tsd_get_direct(__TSD_THREAD_SELF); - _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), self); - } - - _simple_sprintf(b, "%s", msg); - if (WRITE_TO_DEBUG_FILE(flags)) { - _simple_put(b, malloc_debug_file); - } - if (!_malloc_no_asl_log && !(flags & MALLOC_REPORT_NOLOG)) { - _simple_asl_log(flags & MALLOC_REPORT_LEVEL_MASK, Malloc_Facility, _simple_string(b)); - } - _simple_sfree(b); -} - -#pragma mark - -#pragma mark High-Level Reporting Functions - -MALLOC_NOINLINE void -malloc_vreport(uint32_t flags, unsigned sleep_time, const char *prefix_msg, - const void *prefix_arg, const char *fmt, va_list ap) -{ - const char *crash_msg = NULL; - _SIMPLE_STRING b = NULL; - if ((b = _simple_salloc()) == NULL) { - if (WRITE_TO_DEBUG_FILE(flags)) { - if (!(flags & MALLOC_REPORT_NOPREFIX)) { - void *self = _os_tsd_get_direct(__TSD_THREAD_SELF); - _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), self); - } - if (prefix_msg) { - _simple_dprintf(malloc_debug_file, prefix_msg, prefix_arg); - } - _simple_vdprintf(malloc_debug_file, fmt, ap); - } - if (flags & MALLOC_REPORT_CRASH) { - crash_msg = fmt; - } - } else { - if (!(flags & MALLOC_REPORT_NOPREFIX)) { - void *self = _os_tsd_get_direct(__TSD_THREAD_SELF); - _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), self); - } - if (prefix_msg) { - _simple_sprintf(b, prefix_msg, prefix_arg); - } - _simple_vsprintf(b, fmt, ap); - if (WRITE_TO_DEBUG_FILE(flags)) { - _simple_put(b, malloc_debug_file); - } - if (!_malloc_no_asl_log && !(flags & MALLOC_REPORT_NOLOG)) { - _simple_asl_log(flags & MALLOC_REPORT_LEVEL_MASK, Malloc_Facility, _simple_string(b)); - } - if (flags & MALLOC_REPORT_CRASH) { - crash_msg = _simple_string(b); - } else { - _simple_sfree(b); - } - } - - if (flags & (MALLOC_REPORT_DEBUG | MALLOC_REPORT_CRASH)) { - _malloc_put(flags, "*** set a breakpoint in malloc_error_break to debug\n"); - malloc_error_break(); - - if (malloc_error_stop) { - _malloc_put(ASL_LEVEL_NOTICE, "*** sending SIGSTOP to help debug\n"); - kill(getpid(), SIGSTOP); - } else if (sleep_time) { - _malloc_put(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n"); - sleep(sleep_time); - } - } - - if (flags & MALLOC_REPORT_CRASH) { - _os_set_crash_log_message_dynamic(crash_msg); - abort(); - } -} - -MALLOC_NOEXPORT void -malloc_report(uint32_t flags, const char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - malloc_vreport(flags, _malloc_default_debug_sleep_time(), NULL, NULL, fmt, ap); - va_end(ap); -} - -MALLOC_NOEXPORT void -malloc_report_simple(const char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - malloc_vreport(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, - _malloc_default_debug_sleep_time(), NULL, NULL, fmt, ap); - va_end(ap); -} - -#pragma mark - -#pragma mark Zone Error Reporting - -void -malloc_zone_error(uint32_t flags, bool is_corruption, const char *fmt, ...) 
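- // Note that 'flags' here carries the zone debug flags (MALLOC_ABORT_ON_*), - // not MALLOC_REPORT_* values; together with is_corruption it decides whether - // the report escalates to a crash.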
-{ - va_list ap; - va_start(ap, fmt); - uint32_t report_flags = MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG; - if ((is_corruption && (flags & MALLOC_ABORT_ON_CORRUPTION)) || - (flags & MALLOC_ABORT_ON_ERROR)) { - report_flags = MALLOC_REPORT_CRASH; - } - malloc_vreport(report_flags | ASL_LEVEL_ERR, _malloc_default_debug_sleep_time(), - NULL, NULL, fmt, ap); - va_end(ap); -} - -#pragma mark - -#pragma mark Malloc Output API. - -// malloc_printf() needs to be retained and exported because it's API (defined -// in malloc/malloc.h). It's equivalent to calling malloc_report() with -// a flags value of ASL_LEVEL_ERR, so does not result in a crash or any prompts -// for diagnostics or breakpoints. -// Do not use in malloc code. -void -malloc_printf(const char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - malloc_vreport(ASL_LEVEL_ERR, 0, NULL, NULL, fmt, ap); - va_end(ap); -} diff --git a/src/libmalloc/src/msl_lite_support.c b/src/libmalloc/src/msl_lite_support.c deleted file mode 100644 index 555648e80..000000000 --- a/src/libmalloc/src/msl_lite_support.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -/* - * Support for stack logging lite in the malloc stack logging library. This code - * will go away when MSL lite no longer requires its own zone. - */ -#ifndef MALLOC_ENABLE_MSL_LITE_SPI -#define MALLOC_ENABLE_MSL_LITE_SPI 1 -#endif // MALLOC_ENABLE_MSL_LITE_SPI - -#include "internal.h" - -// These definitions are here and not in a header file because all of this code -// is intended to go away very soon. -extern malloc_zone_t **malloc_zones; -extern malloc_zone_t* lite_zone; -extern void malloc_zone_register_while_locked(malloc_zone_t *zone); -extern boolean_t has_default_zone0(void); - -static szone_t *create_and_insert_msl_lite_zone(const char *name, - void *mallocp, void *callocp, - void *vallocp, void *reallocp, void *batch_mallocp, - void *batch_freep, void *memalignp, void *freep, - void *free_definite_sizep, void *sizep); - -static size_t -_calloc_get_size(size_t num_items, size_t size, size_t extra_size, - size_t *total_size) -{ - // calloc_get_size is inlined. 
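- // A sketch of what the inline presumably does (assumed, not taken from the - // inline definition): multiply num_items by size, add extra_size, and check - // each step for overflow, storing the result through total_size. This - // out-of-line wrapper exists only so the MSL lite hooks table below can - // take its address.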
- return calloc_get_size(num_items, size, extra_size, total_size); -} - -static malloc_zone_t * -szone_helper_zone(szone_t *zone) -{ - return &zone->helper_zone->basic_zone; -} - -static malloc_zone_t * -szone_basic_zone(szone_t *zone) -{ - return &zone->basic_zone; -} - -static struct _malloc_msl_lite_hooks_s malloc_msl_lite_hooks = { - .create_and_insert_msl_lite_zone = &create_and_insert_msl_lite_zone, - .helper_zone = &szone_helper_zone, - .szone_size = &szone_size, - .szone_malloc = &szone_malloc, - .szone_malloc_should_clear = &szone_malloc_should_clear, - .szone_free = &szone_free, - .szone_realloc = &szone_realloc, - .szone_valloc = &szone_valloc, - .szone_memalign = &szone_memalign, - .szone_batch_malloc = &szone_batch_malloc, - .szone_batch_free = &szone_batch_free, - .has_default_zone0 = &has_default_zone0, - .calloc_get_size = &_calloc_get_size, - .basic_zone = &szone_basic_zone, - .szone_good_size = &szone_good_size, -}; - -static szone_t * -create_and_insert_msl_lite_zone(const char *name, - void *mallocp, void *callocp, - void *vallocp, void *reallocp, void *batch_mallocp, - void *batch_freep, void *memalignp, void *freep, - void *free_definite_sizep, void *sizep) -{ -// TODO: this has to be locked in some way.... - szone_t* szone = create_scalable_szone(0, malloc_debug_flags); - malloc_zone_t *zone = &szone->basic_zone; - - // unprotect function pointers - mprotect(szone, sizeof(szone->basic_zone), PROT_READ | PROT_WRITE); - - // set the function pointers - szone->basic_zone.malloc = mallocp; - szone->basic_zone.calloc = callocp; - szone->basic_zone.valloc = vallocp; - szone->basic_zone.realloc = reallocp; - szone->basic_zone.batch_malloc = batch_mallocp; - szone->basic_zone.batch_free = batch_freep; - szone->basic_zone.memalign = memalignp; - szone->basic_zone.free = freep; - szone->basic_zone.free_definite_size = free_definite_sizep; - szone->basic_zone.size = sizep; - - // protect function pointers - mprotect(szone, sizeof(szone->basic_zone), PROT_READ); - - // set helper zone - szone->helper_zone = (szone_t *)malloc_zones[0]; - - malloc_zone_register_while_locked(zone); - malloc_set_zone_name(zone, name); - lite_zone = zone; - - return szone; -} - -/* - * Copies the malloc library's _malloc_msl_lite_hooks_t structure to a given - * location. We pass the structure size to allow the structure to - * grow. Since this is a temporary arrangement, we don't need to worry about - * pointer authentication here or in the _malloc_msl_lite_hooks_t structure itself. - */ -MALLOC_NOEXPORT -void -set_msl_lite_hooks(set_msl_lite_hooks_callout_t callout) -{ - callout(&malloc_msl_lite_hooks, sizeof(malloc_msl_lite_hooks)); -} diff --git a/src/libmalloc/src/nano_malloc.c b/src/libmalloc/src/nano_malloc.c deleted file mode 100644 index 94a56ab6a..000000000 --- a/src/libmalloc/src/nano_malloc.c +++ /dev/null @@ -1,1956 +0,0 @@ -/* - * Copyright (c) 1999, 2000, 2003, 2005, 2008, 2012 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -/* nano_malloc for 64bit ABI */ -#if CONFIG_NANOZONE - -/********************* PROTOTYPES ***********************/ - -static void nano_statistics(nanozone_t *nanozone, malloc_statistics_t *stats); - -/********************* VERY LOW LEVEL UTILITIES ************************/ -// msg prints after fmt, ... - -static MALLOC_ALWAYS_INLINE unsigned int -nano_mag_index(const nanozone_t *nanozone) -{ - if (os_likely(_os_cpu_number_override == -1)) { - return (_os_cpu_number() >> hyper_shift) % nano_common_max_magazines; - } - return (_os_cpu_number_override >> hyper_shift) % nano_common_max_magazines; -} - -#if NANO_PREALLOCATE_BAND_VM -static boolean_t -nano_preallocate_band_vm(void) -{ - nano_blk_addr_t u; - uintptr_t s, e; - - u.fields.nano_signature = NANOZONE_SIGNATURE; - u.fields.nano_mag_index = 0; - u.fields.nano_band = 0; - u.fields.nano_slot = 0; - u.fields.nano_offset = 0; - s = u.addr; // start of first possible band - - u.fields.nano_mag_index = (1 << NANO_MAG_BITS) - 1; - u.fields.nano_band = (1 << NANO_BAND_BITS) - 1; - e = u.addr + BAND_SIZE; // end of last possible band - - return nano_common_allocate_vm_space(s, e - s); -} -#endif - -/* - * We maintain separate free lists for each (quantized) size. The literature - * calls this the "segregated policy". - */ - -static boolean_t -segregated_band_grow(nanozone_t *nanozone, nano_meta_admin_t pMeta, size_t slot_bytes, unsigned int mag_index) -{ - nano_blk_addr_t u; // the compiler holds this in a register - uintptr_t p, s; - size_t watermark, hiwater; - - if (0 == pMeta->slot_current_base_addr) { // First encounter? - - u.fields.nano_signature = NANOZONE_SIGNATURE; - u.fields.nano_mag_index = mag_index; - u.fields.nano_band = 0; - u.fields.nano_slot = (slot_bytes >> SHIFT_NANO_QUANTUM) - 1; - u.fields.nano_offset = 0; - - p = u.addr; - pMeta->slot_bytes = (unsigned int)slot_bytes; - pMeta->slot_objects = SLOT_IN_BAND_SIZE / slot_bytes; - } else { - p = pMeta->slot_current_base_addr + BAND_SIZE; // Growing, so stride ahead by BAND_SIZE - - u.addr = (uint64_t)p; - if (0 == u.fields.nano_band) { // Did the band index wrap? 
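- // Adding BAND_SIZE carried out of the nano_band bit-field, so this slot has - // no more address space to grow into; fail and let the caller mark the slot - // exhausted.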
- return FALSE; - } - - assert(slot_bytes == pMeta->slot_bytes); - } - pMeta->slot_current_base_addr = p; - - mach_vm_address_t vm_addr = p & ~((uintptr_t)(BAND_SIZE - 1)); // Address of the (2MB) band covering this (128KB) slot - if (nanozone->band_max_mapped_baseaddr[mag_index] < vm_addr) { -#if !NANO_PREALLOCATE_BAND_VM - // Obtain the next band to cover this slot - kern_return_t kr = mach_vm_map(mach_task_self(), &vm_addr, BAND_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_NANO), - MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - - void *q = (void *)vm_addr; - if (kr || q != (void *)(p & ~((uintptr_t)(BAND_SIZE - 1)))) { // Must get exactly what we asked for - if (!kr) { - mach_vm_deallocate(mach_task_self(), vm_addr, BAND_SIZE); - } - return FALSE; - } -#endif - nanozone->band_max_mapped_baseaddr[mag_index] = vm_addr; - } - - // Randomize the starting allocation from this slot (introduces 11 to 14 bits of entropy) - if (0 == pMeta->slot_objects_mapped) { // First encounter? - pMeta->slot_objects_skipped = (malloc_entropy[1] % (SLOT_IN_BAND_SIZE / slot_bytes)); - pMeta->slot_bump_addr = p + (pMeta->slot_objects_skipped * slot_bytes); - } else { - pMeta->slot_bump_addr = p; - } - - pMeta->slot_limit_addr = p + (SLOT_IN_BAND_SIZE / slot_bytes) * slot_bytes; - pMeta->slot_objects_mapped += (SLOT_IN_BAND_SIZE / slot_bytes); - - u.fields.nano_signature = NANOZONE_SIGNATURE; - u.fields.nano_mag_index = mag_index; - u.fields.nano_band = 0; - u.fields.nano_slot = 0; - u.fields.nano_offset = 0; - s = u.addr; // Base for this core. - - // Set the high water mark for this CPU's entire magazine, if this resupply raised it. - watermark = nanozone->core_mapped_size[mag_index]; - hiwater = MAX(watermark, p - s + SLOT_IN_BAND_SIZE); - nanozone->core_mapped_size[mag_index] = hiwater; - - return TRUE; -} - -static inline unsigned long -divrem(unsigned long a, unsigned int b, unsigned int *remainder) -{ - // Encapsulating the modulo and division in an in-lined function convinces the compiler - // to issue just a single divide instruction to obtain quotient and remainder. Go figure. - *remainder = a % b; - return a / b; -} - -static MALLOC_INLINE void * -segregated_next_block(nanozone_t *nanozone, nano_meta_admin_t pMeta, size_t slot_bytes, unsigned int mag_index) -{ - while (1) { - uintptr_t theLimit = pMeta->slot_limit_addr; // Capture the slot limit that bounds slot_bump_addr right now - uintptr_t b = OSAtomicAdd64Barrier(slot_bytes, (volatile int64_t *)&(pMeta->slot_bump_addr)); - b -= slot_bytes; // Atomic op returned addr of *next* free block. Subtract to get addr for *this* allocation. - - if (b < theLimit) { // Did we stay within the bound of the present slot allocation? - return (void *)b; // Yep, so the slot_bump_addr this thread incremented is good to go - } else { - if (pMeta->slot_exhausted) { // exhausted all the bands available for this slot? - pMeta->slot_bump_addr = theLimit; - return 0; // We're toast - } else { - // One thread will grow the heap, others will see it's been grown and retry allocation - _malloc_lock_lock(&nanozone->band_resupply_lock[mag_index]); - // re-check state now that we've taken the lock - if (pMeta->slot_exhausted) { - _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]); - return 0; // Toast - } else if (b < pMeta->slot_limit_addr) { - _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]); - continue; // ... the slot was successfully grown by first-taker (not us). Now try again.
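The retry loop above is the heart of Nano's allocation fast path: a per-slot bump pointer advanced by an atomic add, bounded by the current band's limit, with a lock taken only when the band must be grown. A minimal sketch of the same pattern in portable C11 follows; all names here are illustrative stand-ins, not libmalloc API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    _Atomic uintptr_t bump;  /* next unissued block, like slot_bump_addr */
    uintptr_t limit;         /* end of the current band, like slot_limit_addr */
} toy_slot_t;

/* Returns the base of a slot_bytes-sized block, or 0 when the band is
 * spent and the caller must grow it under a lock (cf. segregated_band_grow). */
static uintptr_t
toy_slot_alloc(toy_slot_t *slot, size_t slot_bytes)
{
    /* atomic_fetch_add returns the prior value, which is already this
     * allocation's base. OSAtomicAdd64Barrier above returns the new value,
     * which is why the original subtracts slot_bytes afterward. */
    uintptr_t b = atomic_fetch_add(&slot->bump, (uintptr_t)slot_bytes);
    return (b < slot->limit) ? b : 0;
}

Note that, as in the original, a racing thread may push the bump pointer past the limit; the failing threads then serialize on the resupply lock and retry rather than corrupting state.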
- } else if (segregated_band_grow(nanozone, pMeta, slot_bytes, mag_index)) { - _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]); - continue; // ... the slot has been successfully grown by us. Now try again. - } else { - pMeta->slot_exhausted = TRUE; - pMeta->slot_bump_addr = theLimit; - _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]); - return 0; - } - } - } - } -} - -static MALLOC_INLINE size_t -segregated_size_to_fit(nanozone_t *nanozone, size_t size, size_t *pKey) -{ - size_t k, slot_bytes; - - if (0 == size) { - size = NANO_REGIME_QUANTA_SIZE; // Historical behavior - } - k = (size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM; // round up and shift for number of quanta - slot_bytes = k << SHIFT_NANO_QUANTUM; // multiply by power of two quanta size - *pKey = k - 1; // Zero-based! - - return slot_bytes; -} - -static MALLOC_INLINE index_t -offset_to_index(nanozone_t *nanozone, nano_meta_admin_t pMeta, uintptr_t offset) -{ - unsigned int slot_bytes = pMeta->slot_bytes; - unsigned int slot_objects = pMeta->slot_objects; // SLOT_IN_BAND_SIZE / slot_bytes; - unsigned int rem; - unsigned long quo = divrem(offset, BAND_SIZE, &rem); - - assert(0 == rem % slot_bytes || pMeta->slot_exhausted); - return (index_t)((quo * slot_objects) + (rem / slot_bytes)); -} - -static MALLOC_INLINE uintptr_t -index_to_offset(nanozone_t *nanozone, nano_meta_admin_t pMeta, index_t i) -{ - unsigned int slot_bytes = pMeta->slot_bytes; - unsigned int slot_objects = pMeta->slot_objects; // SLOT_IN_BAND_SIZE / slot_bytes; - unsigned int rem; - unsigned long quo = divrem(i, slot_objects, &rem); - - return (quo * BAND_SIZE) + (rem * slot_bytes); -} - -static kern_return_t -segregated_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - nanozone_t *nanozone, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - unsigned int mag_index, slot_key; - vm_range_t ptr_range; - vm_range_t buffer[MAX_RECORDER_BUFFER]; - kern_return_t err; - unsigned count = 0; - - for (mag_index = 0; mag_index < nano_common_max_magazines; mag_index++) { - uintptr_t clone_magazine; // magazine base for ourselves - nano_blk_addr_t p; // slot base for remote - uintptr_t clone_slot_base; // slot base for ourselves (tracks with "p") - - // Establish p as base address for slot 0 in remote - p.fields.nano_signature = NANOZONE_SIGNATURE; - p.fields.nano_mag_index = mag_index; - p.fields.nano_band = 0; - p.fields.nano_slot = 0; - p.fields.nano_offset = 0; - - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - mach_vm_address_t vm_addr; - mach_vm_size_t alloc_size = nanozone->core_mapped_size[mag_index]; - int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC); - - vm_addr = vm_page_size; - kern_return_t kr = mach_vm_allocate(mach_task_self(), &vm_addr, alloc_size, alloc_flags); - if (kr) { - return kr; - } - clone_magazine = (uintptr_t)vm_addr; - clone_slot_base = clone_magazine; // base for slot 0 in this local magazine - } else { - clone_slot_base = clone_magazine = 0; // and won't be used in this loop - } - - for (slot_key = 0; slot_key < SLOT_KEY_LIMIT; p.addr += SLOT_IN_BAND_SIZE, // Advance to next slot base for remote - clone_slot_base += SLOT_IN_BAND_SIZE, // Advance to next slot base for ourselves - slot_key++) { - nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]); - size_t slot_objects_mapped = pMeta->slot_objects_mapped; // capture this volatile count - - if (0 == slot_objects_mapped) { // Nothing allocated in this magazine for this slot? 
- continue; - } - - if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { - /* do NOTHING as there is no distinct admin region */ - } - - if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { - nano_blk_addr_t q = p; - uintptr_t skip_adj = index_to_offset(nanozone, pMeta, (index_t)pMeta->slot_objects_skipped); - - while (q.addr < pMeta->slot_limit_addr) { - ptr_range.address = q.addr + skip_adj; - ptr_range.size = SLOT_IN_BAND_SIZE - skip_adj; - skip_adj = 0; - recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); - q.addr += BAND_SIZE; - } - } - - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - nano_blk_addr_t q = p; - uintptr_t slot_band, clone_slot_band_base = clone_slot_base; - uintptr_t skip_adj = index_to_offset(nanozone, pMeta, (index_t)pMeta->slot_objects_skipped); - - // Copy the bitarray_t denoting madvise()'d pages (if any) into *this* task's address space - bitarray_t madv_page_bitarray; - int log_page_count; - - if (pMeta->slot_madvised_pages) { - log_page_count = pMeta->slot_madvised_log_page_count; - err = reader(task, (vm_address_t)(pMeta->slot_madvised_pages), bitarray_size(log_page_count), - (void **)&madv_page_bitarray); - if (err) { - return err; - } - } else { - madv_page_bitarray = NULL; - log_page_count = 0; - } - - while (q.addr < pMeta->slot_limit_addr) { - // read slot in each remote band. Lands in some random location. Do not read - // parts of the slot that are in madvised pages. - if (!madv_page_bitarray) { - // Nothing madvised yet - read everything in one go. - size_t len = MIN(pMeta->slot_bump_addr - q.addr, SLOT_IN_BAND_SIZE) - skip_adj; - err = reader(task, (vm_address_t)(q.addr + skip_adj), len, (void **)&slot_band); - if (err) { - return err; - } - - // Place the data just read in the correct position relative to the local magazine. - memcpy((void *)(clone_slot_band_base + skip_adj), (void *)slot_band, len); - } else { - // We madvised at least one page. Read only the pages that - // have not been madvised. If bitarray_t had operations - // like "get next bit set after a given bit" and "find - // next unset bit after a given bit", we could do this more - // efficiently but given that it doesn't, we have to walk - // through each page individually. In practice this is not - // much of an issue because this code is only used by - // sampling tools and the additional time required is not - // really noticeable. - size_t len = MIN(pMeta->slot_bump_addr - q.addr, SLOT_IN_BAND_SIZE) - skip_adj; - vm_address_t start_addr = (vm_address_t)(q.addr + skip_adj); - vm_address_t end_addr = (vm_address_t)(start_addr + len); - void *target_addr = (void *)(clone_slot_band_base + skip_adj); - for (vm_address_t addr = start_addr; addr < end_addr;) { - vm_address_t next_page_addr = trunc_page_kernel(addr + vm_kernel_page_size); - size_t read_size = MIN(len, next_page_addr - addr); - - boolean_t madvised = false; - nano_blk_addr_t r; - r.addr = addr; - index_t pgnum = ((((unsigned)r.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)r.fields.nano_offset)) >> - vm_kernel_page_shift; - unsigned int log_page_count = pMeta->slot_madvised_log_page_count; - madvised = (pgnum < (1 << log_page_count)) && - bitarray_get(madv_page_bitarray, log_page_count, pgnum); - if (!madvised) { - // This is not an madvised page - grab the data. - err = reader(task, addr, read_size, (void **)&slot_band); - if (err) { - return err; - } - - // Place the data just read in the correct position relative to the local magazine. 
- memcpy(target_addr, (void *)slot_band, read_size); - } else { - // This is an madvised page - there should be nothing in here that's - // on the freelist, so just write garbage to the target memory. - memset(target_addr, (char)0xee, read_size); - } - addr = next_page_addr; - target_addr += read_size; - len -= read_size; - } - } - - // Simultaneously advance pointers in remote and ourselves to the next band. - q.addr += BAND_SIZE; - clone_slot_band_base += BAND_SIZE; - skip_adj = 0; - } - - // Walk the slot free list and populate a bitarray_t - int log_size = 64 - __builtin_clzl(slot_objects_mapped); - bitarray_t slot_bitarray = bitarray_create(log_size); - - if (!slot_bitarray) { - return errno; - } - - chained_block_t t; - unsigned stoploss = (unsigned)slot_objects_mapped; - while ((t = OSAtomicDequeue( - &(pMeta->slot_LIFO), offsetof(struct chained_block_s, next) + (clone_slot_base - p.addr)))) { - if (0 == stoploss) { - malloc_report(ASL_LEVEL_ERR, "Free list walk in segregated_in_use_enumerator exceeded object count.\n"); - break; - } - stoploss--; - - uintptr_t offset = ((uintptr_t)t - p.addr); // offset from beginning of slot, task-independent - index_t block_index = offset_to_index(nanozone, pMeta, offset); - - if (block_index < slot_objects_mapped) { - bitarray_set(slot_bitarray, log_size, block_index); - } - } - // N.B. pMeta->slot_LIFO in *this* task is now drained (remote free list has *not* been disturbed) - - - // Enumerate all the block indices issued to date, and report those not on the free list - index_t i; - for (i = (index_t)pMeta->slot_objects_skipped; i < slot_objects_mapped; ++i) { - uintptr_t block_offset = index_to_offset(nanozone, pMeta, i); - if (p.addr + block_offset >= pMeta->slot_bump_addr) { - break; - } - - // blocks falling on madvise()'d pages are free! So not enumerated. - if (madv_page_bitarray) { - nano_blk_addr_t q; - index_t pgnum, pgnum_end; - - q.addr = p.addr + block_offset; - pgnum = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - q.addr += pMeta->slot_bytes - 1; - pgnum_end = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - - if (pgnum < (1 << log_page_count)) { // bounds check for bitarray_get()'s that follow - if (bitarray_get(madv_page_bitarray, log_page_count, pgnum) || - bitarray_get(madv_page_bitarray, log_page_count, pgnum_end)) { - continue; - } - } - } - - if (!bitarray_get(slot_bitarray, log_size, i)) { - buffer[count].address = p.addr + block_offset; - buffer[count].size = (slot_key + 1) << SHIFT_NANO_QUANTUM; - count++; - if (count >= MAX_RECORDER_BUFFER) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - } - } - if (count) { - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); - count = 0; - } - - free(slot_bitarray); - } - } - if (clone_magazine) { - mach_vm_address_t vm_addr = clone_magazine; - mach_vm_size_t alloc_size = nanozone->core_mapped_size[mag_index]; - mach_vm_deallocate(mach_task_self(), vm_addr, alloc_size); - } - } - return 0; -} - -/****************** nanozone methods **********************/ -/* - * These methods are called with "ptr" known to possess the nano signature (from - * which we can additionally infer "ptr" is not NULL), and with "size" bounded to - * the extent of the nano allocation regime -- (0, 256]. 
- */ - -static MALLOC_INLINE MALLOC_UNUSED boolean_t -_nano_block_inuse_p(nanozone_t *nanozone, const void *ptr) -{ - nano_blk_addr_t p; // happily, the compiler holds this in a register - nano_meta_admin_t pMeta; - chained_block_t head = NULL, tail = NULL, t; - boolean_t inuse = TRUE; - - p.addr = (uint64_t)ptr; // place ptr on the dissecting table - - pMeta = &(nanozone->meta_data[p.fields.nano_mag_index][p.fields.nano_slot]); - - // pop elements off the free list all the while looking for ptr. - unsigned stoploss = (unsigned)pMeta->slot_objects_mapped; - while ((t = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next)))) { - if (0 == stoploss) { - malloc_zone_error(nanozone->debug_flags, true, - "Free list walk for slot %p in _nano_block_inuse_p exceeded object count.\n", - (void *)&(pMeta->slot_LIFO)); - } - stoploss--; - - if (NULL == head) { - head = t; - } else { - tail->next = t; - } - tail = t; - - if (ptr == t) { - inuse = FALSE; - break; - } - } - if (tail) { - tail->next = NULL; - } - - // push the free list extracted above back onto the LIFO, all at once - if (head) { - OSAtomicEnqueue(&(pMeta->slot_LIFO), head, (uintptr_t)tail - (uintptr_t)head + offsetof(struct chained_block_s, next)); - } - - return inuse; -} - -static MALLOC_INLINE size_t -__nano_vet_and_size_inner(nanozone_t *nanozone, const void *ptr, boolean_t inner) -{ - // Extracts the size of the block in bytes. Checks for a plausible ptr. - nano_blk_addr_t p; // the compiler holds this in a register - nano_meta_admin_t pMeta; - - p.addr = (uint64_t)ptr; // Begin the dissection of ptr - - if (NANOZONE_SIGNATURE != p.fields.nano_signature) { - return 0; - } - - if (nano_common_max_magazines <= p.fields.nano_mag_index) { - return 0; - } - - if (!inner && p.fields.nano_offset & NANO_QUANTA_MASK) { // stray low-order bits? - return 0; - } - - pMeta = &(nanozone->meta_data[p.fields.nano_mag_index][p.fields.nano_slot]); - if ((void *)(pMeta->slot_bump_addr) <= ptr) { - return 0; // Beyond what's ever been allocated! - } - if (!inner && ((p.fields.nano_offset % pMeta->slot_bytes) != 0)) { - return 0; // Not an exact multiple of the block size for this slot - } - return pMeta->slot_bytes; -} - - -static MALLOC_INLINE size_t -__nano_vet_and_size(nanozone_t *nanozone, const void *ptr) -{ - return __nano_vet_and_size_inner(nanozone, ptr, false); -} - -static MALLOC_ALWAYS_INLINE boolean_t -_nano_block_has_canary_value(nanozone_t *nanozone, const void *ptr) -{ - return (((chained_block_t)ptr)->double_free_guard ^ nanozone->cookie) - == (uintptr_t)ptr; -} - -static MALLOC_ALWAYS_INLINE void -_nano_block_set_canary_value(nanozone_t *nanozone, const void *ptr) -{ - ((chained_block_t)ptr)->double_free_guard = - ((uintptr_t)ptr) ^ nanozone->cookie; -} - -static MALLOC_INLINE size_t -_nano_vet_and_size_of_live(nanozone_t *nanozone, const void *ptr) -{ - size_t size = __nano_vet_and_size(nanozone, ptr); - - if (0 == size) { // ptr fails sanity check? - return 0; - } - - // We have the invariant: If ptr is on a free list, then ptr->double_free_guard is the canary. - // So if ptr->double_free_guard is NOT the canary, then ptr is not on a free list, hence is live. - if (!_nano_block_has_canary_value(nanozone, ptr)) { - return size; // Common case: not on a free list, hence live. Return its size. 
} else { - // confirm that ptr is live despite ptr->double_free_guard having the canary value - if (_nano_block_inuse_p(nanozone, ptr)) { - return size; // live block that exhibits canary - } else { - return 0; // ptr wasn't live after all (likely a double free) - } -} - -static MALLOC_INLINE size_t -_nano_vet_and_size_of_free(nanozone_t *nanozone, const void *ptr) -{ - size_t size = __nano_vet_and_size(nanozone, ptr); - - if (0 == size) { // ptr fails sanity check? - return 0; - } - - // ptr was just dequeued from a free list, so ptr->double_free_guard must have the canary value. - if (_nano_block_has_canary_value(nanozone, ptr)) { - return size; // return the size of this well-formed free block. - } else { - return 0; // Broken invariant: If ptr is on a free list, then ptr->double_free_guard is the canary. (likely use after free) - } -} - -static void * -_nano_malloc_check_clear(nanozone_t *nanozone, size_t size, boolean_t cleared_requested) -{ - MALLOC_TRACE(TRACE_nano_malloc, (uintptr_t)nanozone, size, cleared_requested, 0); - - void *ptr; - size_t slot_key; - size_t slot_bytes = segregated_size_to_fit(nanozone, size, &slot_key); // Note slot_key is set here - mag_index_t mag_index = nano_mag_index(nanozone); - - nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]); - - ptr = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next)); - if (ptr) { - unsigned debug_flags = nanozone->debug_flags; -#if NANO_FREE_DEQUEUE_DILIGENCE - size_t gotSize; - nano_blk_addr_t p; // the compiler holds this in a register - - p.addr = (uint64_t)ptr; // Begin the dissection of ptr - if (NANOZONE_SIGNATURE != p.fields.nano_signature) { - malloc_zone_error(debug_flags, true, - "Invalid signature for pointer %p dequeued from free list\n", - ptr); - } - - if (mag_index != p.fields.nano_mag_index) { - malloc_zone_error(debug_flags, true, - "Mismatched magazine for pointer %p dequeued from free list\n", - ptr); - } - - gotSize = _nano_vet_and_size_of_free(nanozone, ptr); - if (0 == gotSize) { - malloc_zone_error(debug_flags, true, - "Invalid pointer %p dequeued from free list\n", ptr); - } - if (gotSize != slot_bytes) { - malloc_zone_error(debug_flags, true, - "Mismatched size for pointer %p dequeued from free list\n", - ptr); - } - - if (!_nano_block_has_canary_value(nanozone, ptr)) { - malloc_zone_error(debug_flags, true, - "Heap corruption detected, free list canary is damaged for %p\n" - "*** Incorrect guard value: %lu\n", ptr, - ((chained_block_t)ptr)->double_free_guard); - } - -#if defined(DEBUG) - void *next = (void *)(((chained_block_t)ptr)->next); - if (next) { - p.addr = (uint64_t)next; // Begin the dissection of next - if (NANOZONE_SIGNATURE != p.fields.nano_signature) { - malloc_zone_error(debug_flags, true, - "Invalid next signature for pointer %p dequeued from free " - "list, next = %p\n", ptr, next); - } - - if (mag_index != p.fields.nano_mag_index) { - malloc_zone_error(debug_flags, true, - "Mismatched next magazine for pointer %p dequeued from " - "free list, next = %p\n", ptr, next); - } - - gotSize = _nano_vet_and_size_of_free(nanozone, next); - if (0 == gotSize) { - malloc_zone_error(debug_flags, true, - "Invalid next for pointer %p dequeued from free list, " - "next = %p\n", ptr, next); - } - if (gotSize != slot_bytes) { - malloc_zone_error(debug_flags, true, - "Mismatched next size for pointer %p dequeued from free " - "list, next = %p\n", ptr, next); - } - } -#endif /* DEBUG */ -#endif /* NANO_FREE_DEQUEUE_DILIGENCE */ - -
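The diligence block above, like most routines in this file, begins by placing the raw pointer "on the dissecting table": a union overlays bitfields on the 64-bit address so the signature, magazine, slot, and offset can be read straight out of the pointer with no metadata lookup. A sketch of the idiom follows; the field widths are illustrative only, the real layout comes from nano_zone.h.

#include <stdint.h>

typedef union {
    uint64_t addr;
    struct {
        uint64_t nano_offset    : 17;  /* byte offset within the band */
        uint64_t nano_band      : 17;  /* band index within the slot  */
        uint64_t nano_slot      : 4;   /* quantized size class        */
        uint64_t nano_mag_index : 6;   /* per-CPU magazine            */
        uint64_t nano_signature : 20;  /* tag marking nano-owned VM   */
    } fields;
} toy_blk_addr_t;

/* First step of any vetting routine: does the pointer carry the tag? */
static int
toy_has_nano_signature(const void *ptr, uint64_t signature)
{
    toy_blk_addr_t u = { .addr = (uint64_t)(uintptr_t)ptr };
    return u.fields.nano_signature == signature;
}

Because the size class is encoded in the address itself, nano_size and free can validate and size a block in constant time, which is what makes the helper-zone fallback paths cheap to gate.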
((chained_block_t)ptr)->double_free_guard = 0; - ((chained_block_t)ptr)->next = NULL; // clear out next pointer to protect free list - } else { - ptr = segregated_next_block(nanozone, pMeta, slot_bytes, mag_index); - } - - if (cleared_requested && ptr) { - memset(ptr, 0, slot_bytes); // TODO: Needs a memory barrier after memset to ensure zeroes land first? - } - return ptr; -} - -static void * -_nano_malloc_check_scribble(nanozone_t *nanozone, size_t size) -{ - void *ptr = _nano_malloc_check_clear(nanozone, size, 0); - - /* - * Scribble on allocated memory when requested. - */ - if ((nanozone->debug_flags & MALLOC_DO_SCRIBBLE) && ptr && size) { - memset(ptr, SCRIBBLE_BYTE, _nano_vet_and_size_of_live(nanozone, ptr)); - } - - return ptr; -} - -static MALLOC_INLINE size_t -_nano_size(nanozone_t *nanozone, const void *ptr) -{ - return _nano_vet_and_size_of_live(nanozone, ptr); -} - -static MALLOC_INLINE size_t -_nano_good_size(nanozone_t *nanozone, size_t size) -{ - return (size <= NANO_REGIME_QUANTA_SIZE) ? NANO_REGIME_QUANTA_SIZE - : (((size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM) << SHIFT_NANO_QUANTUM); -} - -static MALLOC_INLINE void _nano_free_trusted_size_check_scribble(nanozone_t *nanozone, - void *ptr, - size_t trusted_size, - boolean_t do_scribble) MALLOC_ALWAYS_INLINE; - -static MALLOC_INLINE void -_nano_free_trusted_size_check_scribble(nanozone_t *nanozone, void *ptr, size_t trusted_size, boolean_t do_scribble) -{ - if (trusted_size) { - nano_blk_addr_t p; // happily, the compiler holds this in a register - nano_meta_admin_t pMeta; - - if (do_scribble) { - (void)memset(ptr, SCRABBLE_BYTE, trusted_size); - } - _nano_block_set_canary_value(nanozone, ptr); - - p.addr = (uint64_t)ptr; // place ptr on the dissecting table - pMeta = &(nanozone->meta_data[p.fields.nano_mag_index][p.fields.nano_slot]); - OSAtomicEnqueue(&(pMeta->slot_LIFO), ptr, offsetof(struct chained_block_s, next)); - } else { - malloc_zone_error(nanozone->debug_flags, true, - "Freeing unallocated pointer %p\n", ptr); - } -} - -static MALLOC_INLINE void _nano_free_check_scribble(nanozone_t *nanozone, void *ptr, boolean_t do_scribble) MALLOC_ALWAYS_INLINE; - -static MALLOC_INLINE void -_nano_free_check_scribble(nanozone_t *nanozone, void *ptr, boolean_t do_scribble) -{ - _nano_free_trusted_size_check_scribble(nanozone, ptr, _nano_vet_and_size_of_live(nanozone, ptr), do_scribble); -} - -static MALLOC_INLINE void * -_nano_realloc(nanozone_t *nanozone, void *ptr, size_t new_size) -{ - size_t old_size, new_good_size, valid_size; - void *new_ptr; - - if (FALSE && NULL == ptr) { // ptr has our_signature so can't be NULL, but if it were Posix sez ... - // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. - return _nano_malloc_check_scribble(nanozone, new_size); - } else if (0 == new_size) { - // If size is 0 and ptr is not a null pointer, the object pointed to is freed. - _nano_free_check_scribble(nanozone, ptr, (nanozone->debug_flags & MALLOC_DO_SCRIBBLE)); - // If size is 0, either a null pointer or a unique pointer that can be successfully passed - // to free() shall be returned. - return _nano_malloc_check_scribble(nanozone, 1); - } - - old_size = _nano_vet_and_size_of_live(nanozone, ptr); - if (!old_size) { - malloc_zone_error(nanozone->debug_flags, true, - "pointer %p being reallocated was not allocated\n", ptr); - return NULL; - } - - new_good_size = _nano_good_size(nanozone, new_size); - if (new_good_size > old_size) { - /* Must grow. 
FALL THROUGH to alloc/copy/free. */ - } else if (new_good_size <= (old_size >> 1)) { - /* Serious shrinkage (more than half). FALL THROUGH to alloc/copy/free. */ - } else { - /* Let's hang on to what we got. */ - if (nanozone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size); - } - return ptr; - } - - /* - * Allocate a new buffer and copy. - */ - new_ptr = _nano_malloc_check_scribble(nanozone, new_good_size); - if (new_ptr == NULL) { - return NULL; - } - - valid_size = MIN(old_size, new_good_size); - memcpy(new_ptr, ptr, valid_size); - _nano_free_check_scribble(nanozone, ptr, (nanozone->debug_flags & MALLOC_DO_SCRIBBLE)); - - return new_ptr; -} - -static MALLOC_INLINE void -_nano_destroy(nanozone_t *nanozone) -{ - /* Now destroy the separate nanozone region */ - nano_common_deallocate_pages((void *)nanozone, NANOZONE_PAGED_SIZE, - nanozone->debug_flags); -} - -/****************** nanozone dispatch **********************/ - -static void * -nano_malloc(nanozone_t *nanozone, size_t size) -{ - if (size <= NANO_MAX_SIZE) { - void *p = _nano_malloc_check_clear(nanozone, size, 0); - if (p) { - return p; - } else { - /* FALLTHROUGH to helper zone */ - } - } - - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->malloc(zone, size); -} - -static void * -nano_forked_malloc(nanozone_t *nanozone, size_t size) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->malloc(zone, size); -} - -static void * -nano_malloc_scribble(nanozone_t *nanozone, size_t size) -{ - if (size <= NANO_MAX_SIZE) { - void *ptr = _nano_malloc_check_clear(nanozone, size, 0); - if (ptr) { - /* - * Scribble on allocated memory. - */ - if (size) { - memset(ptr, SCRIBBLE_BYTE, _nano_vet_and_size_of_live(nanozone, ptr)); - } - - return ptr; - } else { - /* FALLTHROUGH to helper zone */ - } - } - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->malloc(zone, size); -} - -static void * -nano_calloc(nanozone_t *nanozone, size_t num_items, size_t size) -{ - size_t total_bytes; - - if (calloc_get_size(num_items, size, 0, &total_bytes)) { - return NULL; - } - - if (total_bytes <= NANO_MAX_SIZE) { - void *p = _nano_malloc_check_clear(nanozone, total_bytes, 1); - if (p) { - return p; - } else { - /* FALLTHROUGH to helper zone */ - } - } - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->calloc(zone, 1, total_bytes); -} - -static void * -nano_forked_calloc(nanozone_t *nanozone, size_t num_items, size_t size) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->calloc(zone, num_items, size); -} - -static void * -nano_valloc(nanozone_t *nanozone, size_t size) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->valloc(zone, size); -} - -static MALLOC_INLINE void -__nano_free_definite_size(nanozone_t *nanozone, void *ptr, size_t size, boolean_t do_scribble) MALLOC_ALWAYS_INLINE; - -static MALLOC_INLINE void -__nano_free_definite_size(nanozone_t *nanozone, void *ptr, size_t size, boolean_t do_scribble) -{ - nano_blk_addr_t p; // happily, the compiler holds this in a register - - p.addr = (uint64_t)ptr; // place ptr on the dissecting table - if (NANOZONE_SIGNATURE == p.fields.nano_signature) { - if (size == ((p.fields.nano_slot + 1) << SHIFT_NANO_QUANTUM)) { // "Trust but verify." 
- _nano_free_trusted_size_check_scribble(nanozone, ptr, size, do_scribble); - return; - } else { - malloc_zone_error(nanozone->debug_flags, true, - "Freeing pointer %p whose size was misdeclared\n", ptr); - } - } else { - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - zone->free_definite_size(zone, ptr, size); - return; - } - /* NOTREACHED */ -} - -static void -nano_free_definite_size(nanozone_t *nanozone, void *ptr, size_t size) -{ - __nano_free_definite_size(nanozone, ptr, size, 0); -} - -static void -nano_free_definite_size_scribble(nanozone_t *nanozone, void *ptr, size_t size) -{ - __nano_free_definite_size(nanozone, ptr, size, 1); -} - -static MALLOC_INLINE void __nano_free(nanozone_t *nanozone, void *ptr, boolean_t do_scribble) MALLOC_ALWAYS_INLINE; - -static MALLOC_INLINE void -__nano_free(nanozone_t *nanozone, void *ptr, boolean_t do_scribble) -{ - MALLOC_TRACE(TRACE_nano_free, (uintptr_t)nanozone, (uintptr_t)ptr, do_scribble, 0); - - if (!ptr) { - return; // Protect against malloc_zone_free() passing NULL. - } - - // exhausting a slot may result in a pointer with - // the nanozone prefix being given to nano_free via malloc_zone_free. Calling - // vet_and_size here, instead of in _nano_free_check_scribble means we can - // early-out into the helper_zone if it turns out nano does not own this ptr. - size_t sz = _nano_vet_and_size_of_live(nanozone, ptr); - - if (sz) { - _nano_free_trusted_size_check_scribble(nanozone, ptr, sz, do_scribble); - return; - } else { - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - zone->free(zone, ptr); - return; - } - /* NOTREACHED */ -} - -static void -nano_free(nanozone_t *nanozone, void *ptr) -{ - __nano_free(nanozone, ptr, 0); -} - -static void -nano_forked_free(nanozone_t *nanozone, void *ptr) -{ - if (!ptr) { - return; // Protect against malloc_zone_free() passing NULL. - } - - // exhausting a slot may result in a pointer with - // the nanozone prefix being given to nano_free via malloc_zone_free. Calling - // vet_and_size here, instead of in _nano_free_check_scribble means we can - // early-out into the helper_zone if it turns out nano does not own this ptr. - size_t sz = _nano_vet_and_size_of_live(nanozone, ptr); - - if (sz) { - /* NOTHING. Drop it on the floor as nanozone metadata could be fouled by fork. */ - return; - } else { - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - zone->free(zone, ptr); - return; - } - /* NOTREACHED */ -} - -static void -nano_forked_free_definite_size(nanozone_t *nanozone, void *ptr, size_t size) -{ - nano_forked_free(nanozone, ptr); -} - -static void -nano_free_scribble(nanozone_t *nanozone, void *ptr) -{ - __nano_free(nanozone, ptr, 1); -} - -static size_t -nano_size(nanozone_t *nanozone, const void *ptr) -{ - nano_blk_addr_t p; // happily, the compiler holds this in a register - - p.addr = (uint64_t)ptr; // place ptr on the dissecting table - - if (NANOZONE_SIGNATURE == p.fields.nano_signature) { // Our signature? - return _nano_size(nanozone, ptr); - } else { - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->size(zone, ptr); // Not nano. Try other sizes. - } - /* NOTREACHED */ -} - -static void * -nano_realloc(nanozone_t *nanozone, void *ptr, size_t new_size) -{ - // could occur through malloc_zone_realloc() path - if (!ptr) { - // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. 
- return nano_malloc(nanozone, new_size); - } - - size_t old_size = _nano_vet_and_size_of_live(nanozone, ptr); - if (!old_size) { - // not-nano pointer, hand down to helper zone - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->realloc(zone, ptr, new_size); - } else { - if (new_size <= NANO_MAX_SIZE) { - // nano to nano? - void *q = _nano_realloc(nanozone, ptr, new_size); - if (q) { - return q; - } else { - // nano exhausted - /* FALLTHROUGH to helper zone copying case */ - } - } - - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - void *new_ptr = zone->malloc(zone, new_size); - - if (new_ptr) { - size_t valid_size = MIN(old_size, new_size); - memcpy(new_ptr, ptr, valid_size); - _nano_free_check_scribble(nanozone, ptr, (nanozone->debug_flags & MALLOC_DO_SCRIBBLE)); - return new_ptr; - } else { - /* Original ptr is left intact */ - return NULL; - } - /* NOTREACHED */ - } - /* NOTREACHED */ -} - -static void * -nano_forked_realloc(nanozone_t *nanozone, void *ptr, size_t new_size) -{ - // could occur through malloc_zone_realloc() path - if (!ptr) { - // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. - return nano_forked_malloc(nanozone, new_size); - } - - size_t old_size = _nano_vet_and_size_of_live(nanozone, ptr); - if (!old_size) { - // not-nano pointer, hand down to helper zone - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->realloc(zone, ptr, new_size); - } else { - if (0 == new_size) { - // If size is 0 and ptr is not a null pointer, the object pointed to is freed. - // However as nanozone metadata could be fouled by fork, we'll intentionally leak it. - - // If size is 0, either a null pointer or a unique pointer that can be successfully passed - // to free() shall be returned. - return nano_forked_malloc(nanozone, 1); - } - - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - void *new_ptr = zone->malloc(zone, new_size); - - if (new_ptr) { - size_t valid_size = MIN(old_size, new_size); - memcpy(new_ptr, ptr, valid_size); - /* Original pointer is intentionally leaked as nanozone metadata could be fouled by fork. 
*/ - return new_ptr; - } else { - /* Original ptr is left intact */ - return NULL; - } - /* NOTREACHED */ - } - /* NOTREACHED */ -} - -static void -nano_destroy(nanozone_t *nanozone) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - zone->destroy(zone); - - _nano_destroy(nanozone); -} - -static unsigned -nano_batch_malloc(nanozone_t *nanozone, size_t size, void **results, unsigned count) -{ - unsigned found = 0; - - if (size <= NANO_MAX_SIZE) { - while (found < count) { - void *ptr = _nano_malloc_check_clear(nanozone, size, 0); - if (!ptr) { - break; - } - - *results++ = ptr; - found++; - } - if (found == count) { - return found; - } else { - /* FALLTHROUGH to mop-up in the helper zone */ - } - } - - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return found + zone->batch_malloc(zone, size, results, count - found); -} - -static unsigned -nano_forked_batch_malloc(nanozone_t *nanozone, size_t size, void **results, unsigned count) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->batch_malloc(zone, size, results, count); -} - -static void -nano_batch_free(nanozone_t *nanozone, void **to_be_freed, unsigned count) -{ - void *ptr; - - // frees all the pointers in to_be_freed - // note that to_be_freed may be overwritten during the process - if (!count) { - return; - } - - while (count--) { - ptr = to_be_freed[count]; - if (ptr) { - nano_free(nanozone, ptr); - } - } -} - -static void -nano_forked_batch_free(nanozone_t *nanozone, void **to_be_freed, unsigned count) -{ - void *ptr; - - // frees all the pointers in to_be_freed - // note that to_be_freed may be overwritten during the process - if (!count) { - return; - } - - while (count--) { - ptr = to_be_freed[count]; - if (ptr) { - nano_forked_free(nanozone, ptr); - } - } -} - -static void * -nano_memalign(nanozone_t *nanozone, size_t alignment, size_t size) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->memalign(zone, alignment, size); -} - -static boolean_t -nano_claimed_address(nanozone_t *nanozone, void *ptr) -{ - nano_blk_addr_t p; - p.addr = (uint64_t)ptr; - if (NANOZONE_SIGNATURE != p.fields.nano_signature) { - // Not a nano address - let the helper zone handle it. - malloc_zone_t *helper_zone = nanozone->helper_zone; - return malloc_zone_claimed_address(helper_zone, ptr); - } - return __nano_vet_and_size_inner(nanozone, ptr, true) != 0; -} - -static boolean_t -nano_forked_claimed_address(struct _malloc_zone_t *zone, void *ptr) -{ - // This does not operate after fork - default to true to avoid - // false negatives. 
- return true; -} - -static size_t -nano_try_madvise(nanozone_t *nanozone, size_t goal) -{ - unsigned int mag_index, slot_key; - size_t bytes_toward_goal = 0; - - for (mag_index = 0; mag_index < nano_common_max_magazines; mag_index++) { - nano_blk_addr_t p; - - // Establish p as base address for band 0, slot 0, offset 0 - p.fields.nano_signature = NANOZONE_SIGNATURE; - p.fields.nano_mag_index = mag_index; - p.fields.nano_band = 0; - p.fields.nano_slot = 0; - p.fields.nano_offset = 0; - - for (slot_key = 0; slot_key < SLOT_KEY_LIMIT; p.addr += SLOT_IN_BAND_SIZE, // Advance to next slot base - slot_key++) { - // malloc_report(ASL_LEVEL_WARNING,"nano_try_madvise examining slot base %p\n", p.addr); - nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]); - uintptr_t slot_bump_addr = pMeta->slot_bump_addr; // capture this volatile pointer - size_t slot_objects_mapped = pMeta->slot_objects_mapped; // capture this volatile count - - if (0 == slot_objects_mapped) { // Nothing allocated in this magazine for this slot? - continue; - } else { - // Walk the slot free list and populate a bitarray_t - int log_size = 64 - __builtin_clzl(slot_objects_mapped); - bitarray_t slot_bitarray = bitarray_create(log_size); - - unsigned int slot_bytes = pMeta->slot_bytes; - int log_page_count = 64 - __builtin_clzl((slot_objects_mapped * slot_bytes) / vm_kernel_page_size); - log_page_count = 1 + MAX(0, log_page_count); - bitarray_t page_bitarray = bitarray_create(log_page_count); - - // malloc_report(ASL_LEVEL_WARNING,"slot_bitarray: %db page_bitarray: %db\n", bitarray_size(log_size), - // bitarray_size(log_page_count)); - if (!slot_bitarray) { - malloc_report(ASL_LEVEL_ERR, "bitarray_create(%d) in nano_try_madvise returned errno=%d.\n", log_size, errno); - free(page_bitarray); - return bytes_toward_goal; - } - - if (!page_bitarray) { - malloc_report(ASL_LEVEL_ERR, "bitarray_create(%d) in nano_try_madvise returned errno=%d.\n", log_page_count, errno); - free(slot_bitarray); - return bytes_toward_goal; - } - - chained_block_t head = NULL, tail = NULL, t; - unsigned stoploss = (unsigned)slot_objects_mapped; - while ((t = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next)))) { - if (0 == stoploss) { - malloc_report(ASL_LEVEL_ERR, "Free list walk in nano_try_madvise exceeded object count.\n"); - break; - } - stoploss--; - - uintptr_t offset = ((uintptr_t)t - p.addr); // offset from beginning of slot - index_t block_index = offset_to_index(nanozone, pMeta, offset); - - // build a simple linked list of the free blocks we're able to obtain - if (NULL == head) { - head = t; - } else { - tail->next = t; - } - tail = t; - - // take note in a bitarray_t of each free block we're able to obtain (allows fast lookup below) - if (block_index < slot_objects_mapped) { - bitarray_set(slot_bitarray, log_size, block_index); - } - } - if (tail) { - tail->next = NULL; - } - - if (NULL == head) { - free(slot_bitarray); - free(page_bitarray); - continue; - } - - index_t i; - nano_blk_addr_t q; - size_t pgnum; - for (i = (index_t)pMeta->slot_objects_skipped; i < slot_objects_mapped; ++i) { - uintptr_t block_offset = index_to_offset(nanozone, pMeta, i); - if (p.addr + block_offset >= slot_bump_addr) { - break; - } - - if (!bitarray_get(slot_bitarray, log_size, i)) { // is block i allocated or already on an madvise'd page? 
- - // Mark the page(s) it resides on as live - q.addr = p.addr + block_offset; - pgnum = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - bitarray_set(page_bitarray, log_page_count, (index_t)pgnum); - - q.addr += slot_bytes - 1; - pgnum = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - bitarray_set(page_bitarray, log_page_count, (index_t)pgnum); - } - } - - free(slot_bitarray); - - q.addr = p.addr + index_to_offset(nanozone, pMeta, (index_t)pMeta->slot_objects_skipped); - index_t pgstart = - ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> vm_kernel_page_shift; - - q.addr = slot_bump_addr - slot_bytes; - pgnum = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> vm_kernel_page_shift; - - // malloc_report(ASL_LEVEL_WARNING,"Examining %d pages. Slot base %p.\n", pgnum - pgstart + 1, p.addr); - - if (pMeta->slot_madvised_pages) { - if (pMeta->slot_madvised_log_page_count < log_page_count) { - bitarray_t new_madvised_pages = bitarray_create(log_page_count); - index_t index; - while (bitarray_zap_first_set(pMeta->slot_madvised_pages, pMeta->slot_madvised_log_page_count, &index)) { - bitarray_set(new_madvised_pages, log_page_count, index); - } - free(pMeta->slot_madvised_pages); - pMeta->slot_madvised_pages = new_madvised_pages; - pMeta->slot_madvised_log_page_count = log_page_count; - } - } else { - pMeta->slot_madvised_pages = bitarray_create(log_page_count); - pMeta->slot_madvised_log_page_count = log_page_count; - } - - bitarray_t will_madvise_pages = bitarray_create(log_page_count); - int num_advised = 0; - - for (i = pgstart; i < pgnum; ++i) { - if ((i < (1 << log_page_count)) && // bounds check for the bitarray_get()'s that follow. - !bitarray_get(pMeta->slot_madvised_pages, log_page_count, i) && // already madvise'd? - !bitarray_get(page_bitarray, log_page_count, i)) // no live allocations? - { - num_advised++; - bitarray_set(will_madvise_pages, log_page_count, i); - } - } - free(page_bitarray); - - if (num_advised) { - chained_block_t new_head = NULL, new_tail = NULL; - // malloc_report(ASL_LEVEL_WARNING,"Constructing residual free list starting at %p num_advised %d\n", head, - // num_advised); - t = head; - while (t) { - q.addr = (uintptr_t)t; - index_t pgnum_start = - ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - q.addr += slot_bytes - 1; - index_t pgnum_end = - ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - - // bounds check for the bitarray_get()'s that follow. If the pgnum is beyond the - // capacity of the will_madvise_pages just restore the block to the free list. - if (pgnum_start >= (1 << log_page_count)) { - if (NULL == new_head) { - new_head = t; - } else { - new_tail->next = t; - } - new_tail = t; - } - // If no part of the block lies on an madvise()'d page, restore it to the slot free list.
- else if (!bitarray_get(will_madvise_pages, log_page_count, pgnum_start) && - !bitarray_get(will_madvise_pages, log_page_count, pgnum_end)) { - if (NULL == new_head) { - new_head = t; - } else { - new_tail->next = t; - } - new_tail = t; - } - - t = t->next; - } - if (new_tail) { - new_tail->next = NULL; - } - - // push the free list extracted above back onto the LIFO, all at once - if (new_head) { - OSAtomicEnqueue(&(pMeta->slot_LIFO), new_head, - (uintptr_t)new_tail - (uintptr_t)new_head + offsetof(struct chained_block_s, next)); - } - } else { - // malloc_report(ASL_LEVEL_WARNING,"Reinstating free list since no pages were madvised (%d).\n", num_advised); - if (head) { - OSAtomicEnqueue(&(pMeta->slot_LIFO), head, - (uintptr_t)tail - (uintptr_t)head + offsetof(struct chained_block_s, next)); - } - } - - for (i = pgstart; i < pgnum; ++i) { - if ((i < (1 << log_page_count)) && bitarray_get(will_madvise_pages, log_page_count, i)) { - q = p; - q.fields.nano_band = (i << vm_kernel_page_shift) >> NANO_OFFSET_BITS; - q.fields.nano_offset = (i << vm_kernel_page_shift) & ((1 << NANO_OFFSET_BITS) - 1); - // malloc_report(ASL_LEVEL_WARNING,"Entire page non-live: %d. Slot base %p, madvising %p\n", i, p.addr, - // q.addr); - - if (nanozone->debug_flags & MALLOC_DO_SCRIBBLE) { - memset((void *)q.addr, SCRUBBLE_BYTE, vm_kernel_page_size); - } - - if (-1 == madvise((void *)q.addr, vm_kernel_page_size, MADV_FREE_REUSABLE)) - { - /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */ -#if DEBUG_MADVISE - nanozone_error(nanozone, 0, "madvise(..., MADV_FREE_REUSABLE) failed", (void *)q.addr, - "length=%d\n", vm_page_size); -#endif - } else { - bytes_toward_goal += vm_kernel_page_size; - bitarray_set(pMeta->slot_madvised_pages, log_page_count, i); - } - } - } - free(will_madvise_pages); - - if (!bitarray_first_set(pMeta->slot_madvised_pages, log_page_count)) { - free(pMeta->slot_madvised_pages); - pMeta->slot_madvised_pages = NULL; - pMeta->slot_madvised_log_page_count = 0; - } - - if (goal && bytes_toward_goal >= goal) { - return bytes_toward_goal; - } - } - } - } - return bytes_toward_goal; -} - -static size_t -nano_pressure_relief(nanozone_t *nanozone, size_t goal) -{ - MAGMALLOC_PRESSURERELIEFBEGIN((void *)nanozone, nanozone->basic_zone.zone_name, (int)goal); - MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_START, (uint64_t)nanozone, goal, 0, 0); - - size_t total = nano_try_madvise(nanozone, goal); - - MAGMALLOC_PRESSURERELIEFEND((void *)nanozone, nanozone->basic_zone.zone_name, (int)goal, (int)total); - MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_END, (uint64_t)nanozone, goal, total, 0); - - return total; -} - -/**************** introspection methods *********************/ - -static kern_return_t -nano_ptr_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - vm_address_t zone_address, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - nanozone_t *nanozone; - kern_return_t err; - struct nanozone_s zone_copy; - - if (!reader) { - reader = nano_common_default_reader; - } - - err = reader(task, zone_address, sizeof(nanozone_t), (void **)&nanozone); - if (err) { - return err; - } - memcpy(&zone_copy, nanozone, sizeof(zone_copy)); - - err = segregated_in_use_enumerator(task, context, type_mask, &zone_copy, reader, recorder); - return err; -} - -static size_t -nano_good_size(nanozone_t *nanozone, size_t size) -{ - if (size <= NANO_MAX_SIZE) { - return _nano_common_good_size(size); - } else { - malloc_zone_t *zone = (malloc_zone_t
*)(nanozone->helper_zone); - return zone->introspect->good_size(zone, size); - } -} - -// TODO sanity checks -static unsigned nanozone_check_counter = 0; -static unsigned nanozone_check_start = 0; -static unsigned nanozone_check_modulo = 1; - -static boolean_t -nano_check_all(nanozone_t *nanozone, const char *function) -{ - return 1; -} - -static boolean_t -nanozone_check(nanozone_t *nanozone) -{ - if ((++nanozone_check_counter % 10000) == 0) { - malloc_report(ASL_LEVEL_NOTICE, "at nanozone_check counter=%d\n", nanozone_check_counter); - } - - if (nanozone_check_counter < nanozone_check_start) { - return 1; - } - - if (nanozone_check_counter % nanozone_check_modulo) { - return 1; - } - - return nano_check_all(nanozone, ""); -} - -static unsigned -count_free(nanozone_t *nanozone, nano_meta_admin_t pMeta) -{ - chained_block_t head = NULL, tail = NULL, t; - unsigned count = 0; - - unsigned stoploss = (unsigned)pMeta->slot_objects_mapped; - while ((t = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next)))) { - if (0 == stoploss) { - malloc_zone_error(nanozone->debug_flags, true, - "Free list walk in count_free exceeded object count.\n", - (void *)&(pMeta->slot_LIFO), NULL); - } - stoploss--; - - if (NULL == head) { - head = t; - } else { - tail->next = t; - } - tail = t; - - count++; - } - if (tail) { - tail->next = NULL; - } - - // push the free list extracted above back onto the LIFO, all at once - if (head) { - OSAtomicEnqueue(&(pMeta->slot_LIFO), head, (uintptr_t)tail - (uintptr_t)head + offsetof(struct chained_block_s, next)); - } - - return count; -} - -static void -nano_print(nanozone_t *nanozone, boolean_t verbose) -{ - unsigned int mag_index, slot_key; - malloc_statistics_t stats; - - nano_statistics(nanozone, &stats); - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, - "Nanozone %p: inUse=%d(%lluKB) touched=%lluKB allocated=%lluMB\n", - nanozone, stats.blocks_in_use, (uint64_t)stats.size_in_use >> 10, - (uint64_t)stats.max_size_in_use >> 10, (uint64_t)stats.size_allocated >> 20); - - for (mag_index = 0; mag_index < nano_common_max_magazines; mag_index++) { - nano_blk_addr_t p; - - // Establish p as base address for band 0, slot 0, offset 0 - p.fields.nano_signature = NANOZONE_SIGNATURE; - p.fields.nano_mag_index = mag_index; - p.fields.nano_band = 0; - p.fields.nano_slot = 0; - p.fields.nano_offset = 0; - - for (slot_key = 0; slot_key < SLOT_KEY_LIMIT; p.addr += SLOT_IN_BAND_SIZE, // Advance to next slot base - slot_key++) { - nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]); - uintptr_t slot_bump_addr = pMeta->slot_bump_addr; // capture this volatile pointer - size_t slot_objects_mapped = pMeta->slot_objects_mapped; // capture this volatile count - - if (0 == slot_objects_mapped) { // Nothing allocated in this magazine for this slot? - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "Magazine %2d(%3d) Unrealized\n", mag_index, - (slot_key + 1) << SHIFT_NANO_QUANTUM); - continue; - } - - uintptr_t offset = (0 == slot_bump_addr ? 
0 : slot_bump_addr - p.addr); - unsigned blocks_touched = offset_to_index(nanozone, pMeta, offset) - (unsigned)pMeta->slot_objects_skipped; - unsigned blocks_now_free = count_free(nanozone, pMeta); - unsigned blocks_in_use = blocks_touched - blocks_now_free; - - size_t size_hiwater = ((slot_key + 1) << SHIFT_NANO_QUANTUM) * blocks_touched; - size_t size_in_use = ((slot_key + 1) << SHIFT_NANO_QUANTUM) * blocks_in_use; - size_t size_allocated = ((offset / BAND_SIZE) + 1) * SLOT_IN_BAND_SIZE; - - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, - "Magazine %2d(%3d) [%p, %3lluKB] \t Allocations in use=%4d \t Bytes in use=%llub \t Untouched=%lluKB\n", mag_index, - (slot_key + 1) << SHIFT_NANO_QUANTUM, (void *)p.addr, (uint64_t)(size_allocated >> 10), blocks_in_use, (uint64_t)size_in_use, - (uint64_t)((size_allocated - size_hiwater) >> 10)); - - if (!verbose) { - continue; - } else { - // Walk the slot free list and populate a bitarray_t - int log_size = 64 - __builtin_clzl(slot_objects_mapped); - bitarray_t slot_bitarray = bitarray_create(log_size); - - if (!slot_bitarray) { - malloc_report(ASL_LEVEL_ERR, "bitarray_create(%d) in nano_print returned errno=%d.\n", log_size, errno); - return; - } - - chained_block_t head = NULL, tail = NULL, t; - unsigned stoploss = (unsigned)slot_objects_mapped; - while ((t = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next)))) { - if (0 == stoploss) { - malloc_report(ASL_LEVEL_ERR, "Free list walk in nano_print exceeded object count.\n"); - break; - } - stoploss--; - - uintptr_t offset = ((uintptr_t)t - p.addr); // offset from beginning of slot - index_t block_index = offset_to_index(nanozone, pMeta, offset); - - if (NULL == head) { - head = t; - } else { - tail->next = t; - } - tail = t; - - if (block_index < slot_objects_mapped) { - bitarray_set(slot_bitarray, log_size, block_index); - } - } - if (tail) { - tail->next = NULL; - } - - index_t i; - for (i = 0; i < slot_objects_mapped; ++i) { - nano_blk_addr_t q; - size_t pgnum; - uintptr_t block_offset = index_to_offset(nanozone, pMeta, i); - if (p.addr + block_offset >= slot_bump_addr) { - break; - } - - q.addr = p.addr + block_offset; - pgnum = ((((unsigned)q.fields.nano_band) << NANO_OFFSET_BITS) | ((unsigned)q.fields.nano_offset)) >> - vm_kernel_page_shift; - - if (i < pMeta->slot_objects_skipped) { - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "_"); - } else if (bitarray_get(slot_bitarray, log_size, i)) { - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "F"); - } else if (pMeta->slot_madvised_pages && (pgnum < (1 << pMeta->slot_madvised_log_page_count)) && - bitarray_get(pMeta->slot_madvised_pages, pMeta->slot_madvised_log_page_count, (index_t)pgnum)) { - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "M"); - } else { - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "."); - } - } - malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "\n"); - - free(slot_bitarray); - - // push the free list extracted above back onto the LIFO, all at once - if (head) { - OSAtomicEnqueue( - &(pMeta->slot_LIFO), head, (uintptr_t)tail - (uintptr_t)head + offsetof(struct chained_block_s, next)); - } - } - } - } - return; -} - -static void -nano_log(malloc_zone_t *zone, void *log_address) -{ -} - -static void -nano_force_lock(nanozone_t *nanozone) -{ - int i; - - for (i = 0; i < nano_common_max_magazines; ++i) { - _malloc_lock_lock(&nanozone->band_resupply_lock[i]); - } -} - -static void -nano_force_unlock(nanozone_t *nanozone) -{ - 
int i; - - for (i = 0; i < nano_common_max_magazines; ++i) { - _malloc_lock_unlock(&nanozone->band_resupply_lock[i]); - } -} - -static void -nano_reinit_lock(nanozone_t *nanozone) -{ - int i; - - for (i = 0; i < nano_common_max_magazines; ++i) { - _malloc_lock_init(&nanozone->band_resupply_lock[i]); - } -} - -static void -nano_statistics(nanozone_t *nanozone, malloc_statistics_t *stats) -{ - int i, j; - - bzero(stats, sizeof(*stats)); - - for (i = 0; i < nano_common_max_magazines; ++i) { - nano_blk_addr_t p; - - // Establish p as base address for slot 0 in this CPU magazine - p.fields.nano_signature = NANOZONE_SIGNATURE; - p.fields.nano_mag_index = i; - p.fields.nano_band = 0; - p.fields.nano_slot = 0; - p.fields.nano_offset = 0; - - for (j = 0; j < NANO_SLOT_SIZE; p.addr += SLOT_IN_BAND_SIZE, // Advance to next slot base - ++j) { - nano_meta_admin_t pMeta = &nanozone->meta_data[i][j]; - uintptr_t offset = pMeta->slot_bump_addr - p.addr; - - if (0 == pMeta->slot_current_base_addr) { // Nothing allocated in this magazine for this slot? - continue; - } else { - unsigned blocks_touched = offset_to_index(nanozone, pMeta, offset) - (unsigned)pMeta->slot_objects_skipped; - unsigned blocks_now_free = count_free(nanozone, pMeta); - unsigned blocks_in_use = blocks_touched - blocks_now_free; - - size_t size_hiwater = ((j + 1) << SHIFT_NANO_QUANTUM) * blocks_touched; - size_t size_in_use = ((j + 1) << SHIFT_NANO_QUANTUM) * blocks_in_use; - size_t size_allocated = ((offset / BAND_SIZE) + 1) * SLOT_IN_BAND_SIZE; - - stats->blocks_in_use += blocks_in_use; - - stats->max_size_in_use += size_hiwater; - stats->size_in_use += size_in_use; - stats->size_allocated += size_allocated; - } - } - } -} - -static boolean_t -_nano_locked(nanozone_t *nanozone) -{ - int i; - - for (i = 0; i < nano_common_max_magazines; ++i) { - if (_malloc_lock_trylock(&nanozone->band_resupply_lock[i])) { - _malloc_lock_unlock(&nanozone->band_resupply_lock[i]); - return TRUE; - } - } - return FALSE; -} - -static boolean_t -nano_locked(nanozone_t *nanozone) -{ - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - - return _nano_locked(nanozone) || zone->introspect->zone_locked(zone); -} - -static const struct malloc_introspection_t nano_introspect = { - (void *)nano_ptr_in_use_enumerator, (void *)nano_good_size, (void *)nanozone_check, (void *)nano_print, nano_log, - (void *)nano_force_lock, (void *)nano_force_unlock, (void *)nano_statistics, (void *)nano_locked, NULL, NULL, NULL, - NULL, /* Zone enumeration version 7 and forward. */ - (void *)nano_reinit_lock, // reinit_lock version 9 and forward -}; // marked as const to spare the DATA section - -void -nano_forked_zone(nanozone_t *nanozone) -{ - /* - * Hobble the nano zone in the child of a fork prior to an exec since - * the state of the zone can be made inconsistent by a parent thread while the - * fork is underway. - * All new allocations will be referred to the helper zone (which is more stable.) - * All free()'s of existing nano objects will be leaked. - */ - - mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ | PROT_WRITE); - - nanozone->basic_zone.size = (void *)nano_size; /* Unchanged. */ - nanozone->basic_zone.malloc = (void *)nano_forked_malloc; - nanozone->basic_zone.calloc = (void *)nano_forked_calloc; - nanozone->basic_zone.valloc = (void *)nano_valloc; /* Unchanged, already always obtained from helper zone.
*/ - nanozone->basic_zone.free = (void *)nano_forked_free; - nanozone->basic_zone.realloc = (void *)nano_forked_realloc; - nanozone->basic_zone.destroy = (void *)nano_destroy; /* Unchanged. */ - nanozone->basic_zone.batch_malloc = (void *)nano_forked_batch_malloc; - nanozone->basic_zone.batch_free = (void *)nano_forked_batch_free; - nanozone->basic_zone.introspect = (struct malloc_introspection_t *)&nano_introspect; /* Unchanged. */ - nanozone->basic_zone.memalign = (void *)nano_memalign; /* Unchanged. */ - nanozone->basic_zone.free_definite_size = (void *)nano_forked_free_definite_size; - nanozone->basic_zone.claimed_address = nano_forked_claimed_address; - - mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ); -} - -malloc_zone_t * -nano_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags) -{ - nanozone_t *nanozone; - int i, j; - - /* Note: It is important that nano_create_zone resets _malloc_engaged_nano - * if it is unable to enable the nanozone (and chooses not to abort). - * Several functions rely on _malloc_engaged_nano to determine whether they - * should manipulate the nanozone, and these must not run if we failed - * to create the zone. - */ - MALLOC_ASSERT(_malloc_engaged_nano == NANO_V1); - - /* get memory for the zone. */ - nanozone = nano_common_allocate_based_pages(NANOZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC, 0); - if (!nanozone) { - _malloc_engaged_nano = NANO_NONE; - return NULL; - } - - /* set up the basic_zone portion of the nanozone structure */ - nanozone->basic_zone.version = 10; - nanozone->basic_zone.size = (void *)nano_size; - nanozone->basic_zone.malloc = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_malloc_scribble : (void *)nano_malloc; - nanozone->basic_zone.calloc = (void *)nano_calloc; - nanozone->basic_zone.valloc = (void *)nano_valloc; - nanozone->basic_zone.free = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_free_scribble : (void *)nano_free; - nanozone->basic_zone.realloc = (void *)nano_realloc; - nanozone->basic_zone.destroy = (void *)nano_destroy; - nanozone->basic_zone.batch_malloc = (void *)nano_batch_malloc; - nanozone->basic_zone.batch_free = (void *)nano_batch_free; - nanozone->basic_zone.introspect = (struct malloc_introspection_t *)&nano_introspect; - nanozone->basic_zone.memalign = (void *)nano_memalign; - nanozone->basic_zone.free_definite_size = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_free_definite_size_scribble - : (void *)nano_free_definite_size; - - nanozone->basic_zone.pressure_relief = (void *)nano_pressure_relief; - nanozone->basic_zone.claimed_address = (void *)nano_claimed_address; - - nanozone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */ - nanozone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */ - - mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */ - - /* Nano zone does not support MALLOC_ADD_GUARD_PAGES. */ - if (debug_flags & MALLOC_ADD_GUARD_PAGES) { - malloc_report(ASL_LEVEL_INFO, "nano zone does not support guard pages\n"); - debug_flags &= ~MALLOC_ADD_GUARD_PAGES; - } - - /* set up the remainder of the nanozone structure */ - nanozone->debug_flags = debug_flags; - - if (phys_ncpus > sizeof(nanozone->core_mapped_size) / - sizeof(nanozone->core_mapped_size[0])) { - MALLOC_REPORT_FATAL_ERROR(phys_ncpus, - "nanozone abandoned because NCPUS > max magazines.\n"); - } - - /* Initialize slot queue heads and resupply locks.
*/ - OSQueueHead q0 = OS_ATOMIC_QUEUE_INIT; - for (i = 0; i < nano_common_max_magazines; ++i) { - _malloc_lock_init(&nanozone->band_resupply_lock[i]); - - for (j = 0; j < NANO_SLOT_SIZE; ++j) { - nanozone->meta_data[i][j].slot_LIFO = q0; - } - } - - /* Initialize the security token. */ - nanozone->cookie = (uintptr_t)malloc_entropy[0] & 0x0000ffffffff0000ULL; // scramble central 32bits with this cookie - - nanozone->helper_zone = helper_zone; - - return (malloc_zone_t *)nanozone; -} - -void -nano_init(const char *envp[], const char *apple[], - const char *bootargs MALLOC_UNUSED) -{ -#if NANO_PREALLOCATE_BAND_VM - // Unconditionally preallocate the VA space set aside for nano malloc to - // reserve it in all configurations. rdar://problem/33392283 - boolean_t preallocated = nano_preallocate_band_vm(); - if (!preallocated) { - malloc_report(ASL_LEVEL_NOTICE, "nano zone abandoned due to inability to preallocate reserved vm space.\n"); - _malloc_engaged_nano = NANO_NONE; - } -#endif -} - -// Second phase of initialization, called during _malloc_initialize(), after -// environment variables have been read and processed. -void -nano_configure() -{ - // Nothing to do. -} - -#endif // CONFIG_NANOZONE - -/* vim: set noet:ts=4:sw=4:cindent: */ diff --git a/src/libmalloc/src/nano_malloc.h b/src/libmalloc/src/nano_malloc.h deleted file mode 100644 index 91255d4bc..000000000 --- a/src/libmalloc/src/nano_malloc.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __NANO_MALLOC_H -#define __NANO_MALLOC_H - -// Forward decl for the nanozone. -typedef struct nanozone_s nanozone_t; - -MALLOC_NOEXPORT -malloc_zone_t * -nano_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags); - -MALLOC_NOEXPORT -void -nano_forked_zone(nanozone_t *nanozone); - -MALLOC_NOEXPORT -void -nano_init(const char *envp[], const char *apple[], const char *bootargs); - -MALLOC_NOEXPORT -void -nano_configure(void); - -#endif // __NANO_MALLOC_H diff --git a/src/libmalloc/src/nano_malloc_common.c b/src/libmalloc/src/nano_malloc_common.c deleted file mode 100644 index cf8893a8a..000000000 --- a/src/libmalloc/src/nano_malloc_common.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -// Code that is common to Nano V1 and Nano V2. When Nano V1 is removed, -// most of this file will move to nanov2_malloc.c. - -#if CONFIG_NANOZONE - -// Possible enablement modes for Nano V2 -typedef enum { - NANO_INACTIVE, // Inactive, but can be selected with MallocNanoZone=V2 - NANO_ENABLED, // Available and default if Nano is turned on. - NANO_FORCED, // Force use of Nano V2 for all processes. -} nanov2_mode_t; - -// Which version of Nano is engaged. By default, none. -nano_version_t _malloc_engaged_nano = NANO_NONE; - -// Nano mode selection boot argument -static const char mode_boot_arg[] = "nanov2_mode"; -static const char inactive_mode[] = "inactive"; // Use Nano V1 for Nano -static const char enabled_mode[] = "enabled"; // Use Nano V2 for Nano -static const char forced_mode[] = "forced"; // Force Nano V2 everywhere - -// The maximum number of per-CPU allocation regions to use for Nano. -unsigned int nano_common_max_magazines; -boolean_t nano_common_max_magazines_is_ncpu = true; - -// Boot argument for nano_common_max_magazines -static const char nano_max_magazines_boot_arg[] = "malloc_nano_max_magazines"; - -#pragma mark - -#pragma mark Initialization - -// Shared initialization code. Determines which version of Nano should be used, -// if any, and sets _malloc_engaged_nano. The Nano version is determined as -// follows: -// 1. If the nanov2_mode boot arg has value "forced", Nano V2 is used -// unconditionally in every process, except in processes that have -// the MallocNanoZone variable set to V1. -// 2. If the nanov2_mode boot arg has value "enabled", Nano V2 is used if -// the process wants to use Nano (i.e. the kernel opts the process in, or -// the environment variable MallocNanoZone is 1). -// 3. If the nanov2_mode boot arg is not present or has any other value, -// Nano V1 is used if the process wants to use Nano (i.e. the kernel opts -// the process in, or the environment variable MallocNanoZone is 1). -// -// In cases (2) and (3), the selection can be explicitly overridden by setting -// the environment variable MallocNanoZone to V1 or V2. -void -nano_common_init(const char *envp[], const char *apple[], const char *bootargs) -{ - // Use the nanov2_mode boot argument and MallocNanoZone to determine - // which version of Nano to use, if any. - nanov2_mode_t nanov2_mode = NANO_ENABLED; - const char *p = malloc_common_value_for_key(bootargs, mode_boot_arg); - if (p) { - if (!strncmp(p, inactive_mode, sizeof(inactive_mode) - 1)) { - nanov2_mode = NANO_INACTIVE; - } else if (!strncmp(p, enabled_mode, sizeof(enabled_mode) - 1)) { - nanov2_mode = NANO_ENABLED; - } else if (!strncmp(p, forced_mode, sizeof(forced_mode) - 1)) { - nanov2_mode = NANO_FORCED; - } - } - - if (nanov2_mode == NANO_FORCED) { - // We will use Nano V2 unless MallocNanoZone is "V1". 
- const char *flag = _simple_getenv(envp, "MallocNanoZone"); - if (flag && (flag[0] == 'V' || flag[0] == 'v') && flag[1] == '1') { - _malloc_engaged_nano = NANO_V1; - } else { - _malloc_engaged_nano = NANO_V2; - } - } else { - const char *flag = _simple_getenv(apple, "MallocNanoZone"); - if (flag && flag[0] == '1') { - _malloc_engaged_nano = nanov2_mode == NANO_ENABLED ? NANO_V2 : NANO_V1; - } - /* Explicit overrides from the environment */ - flag = _simple_getenv(envp, "MallocNanoZone"); - if (flag) { - if (flag[0] == '1') { - _malloc_engaged_nano = nanov2_mode == NANO_ENABLED ? NANO_V2 : NANO_V1; - } else if (flag[0] == '0') { - _malloc_engaged_nano = NANO_NONE; - } else if (flag[0] == 'V' || flag[0] == 'v') { - if (flag[1] == '1') { - _malloc_engaged_nano = NANO_V1; - } else if (flag[1] == '2') { - _malloc_engaged_nano = NANO_V2; - } - } - } - } - - if (_malloc_engaged_nano) { - // The maximum number of nano magazines can be set either via a - // boot argument or from the environment. Get the boot argument value - // here and store it. We can't bounds check it until we have phys_ncpus, - // which happens later in nano_common_configure(), along with handling - // of the environment value setting. - char value_buf[256]; - const char *flag = malloc_common_value_for_key_copy(bootargs, - nano_max_magazines_boot_arg, value_buf, sizeof(value_buf)); - if (flag) { - const char *endp; - long value = malloc_common_convert_to_long(flag, &endp); - if (!*endp && value >= 0) { - nano_common_max_magazines = (unsigned int)value; - } else { - malloc_report(ASL_LEVEL_ERR, - "malloc_nano_max_magazines must be positive - ignored.\n"); - } - } - } - - switch (_malloc_engaged_nano) { - case NANO_V1: - nano_init(envp, apple, bootargs); - break; - case NANO_V2: - nanov2_init(envp, apple, bootargs); - break; - default: - break; - } -} - -// Second phase of initialization, called from _malloc_initialize(). Used for -// code that depends on state set in _malloc_initialize(), such as the -// number of physical CPUs. -void -nano_common_configure(void) -{ - // Set nano_common_max_magazines. An initial (unvalidated) value may have - // been set from the boot args. - unsigned int magazines = nano_common_max_magazines > 0 ? - nano_common_max_magazines : phys_ncpus; - - // Environment variable overrides boot arg, unless it's not valid. 
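For reference, the version-selection rules that nano_common_init() implements above can be restated as one small pure function. This is an illustrative sketch only, not part of the deleted source: the enum names mirror the code above, and kernel_opt_in stands in for finding MallocNanoZone=1 in apple[].

typedef enum { NANO_NONE = 0, NANO_V1 = 1, NANO_V2 = 2 } nano_version_t;
typedef enum { NANO_INACTIVE, NANO_ENABLED, NANO_FORCED } nanov2_mode_t;

/* env is the value of MallocNanoZone from envp[], or NULL if unset. */
static nano_version_t
select_nano_version(nanov2_mode_t mode, const char *env, int kernel_opt_in)
{
	if (mode == NANO_FORCED) {
		/* Forced mode: Nano V2 everywhere unless the process pins V1. */
		if (env && (env[0] == 'V' || env[0] == 'v') && env[1] == '1') {
			return NANO_V1;
		}
		return NANO_V2;
	}
	nano_version_t v = NANO_NONE;
	if (kernel_opt_in) {
		/* The kernel opted the process in to Nano. */
		v = (mode == NANO_ENABLED) ? NANO_V2 : NANO_V1;
	}
	if (env) {
		/* Explicit environment override, same precedence as above. */
		if (env[0] == '1') {
			v = (mode == NANO_ENABLED) ? NANO_V2 : NANO_V1;
		} else if (env[0] == '0') {
			v = NANO_NONE;
		} else if (env[0] == 'V' || env[0] == 'v') {
			if (env[1] == '1') {
				v = NANO_V1;
			} else if (env[1] == '2') {
				v = NANO_V2;
			}
		}
	}
	return v;
}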
- const char *flag = getenv("MallocNanoMaxMagazines"); -#if RDAR_48993662 - if (!flag) { - flag = getenv("_MallocNanoMaxMagazines"); - } -#endif // RDAR_48993662 - if (flag) { - int value = (int)strtol(flag, NULL, 0); - if (value < 0) { - malloc_report(ASL_LEVEL_ERR, - "MallocNanoMaxMagazines must be positive - ignored.\n"); - } else { - magazines = value; - } - } - - if (magazines == 0) { - magazines = phys_ncpus; - } else if (magazines > phys_ncpus) { - magazines = phys_ncpus; - malloc_report(ASL_LEVEL_ERR, - "Nano maximum magazines limited to number of physical " - "CPUs [%d]\n", phys_ncpus); - } - nano_common_max_magazines = magazines; - if (flag) { - malloc_report(ASL_LEVEL_INFO, "Nano maximum magazines set to %d\n", - nano_common_max_magazines); - } - nano_common_cpu_number_override_set(); - - switch (_malloc_engaged_nano) { - case NANO_V1: - nano_configure(); - break; - case NANO_V2: - nanov2_configure(); - break; - default: - break; - } -} - -#pragma mark - -#pragma mark VM Helper Functions - -void * -nano_common_allocate_based_pages(size_t size, unsigned char align, - unsigned debug_flags, int vm_page_label, void *base_addr) -{ - mach_vm_address_t vm_addr; - uintptr_t addr; - mach_vm_size_t allocation_size = round_page(size); - mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1; - int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label); - kern_return_t kr; - - if (!allocation_size) { - allocation_size = vm_page_size; - } - if (allocation_size < size) { // size_t arithmetic wrapped! - return NULL; - } - - vm_addr = round_page((mach_vm_address_t)base_addr); - if (!vm_addr) { - vm_addr = vm_page_size; - } - kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, - allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - if (kr) { - malloc_zone_error(debug_flags, false, "*** can't allocate pages: " - "mach_vm_map(size=%lu) failed (error code=%d)\n", size, kr); - return NULL; - } - addr = (uintptr_t)vm_addr; - - return (void *)addr; -} - -// Allocates virtual address from a given address for a given size. Succeeds -// (and returns TRUE) only if we get exactly the range of addresses that we -// asked for. -boolean_t -nano_common_allocate_vm_space(mach_vm_address_t base, mach_vm_size_t size) -{ - mach_vm_address_t vm_addr = base; - kern_return_t kr = mach_vm_map(mach_task_self(), &vm_addr, size, 0, - VM_MAKE_TAG(VM_MEMORY_MALLOC_NANO), MEMORY_OBJECT_NULL, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - - if (kr != KERN_SUCCESS || vm_addr != base) { - // Failed or we got allocated somewhere else. 
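The magazine-count resolution in nano_common_configure() above reduces to a simple rule: a valid MallocNanoMaxMagazines environment value beats the boot argument, zero means "use the physical CPU count", and anything above phys_ncpus is capped (with a report). A hedged restatement; the parameter names are illustrative, not from the source:

/* boot_arg_value: the stored malloc_nano_max_magazines boot-arg setting
 * (0 if unset). env_value/env_present: MallocNanoMaxMagazines, parsed
 * with strtol. Negative environment values are ignored, as above. */
static unsigned int
resolve_max_magazines(unsigned int boot_arg_value, long env_value,
		int env_present, unsigned int phys_ncpus)
{
	unsigned int magazines = boot_arg_value > 0 ? boot_arg_value : phys_ncpus;
	if (env_present && env_value >= 0) {
		magazines = (unsigned int)env_value;	/* env overrides boot-arg */
	}
	if (magazines == 0 || magazines > phys_ncpus) {
		magazines = phys_ncpus;	/* clamp to physical CPU count */
	}
	return magazines;
}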
- if (!kr) { - mach_vm_deallocate(mach_task_self(), vm_addr, size); - } - return FALSE; - } - return TRUE; -} - -void -nano_common_deallocate_pages(void *addr, size_t size, unsigned debug_flags) -{ - mach_vm_address_t vm_addr = (mach_vm_address_t)addr; - mach_vm_size_t allocation_size = size; - kern_return_t kr; - - kr = mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size); - if (kr) { - malloc_zone_error(debug_flags, false, "Can't deallocate_pages at %p\n", - addr); - } -} - -#pragma mark - -#pragma mark Introspection Helper Functions - -kern_return_t -nano_common_default_reader(task_t task, vm_address_t address, vm_size_t size, - void **ptr) -{ - *ptr = (void *)address; - return 0; -} - -#pragma mark - -#pragma mark Utility functions - -void -nano_common_cpu_number_override_set() -{ - boolean_t is_ncpu = _os_cpu_number_override == -1 && nano_common_max_magazines == phys_ncpus; - - // This facilitates a shortcut in nanov2_get_allocation_block_index() -- - // if nano_common_max_magazines_is_ncpu is true, we can also assume that - // _os_cpu_number_override == -1 (i.e. we are not in malloc_replay). - // - // We check here for false, because we don't want to write "true" to a __DATA page because - // that would make it dirty: - if (!is_ncpu) { - nano_common_max_magazines_is_ncpu = is_ncpu; - } -} - -#endif // CONFIG_NANOZONE - diff --git a/src/libmalloc/src/nano_malloc_common.h b/src/libmalloc/src/nano_malloc_common.h deleted file mode 100644 index 401145ce9..000000000 --- a/src/libmalloc/src/nano_malloc_common.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __NANO_MALLOC_COMMON_H -#define __NANO_MALLOC_COMMON_H - -// Definitions that are common to Nano V1 and Nano V2. -#if TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT -#define NANO_PREALLOCATE_BAND_VM 0 -#else // TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT -#define NANO_PREALLOCATE_BAND_VM 1 // pre-allocate reserved vm range -#endif // TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT - -typedef enum { - NANO_NONE = 0, - NANO_V1 = 1, - NANO_V2 = 2, -} nano_version_t; - -// Nano malloc enabled flag -MALLOC_NOEXPORT -extern nano_version_t _malloc_engaged_nano; - -// The maximum number of per-CPU allocation regions to use for Nano. 
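The comment in nano_common_cpu_number_override_set() above describes a pattern worth noting: a global that is statically initialized to its common value lives in a clean copy-on-write __DATA page, and storing to it (even storing the same value) would dirty that page. A minimal sketch of the idiom; the names here are made up:

static int cached_is_default = 1;	/* __DATA; stays clean until first store */

static void
note_configuration(int is_default)
{
	/* Store only the uncommon value; never write "1" back. */
	if (!is_default) {
		cached_is_default = 0;
	}
}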
-MALLOC_NOEXPORT -extern unsigned int nano_common_max_magazines; - -MALLOC_NOEXPORT -extern boolean_t nano_common_max_magazines_is_ncpu; - -MALLOC_NOEXPORT -void -nano_common_cpu_number_override_set(void); - -MALLOC_NOEXPORT -void -nano_common_init(const char *envp[], const char *apple[], const char *bootargs); - -MALLOC_NOEXPORT -void -nano_common_configure(void); - -MALLOC_NOEXPORT -void * -nano_common_allocate_based_pages(size_t size, unsigned char align, - unsigned debug_flags, int vm_page_label, void *base_addr); - -MALLOC_NOEXPORT -boolean_t -nano_common_allocate_vm_space(mach_vm_address_t base, mach_vm_size_t size); - -MALLOC_NOEXPORT -void -nano_common_deallocate_pages(void *addr, size_t size, unsigned debug_flags); - -MALLOC_NOEXPORT -kern_return_t -nano_common_default_reader(task_t task, vm_address_t address, vm_size_t size, - void **ptr); - -#endif // __NANO_MALLOC_COMMON_H - diff --git a/src/libmalloc/src/nano_zone.h b/src/libmalloc/src/nano_zone.h deleted file mode 100644 index e9828c583..000000000 --- a/src/libmalloc/src/nano_zone.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - - -#ifndef __NANO_ZONE_H -#define __NANO_ZONE_H - -#if CONFIG_NANOZONE - -/********************* DEFINITIONS ************************/ - -#define MAX_RECORDER_BUFFER 256 - -/************* nanozone address field layout ******************/ - -#if defined(__x86_64) -#define NANO_MAG_BITS 6 -#define NANO_BAND_BITS 17 -#define NANO_SLOT_BITS 4 -#define NANO_OFFSET_BITS 17 - -#else -#error Unknown Architecture -#endif - -// clang-format really dislikes the bitfields here -// clang-format off -#if defined(__BIG_ENDIAN__) -struct nano_blk_addr_s { - uint64_t - nano_signature:NANOZONE_SIGNATURE_BITS, // the address range devoted to us. - nano_mag_index:NANO_MAG_BITS, // the core that allocated this block - nano_band:NANO_BAND_BITS, - nano_slot:NANO_SLOT_BITS, // bucket of homogenous quanta-multiple blocks - nano_offset:NANO_OFFSET_BITS; // locates the block -}; -#else -// least significant bits declared first -struct nano_blk_addr_s { - uint64_t - nano_offset:NANO_OFFSET_BITS, // locates the block - nano_slot:NANO_SLOT_BITS, // bucket of homogenous quanta-multiple blocks - nano_band:NANO_BAND_BITS, - nano_mag_index:NANO_MAG_BITS, // the core that allocated this block - nano_signature:NANOZONE_SIGNATURE_BITS; // the address range devoted to us. 
-}; -#endif -// clang-format on - -typedef union { - uint64_t addr; - struct nano_blk_addr_s fields; -} nano_blk_addr_t; - -#define SLOT_IN_BAND_SIZE (1 << NANO_OFFSET_BITS) -#define SLOT_KEY_LIMIT (1 << NANO_SLOT_BITS) /* Must track nano_slot width */ -#define BAND_SIZE (1 << (NANO_SLOT_BITS + NANO_OFFSET_BITS)) /* == Number of bytes covered by a page table entry */ -#define NANO_MAG_SIZE (1 << NANO_MAG_BITS) -#define NANO_SLOT_SIZE (1 << NANO_SLOT_BITS) - -#ifdef __INTERNAL_H - -/****************************** zone itself ***********************************/ - -/* - * Note that objects whose adddress are held in pointers here must be pursued - * individually in the nano_in_use_enumeration() routines. - */ - -typedef struct chained_block_s { - uintptr_t double_free_guard; - struct chained_block_s *next; -} *chained_block_t; - -typedef struct nano_meta_s { - OSQueueHead slot_LIFO MALLOC_NANO_CACHE_ALIGN; - unsigned int slot_madvised_log_page_count; - volatile uintptr_t slot_current_base_addr; - volatile uintptr_t slot_limit_addr; - volatile size_t slot_objects_mapped; - volatile size_t slot_objects_skipped; - bitarray_t slot_madvised_pages; - // position on cache line distinct from that of slot_LIFO - volatile uintptr_t slot_bump_addr MALLOC_NANO_CACHE_ALIGN; - volatile boolean_t slot_exhausted; - unsigned int slot_bytes; - unsigned int slot_objects; -} *nano_meta_admin_t; - -// vm_allocate()'d, so page-aligned to begin with. -typedef struct nanozone_s { - // first page will be given read-only protection - malloc_zone_t basic_zone; - uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)]; - - // remainder of structure is R/W (contains no function pointers) - // page-aligned - // max: NANO_MAG_SIZE cores x NANO_SLOT_SIZE slots for nano blocks {16 .. 256} - struct nano_meta_s meta_data[NANO_MAG_SIZE][NANO_SLOT_SIZE]; - _malloc_lock_s band_resupply_lock[NANO_MAG_SIZE]; - uintptr_t band_max_mapped_baseaddr[NANO_MAG_SIZE]; - size_t core_mapped_size[NANO_MAG_SIZE]; - - unsigned debug_flags; - - /* security cookie */ - uintptr_t cookie; - - /* - * The nano zone constructed by create_nano_zone() would like to hand off tiny, small, and large - * allocations to the default scalable zone. Record the latter as the "helper" zone here. - */ - malloc_zone_t *helper_zone; -} nanozone_t; - -#define NANOZONE_PAGED_SIZE ((sizeof(nanozone_t) + vm_page_size - 1) & ~ (vm_page_size - 1)) - -#endif // __INTERNAL_H - -#endif // CONFIG_NANOZONE - -#endif // __NANO_ZONE_H diff --git a/src/libmalloc/src/nano_zone_common.h b/src/libmalloc/src/nano_zone_common.h deleted file mode 100644 index 4ab9e5142..000000000 --- a/src/libmalloc/src/nano_zone_common.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __NANO_ZONE_COMMON_H -#define __NANO_ZONE_COMMON_H - -#define NANO_MAX_SIZE 256 /* Buckets sized {16, 32, 48, ..., 256} */ -#define SHIFT_NANO_QUANTUM 4 -#define NANO_REGIME_QUANTA_SIZE (1 << SHIFT_NANO_QUANTUM) // 16 -#define NANO_QUANTA_MASK (NANO_REGIME_QUANTA_SIZE - 1) -#define NANO_SIZE_CLASSES (NANO_MAX_SIZE/NANO_REGIME_QUANTA_SIZE) - -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -// Nanozone follows the shared region. -#define SHIFT_NANO_SIGNATURE 29 -#define NANOZONE_SIGNATURE_BITS 35 -#define NANOZONE_BASE_REGION_ADDRESS (SHARED_REGION_BASE + SHARED_REGION_SIZE) -#define NANOZONE_SIGNATURE (NANOZONE_BASE_REGION_ADDRESS >> SHIFT_NANO_SIGNATURE) - -#else // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -#define SHIFT_NANO_SIGNATURE 44 -#define NANOZONE_SIGNATURE_BITS 20 -#define NANOZONE_SIGNATURE 0x6ULL -#define NANOZONE_BASE_REGION_ADDRESS (NANOZONE_SIGNATURE << SHIFT_NANO_SIGNATURE) - -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -static MALLOC_INLINE size_t -_nano_common_good_size(size_t size) -{ - return (size <= NANO_REGIME_QUANTA_SIZE) ? NANO_REGIME_QUANTA_SIZE - : (((size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM) << SHIFT_NANO_QUANTUM); -} - -#endif // __NANO_ZONE_COMMON_H diff --git a/src/libmalloc/src/nanov2_malloc.c b/src/libmalloc/src/nanov2_malloc.c deleted file mode 100644 index 06c85ab05..000000000 --- a/src/libmalloc/src/nanov2_malloc.c +++ /dev/null @@ -1,2950 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_LICENSE_HEADER_END@ - */ -#include "resolver.h" -#include "internal.h" - -#if CONFIG_NANOZONE - -#pragma mark - -#pragma mark Forward Declarations - -#if OS_VARIANT_NOTRESOLVED -static kern_return_t -nanov2_statistics_task_printer(task_t task, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer, - malloc_statistics_t *stats); -static kern_return_t -nanov2_statistics_task(task_t task, vm_address_t zone_address, - memory_reader_t reader, malloc_statistics_t *stats); -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Externals for resolved functions - -MALLOC_NOEXPORT extern void *nanov2_allocate(nanozonev2_t *nanozone, size_t rounded_size, - boolean_t clear); -MALLOC_NOEXPORT extern void nanov2_free_to_block(nanozonev2_t *nanozone, void *ptr, - nanov2_size_class_t size_class); -MALLOC_NOEXPORT extern boolean_t nanov2_madvise_block(nanozonev2_t *nanozone, - nanov2_block_meta_t *block_metap, nanov2_block_t *blockp, - nanov2_size_class_t size_class); -MALLOC_NOEXPORT extern size_t nanov2_pointer_size(nanozonev2_t *nanozone, void *ptr, - boolean_t allow_inner); -MALLOC_NOEXPORT extern size_t nanov2_pressure_relief(nanozonev2_t *nanozone, size_t goal); - -#if OS_VARIANT_RESOLVED -MALLOC_NOEXPORT extern boolean_t nanov2_allocate_new_region(nanozonev2_t *nanozone); -#endif // OS_VARIANT_RESOLVED - -#pragma mark - -#pragma mark Global Allocator State - -// -- Block scanning -typedef enum { - NANO_SCAN_FIRST_FIT = 0, - NANO_SCAN_CAPACITY_BASED, -} nanov2_block_scan_policy_t; - -// Minimum occupancy percentage for an ideal block. -#define DEFAULT_SCAN_MIN_CAPACITY 20 - -// Maximum occupancy percentage for an ideal block. -#define DEFAULT_SCAN_MAX_CAPACITY 80 - -// Maximum number of blocks to scan while looking for a best fit once a -// candidate block has been found. Value 0 means no limit. -#define DEFAULT_SCAN_LIMIT 10 - -// -- Madvise policy -typedef enum { - NANO_MADVISE_IMMEDIATE = 0, - NANO_MADVISE_WARNING_PRESSURE, - NANO_MADVISE_CRITICAL_PRESSURE, -} nanov2_madvise_policy_t; - -typedef struct nanov2_policy_config_s { - // Determines the algorithm for scanning for the next allocation block. - // Used in conjunction with nanov2_block_scan_capacity_min, - // nanov2_block_scan_capacity_max and nanov2_block_scan_limit. Set from the - // MallocNanoScanPolicy environment variable. - nanov2_block_scan_policy_t block_scan_policy; - - // Minimum occupancy percentage for an ideal block. - int block_scan_min_capacity; - - // Maximum occupancy percentage for an ideal block. - int block_scan_max_capacity; - - // Maximum number of blocks to scan while looking for a best fit once a - // candidate block has been found. Value 0 means no limit. - int block_scan_limit; - - // Bitmask for size classes that are only allowed a single arena. Set from - // the MallocNanoSingleArena environment variable. - uint16_t single_arena_size_classes; - - // Madvise policy. Set from the MallocNanoMadvisePolicy environment variable - nanov2_madvise_policy_t madvise_policy; -} nanov2_policy_config_t; - -#if OS_VARIANT_NOTRESOLVED - -// Madvise policy. Set from the MallocNanoMadvisePolicy environment variable. 
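Before the Nano V2 policy machinery below, the quantum arithmetic from _nano_common_good_size() in nano_zone_common.h above can be checked in isolation. A standalone sketch, with the two constants copied from that header:

#include <assert.h>
#include <stddef.h>

#define SHIFT_NANO_QUANTUM 4
#define NANO_REGIME_QUANTA_SIZE (1 << SHIFT_NANO_QUANTUM)	/* 16 */

/* Same arithmetic as _nano_common_good_size(): round up to the 16-byte
 * quantum, with a 16-byte floor. */
static size_t
good_size(size_t size)
{
	return (size <= NANO_REGIME_QUANTA_SIZE) ? NANO_REGIME_QUANTA_SIZE
			: (((size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM)
					<< SHIFT_NANO_QUANTUM);
}

int
main(void)
{
	assert(good_size(1) == 16);	/* floor */
	assert(good_size(16) == 16);	/* already a quantum multiple */
	assert(good_size(17) == 32);	/* rounds up */
	assert(good_size(241) == 256);	/* top Nano size class */
	return 0;
}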
-MALLOC_NOEXPORT nanov2_madvise_policy_t nanov2_madvise_policy; - -MALLOC_NOEXPORT nanov2_policy_config_t nanov2_policy_config = { - .block_scan_policy = NANO_SCAN_CAPACITY_BASED, - .block_scan_min_capacity = DEFAULT_SCAN_MIN_CAPACITY, - .block_scan_max_capacity = DEFAULT_SCAN_MAX_CAPACITY, - .block_scan_limit = DEFAULT_SCAN_LIMIT, - .single_arena_size_classes = 0, - .madvise_policy = NANO_MADVISE_IMMEDIATE, -}; - -#else // OS_VARIANT_NOTRESOLVED - -MALLOC_NOEXPORT extern nanov2_policy_config_t nanov2_policy_config; -MALLOC_NOEXPORT extern nanov2_madvise_policy_t nanov2_madvise_policy; - -#endif // OS_VARIANT_NOTRESOLVED - -// BLOCKS_PER_UNIT must be a power of two to make it possible to get the size -// class from a pointer reasonably cheaply. Do not change the value without -// fixing the code that depends on it. -#define BLOCKS_PER_UNIT_SHIFT 6 -#define BLOCKS_PER_UNIT (1 << BLOCKS_PER_UNIT_SHIFT) - -#if OS_VARIANT_NOTRESOLVED -// Number of units of each size class in an arena. The numbers here must add -// up to 64. One unit corresponds to BLOCKS_PER_UNIT blocks in the corresponding -// size class, so 64 units maps to a total of 64 * 64 = 4096 blocks and each -// block is 16K, making a total of 64MB, which is the size of an arena. -static int block_units_by_size_class[] = { - 2, // 16-byte allocations (less 1 for the metadata block) - 10, // 32-byte allocations - 11, // 48-byte allocations - 10, // 64-byte allocations - 5, // 80-byte allocations - 3, // 96-byte allocations - 3, // 112-byte allocations - 4, // 128-byte allocations - 3, // 144-byte allocations - 2, // 160-byte allocations - 2, // 176-byte allocations - 2, // 192-byte allocations - 2, // 208-byte allocations - 2, // 224-byte allocations - 1, // 240-byte allocations - 2, // 256-byte allocations -}; - -MALLOC_STATIC_ASSERT( - sizeof(block_units_by_size_class)/sizeof(block_units_by_size_class[0]) - == NANO_SIZE_CLASSES, - "Size of block_units_by_size_class is incorrect"); - -// Total of the number of blocks in all size classes. Currently this is 64. -#define TOTAL_BLOCK_UNITS (NANOV2_BLOCKS_PER_ARENA/BLOCKS_PER_UNIT) - -// Offsets to the first and last blocks for each size class within an arena, in -// the logical address space. These tables are constructed from the values in -// the block_units_by_size_class table. -MALLOC_NOEXPORT int first_block_offset_by_size_class[NANO_SIZE_CLASSES]; -MALLOC_NOEXPORT int last_block_offset_by_size_class[NANO_SIZE_CLASSES]; - -// Table mapping the part of a logical address that depends on size class to -// the size class. Also built from the block_units_by_size_class table. -MALLOC_NOEXPORT int ptr_offset_to_size_class[TOTAL_BLOCK_UNITS]; - -// Number of slots in a block, indexed by size class. Note that there is a small -// amount of wastage in some size classes because the block size is not always -// exactly divisible by the allocation size. The number of wasted bytes is shown -// in parentheses in the comments below. 
-MALLOC_NOEXPORT const int slots_by_size_class[] = { - NANOV2_BLOCK_SIZE/(1 * NANO_REGIME_QUANTA_SIZE), // 16 bytes: 1024 (0) - NANOV2_BLOCK_SIZE/(2 * NANO_REGIME_QUANTA_SIZE), // 32 bytes: 512 (0) - NANOV2_BLOCK_SIZE/(3 * NANO_REGIME_QUANTA_SIZE), // 48 bytes: 341 (16) - NANOV2_BLOCK_SIZE/(4 * NANO_REGIME_QUANTA_SIZE), // 64 bytes: 256 (0) - NANOV2_BLOCK_SIZE/(5 * NANO_REGIME_QUANTA_SIZE), // 80 bytes: 204 (64) - NANOV2_BLOCK_SIZE/(6 * NANO_REGIME_QUANTA_SIZE), // 96 bytes: 170 (64) - NANOV2_BLOCK_SIZE/(7 * NANO_REGIME_QUANTA_SIZE), // 112 bytes: 146 (32) - NANOV2_BLOCK_SIZE/(8 * NANO_REGIME_QUANTA_SIZE), // 128 bytes: 128 (0) - NANOV2_BLOCK_SIZE/(9 * NANO_REGIME_QUANTA_SIZE), // 144 bytes: 113 (112) - NANOV2_BLOCK_SIZE/(10 * NANO_REGIME_QUANTA_SIZE), // 160 bytes: 102 (64) - NANOV2_BLOCK_SIZE/(11 * NANO_REGIME_QUANTA_SIZE), // 176 bytes: 93 (16) - NANOV2_BLOCK_SIZE/(12 * NANO_REGIME_QUANTA_SIZE), // 192 bytes: 85 (64) - NANOV2_BLOCK_SIZE/(13 * NANO_REGIME_QUANTA_SIZE), // 208 bytes: 78 (160) - NANOV2_BLOCK_SIZE/(14 * NANO_REGIME_QUANTA_SIZE), // 224 bytes: 73 (32) - NANOV2_BLOCK_SIZE/(15 * NANO_REGIME_QUANTA_SIZE), // 240 bytes: 68 (64) - NANOV2_BLOCK_SIZE/(16 * NANO_REGIME_QUANTA_SIZE), // 256 bytes: 64 (0) -}; -#else // OS_VARIANT_NOTRESOLVED - -extern int block_units_by_size_class[]; -extern int ptr_offset_to_size_class[]; -extern int first_block_offset_by_size_class[]; -extern int last_block_offset_by_size_class[]; -extern const int slots_by_size_class[]; - -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Conversion and Mapping Inlines - -// nanov2_block_index_to_meta_index() and nanov2_meta_index_to_block_index() -// map between the index of a block in its arena and the index of the meta data -// header for that block in the metadata block. The mapping is not direct -// to avoid false sharing caused by CPUs that are using adjacent blocks -// writing to what would otherwise be adjacent meta data headers. The effect of -// these functions is to separate the meta data headers for adjacent blocks by -// at least the size of a cache line (assumed to be 64 bytes). -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_meta_index_t -nanov2_block_index_to_meta_index(nanov2_block_index_t block_index) -{ - return ((block_index >> 6) | (block_index << 6)) & 0xFFF; -} - -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_index_t -nanov2_meta_index_to_block_index(nanov2_meta_index_t block_meta_index) -{ - return ((block_meta_index >> 6) | (block_meta_index << 6)) & 0xFFF; -} - -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_meta_index_t -nanov2_metablock_meta_index(nanozonev2_t *nanozone) -{ - return nanov2_block_index_to_meta_index((nanov2_block_index_t) - nanozone->aslr_cookie); -} - -// Given a block metadata pointer, returns whether the block is active (that is, -// it is being used for allocations, it has allocations that have not been freed, -// or is waiting to be madvised). -static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t -nanov2_is_block_active(nanov2_block_meta_t block_meta) -{ - return block_meta.next_slot != SLOT_NULL - && block_meta.next_slot != SLOT_MADVISING - && block_meta.next_slot != SLOT_MADVISED; -} - -#if OS_VARIANT_RESOLVED -// Given a block metadata pointer, returns whether an allocation could be -// attempted from it. Allocations are not allowed from blocks that have not yet -// been used (since such a block has not been assigned), is full or has been -// madvised. 
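The bit-swap in nanov2_block_index_to_meta_index() above has two properties worth verifying: it is its own inverse (so the block-to-meta and meta-to-block functions are the same operation), and consecutive block indices land 64 metadata entries apart, which at 4 bytes per nanov2_block_meta_t (later in this file, nanov2_turn_off_in_use() manipulates one as a uint32_t) is 256 bytes, comfortably more than the assumed 64-byte cache line. A standalone check:

#include <assert.h>
#include <stdint.h>

/* Same operation as nanov2_block_index_to_meta_index(): exchange the two
 * 6-bit halves of a 12-bit index. */
static uint32_t
swap6(uint32_t index)
{
	return ((index >> 6) | (index << 6)) & 0xFFF;
}

int
main(void)
{
	for (uint32_t i = 0; i < 4096; i++) {
		assert(swap6(swap6(i)) == i);	/* involution */
	}
	/* Adjacent blocks get metadata headers 64 entries apart. */
	assert(swap6(1) - swap6(0) == 64);
	assert(swap6(43) - swap6(42) == 64);
	return 0;
}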
-static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t -nanov2_can_allocate_from_block(nanov2_block_meta_t block_meta) -{ - return block_meta.in_use && block_meta.next_slot != SLOT_FULL; -} - -// Given a pointer, returns whether it has the correct signature to be a -// Nano V2 address. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t -nanov2_has_valid_signature(void *ptr) -{ - return (((uintptr_t)ptr) >> SHIFT_NANO_SIGNATURE) == NANOZONE_SIGNATURE; -} -#endif // OS_VARIANT_RESOLVED - -// Converts a Nano V2 logical address to the corresponding real address. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE void * -nanov2_logical_address_to_ptr(nanozonev2_t *nanozone, void *laddr) -{ - return (void *)(((uintptr_t)laddr) ^ nanozone->aslr_cookie_aligned); -} - -// Gets the maximum allocation size for a given size class. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE int -nanov2_size_from_size_class(nanov2_size_class_t size_class) -{ - return (size_class + 1) * NANO_REGIME_QUANTA_SIZE; -} - -#if OS_VARIANT_RESOLVED -// Given an allocation size, returns the corresponding size class. It is the -// responsibility of the caller to ensure that the size is valid. Returned -// value is zero-based. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t -nanov2_size_class_from_size(size_t size) -{ - return (nanov2_size_class_t)howmany(size, NANO_REGIME_QUANTA_SIZE) - 1; -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_RESOLVED -// Given a pointer that is assumed to be in the Nano zone, returns the address -// of its containing block. Works for both real and logical pointers and returns -// a pointer of the same type. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t * -nanov2_block_address_for_ptr(void *ptr) -{ - return (void *)(((uintptr_t)ptr) & NANOV2_BLOCK_ADDRESS_MASK); -} -#endif // OS_VARIANT_RESOLVED - -// Given a pointer that is assumed to be in the Nano zone, returns the address -// of its containing arena. Works for both real and logical pointers. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t * -nanov2_arena_address_for_ptr(void *ptr) -{ - return (void *)(((uintptr_t)ptr) & NANOV2_ARENA_ADDRESS_MASK); -} - -#if OS_VARIANT_RESOLVED -// Given a pointer that is assumed to be in the Nano zone, returns the address -// of its containing region. Works for both real and logical pointers. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_t * -nanov2_region_address_for_ptr(void *ptr) -{ - return (nanov2_region_t *)(((uintptr_t)ptr) & NANOV2_REGION_ADDRESS_MASK); -} -#endif // OS_VARIANT_RESOLVED - -// Given a pointer that is assumed to be in the Nano zone, returns the real -// address of its metadata block. Works for both real and logical pointers. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_metablock_t * -nanov2_metablock_address_for_ptr(nanozonev2_t *nanozone, void *ptr) -{ - // The metadata block is the first logical block in the arena, so its - // logical address is that of the arena. To get a real pointer, we map it - // through nanov2_logical_address_to_ptr(). - return (nanov2_arena_metablock_t *)nanov2_logical_address_to_ptr(nanozone, - nanov2_arena_address_for_ptr(ptr)); -} - -#if OS_VARIANT_RESOLVED -// Given a pointer to a block_metap_t for a block, returns a pointer to the -// block itself. 
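A quick standalone check of the size-class mapping just defined, with howmany() expanded to its usual ceiling-division form; the helper names here are illustrative:

#include <assert.h>
#include <stddef.h>

#define QUANTUM 16	/* NANO_REGIME_QUANTA_SIZE */

/* Mirrors nanov2_size_class_from_size(): ceil(size / 16) - 1. */
static unsigned
size_class_from_size(size_t size)
{
	return (unsigned)((size + QUANTUM - 1) / QUANTUM) - 1;
}

/* Mirrors nanov2_size_from_size_class(). */
static size_t
size_from_size_class(unsigned size_class)
{
	return (size_t)(size_class + 1) * QUANTUM;
}

int
main(void)
{
	assert(size_class_from_size(1) == 0);	/* 1..16  -> class 0 */
	assert(size_class_from_size(17) == 1);	/* 17..32 -> class 1 */
	assert(size_class_from_size(256) == 15);	/* largest Nano class */
	/* Round trip: each class maps back to its maximum allocation size. */
	for (unsigned c = 0; c < 16; c++) {
		assert(size_class_from_size(size_from_size_class(c)) == c);
	}
	return 0;
}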
-static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t * -nanov2_block_address_from_meta_ptr(nanozonev2_t *nanozone, - nanov2_block_meta_t *block_metap) -{ - nanov2_block_t *meta_block = nanov2_block_address_for_ptr(block_metap); - nanov2_arena_t *arena = nanov2_arena_address_for_ptr(block_metap); - - // Get the block's index and use that to get the address of the block. - nanov2_meta_index_t meta_index = - (nanov2_meta_index_t)(block_metap - (nanov2_block_meta_t *)meta_block); - nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index); - return &arena->blocks[block_index]; -} -#endif // OS_VARIANT_RESOLVED - -// Given the index of a block_metap_t for a block, returns a pointer to the -// block itself. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t * -nanov2_block_address_from_meta_index(nanozonev2_t *nanozone, - nanov2_arena_t *arena, nanov2_meta_index_t meta_index) -{ - nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index); - return &arena->blocks[block_index]; -} - -// Given a pointer that is assumed to be in the nanozone, returns the index -// of its containing block within its hosting arena. Works for both logical and -// real pointers and returns an index in the corresponding address space. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_index_t -nanov2_block_index_for_ptr(void *ptr) -{ - return (nanov2_block_index_t)(((uintptr_t)ptr) >> NANOV2_OFFSET_BITS) - & ((1 << NANOV2_BLOCK_BITS) - 1); -} - -#if OS_VARIANT_RESOLVED -// Given a pointer that is assumed to be in the nanozone, returns a pointer to -// the meta data for its containing block. Expects ptr be a real address. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t * -nanov2_meta_ptr_for_ptr(nanozonev2_t *nanozone, void *ptr) -{ - nanov2_arena_metablock_t *meta_block = nanov2_metablock_address_for_ptr( - nanozone, ptr); - nanov2_block_index_t block_index = nanov2_block_index_for_ptr(ptr); - nanov2_meta_index_t meta_index = nanov2_block_index_to_meta_index(block_index); - return &meta_block->arena_block_meta[meta_index]; -} -#endif // OS_VARIANT_RESOLVED - -// Given a region pointer, returns the address of the first arena in the region. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t * -nanov2_first_arena_for_region(nanov2_region_t *region) -{ - // The first arena is colocated with the region itself. - return (nanov2_arena_t *)region; -} - -// Given a region pointer, returns a pointer to the arena after the last -// active arena in the region. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t * -nanov2_limit_arena_for_region(nanozonev2_t *nanozone, nanov2_region_t *region) -{ - // The first arena is colocated with the region itself. - nanov2_arena_t *limit_arena; - if (region == nanozone->current_region_base) { - limit_arena = nanozone->current_region_next_arena; - } else { - limit_arena = nanov2_first_arena_for_region(region + 1); - } - return limit_arena; -} - -// Given a region pointer, returns the address of the linkage structure for -// that region. The linkage structure is stored in the first entry of the -// metadata block of the first arena in the region. 
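Putting the address helpers above together: with 16 KiB blocks (NANOV2_OFFSET_BITS assumed to be 14) and 4096 blocks per 64 MiB arena (NANOV2_BLOCK_BITS assumed to be 12), as the block_units_by_size_class comment above implies, a Nano V2 pointer decomposes with plain shifts and masks. The pointer below is made up, chosen so its top bits carry the 0x6 signature from nano_zone_common.h:

#include <assert.h>
#include <stdint.h>

#define OFFSET_BITS 14	/* 16 KiB blocks (assumed NANOV2_OFFSET_BITS) */
#define BLOCK_BITS  12	/* 4096 blocks/arena (assumed NANOV2_BLOCK_BITS) */

int
main(void)
{
	/* Hypothetical arena base: 0x6 signature in the top address bits. */
	uint64_t arena = 0x600000000000ULL;
	uint64_t ptr = arena + ((uint64_t)5 << OFFSET_BITS) + 0x123;

	/* nanov2_block_index_for_ptr(): middle 12 bits select the block. */
	unsigned block_index = (unsigned)(ptr >> OFFSET_BITS)
			& ((1u << BLOCK_BITS) - 1);
	assert(block_index == 5);

	/* nanov2_arena_address_for_ptr(): clear the low 26 bits (64 MiB). */
	uint64_t arena_base = ptr & ~((1ULL << (OFFSET_BITS + BLOCK_BITS)) - 1);
	assert(arena_base == arena);

	/* nanov2_block_address_for_ptr(): clear the low 14 bits (16 KiB). */
	uint64_t block_base = ptr & ~((1ULL << OFFSET_BITS) - 1);
	assert(block_base == arena + ((uint64_t)5 << OFFSET_BITS));
	return 0;
}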
-static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_linkage_t * -nanov2_region_linkage_for_region(nanozonev2_t *nanozone, nanov2_region_t *region) -{ - nanov2_arena_metablock_t *first_metadata_block = - nanov2_metablock_address_for_ptr(nanozone, region); - return (nanov2_region_linkage_t *)&first_metadata_block->arena_block_meta[ - nanov2_metablock_meta_index(nanozone)]; -} - -#if OS_VARIANT_RESOLVED -// Given a pointer to a region, returns a pointer to the region that follows it, -// or NULL if there isn't one. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_t * -nanov2_next_region_for_region(nanozonev2_t *nanozone, nanov2_region_t *region) -{ - nanov2_region_linkage_t *linkage = - nanov2_region_linkage_for_region(nanozone, region); - int offset = linkage->next_region_offset; - return offset ? region + offset : NULL; -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED -// Given a pointer to a region, returns a pointer to the region that follows it, -// or NULL if there isn't one. This variant is used when mapping the nanozone -// for another process. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_t * -nanov2_next_region_for_region_offset(nanozonev2_t *nanozone, - nanov2_region_t *region, off_t region_offset) -{ - nanov2_region_linkage_t *linkage = - nanov2_region_linkage_for_region(nanozone, region); - nanov2_region_linkage_t *mapped_linkage = (nanov2_region_linkage_t *) - ((uintptr_t)linkage + region_offset); - int offset = mapped_linkage->next_region_offset; - return offset ? region + offset : NULL; -} -#endif // OS_VARIANT_NOTRESOLVED - -// Given the index of a slot in a block of a given size and the base address of -// the block, returns a pointer to the start of the slot. This works for both -// real and logical block pointers and returns a pointer of the same type. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE void * -nanov2_slot_in_block_ptr(nanov2_block_t *block, nanov2_size_class_t size_class, - int slot_index) -{ - return (void *)((uintptr_t)block + - nanov2_size_from_size_class(size_class) * slot_index); -} - -#if OS_VARIANT_RESOLVED -// Given the base address of a block, the size class for the block and a pointer, -// returns the index of the slot represented by the pointer. It is assumed that -// the pointer is slot-aligned and is within the bounds of the block. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE int -nanov2_slot_index_in_block(nanov2_block_t *block, nanov2_size_class_t size_class, - void *ptr) -{ - return (int)((uintptr_t)ptr - (uintptr_t)block)/ - (nanov2_size_from_size_class(size_class)); -} -#endif // OS_VARIANT_RESOLVED - -// Given a (real) pointer, gets the size class of its containing block. Assumes -// that the pointer is in a valid region, arena and block. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t -nanov2_size_class_for_ptr(nanozonev2_t *nanozone, void *ptr) -{ - // To get the size class, we need to convert the block number from - // physical to logical, since the ptr_offset_to_size_class table is - // indexed by logical block. - nanov2_block_index_t block = - (int)(nanov2_block_index_for_ptr(ptr) ^ nanozone->aslr_cookie); - return ptr_offset_to_size_class[block >> BLOCKS_PER_UNIT_SHIFT]; -} - -#if OS_VARIANT_NOTRESOLVED - -// Given a meta data index, gets the size class of the corresponding block. 
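nanov2_size_class_for_ptr() above leans on the ptr_offset_to_size_class table, which, per the comments earlier in this file, is built from block_units_by_size_class during configuration. A standalone sketch of that build plus the ASLR-unscrambled lookup, using the default unit counts copied from above:

#include <assert.h>

#define NANO_SIZE_CLASSES 16
#define BLOCKS_PER_UNIT_SHIFT 6	/* 64 blocks per unit */
#define TOTAL_BLOCK_UNITS 64

/* Default unit counts, copied from block_units_by_size_class above. */
static const int block_units_by_size_class[NANO_SIZE_CLASSES] = {
	2, 10, 11, 10, 5, 3, 3, 4, 3, 2, 2, 2, 2, 2, 1, 2,
};
static int ptr_offset_to_size_class[TOTAL_BLOCK_UNITS];

static void
build_table(void)
{
	int next = 0;
	for (int c = 0; c < NANO_SIZE_CLASSES; c++) {
		for (int u = 0; u < block_units_by_size_class[c]; u++) {
			ptr_offset_to_size_class[next++] = c;
		}
	}
	assert(next == TOTAL_BLOCK_UNITS);
}

/* The table is indexed by *logical* block, so the physical index taken
 * from a pointer is XOR-unscrambled with the per-boot ASLR cookie first. */
static int
size_class_for_block(unsigned physical_index, unsigned aslr_cookie)
{
	unsigned logical = physical_index ^ aslr_cookie;
	return ptr_offset_to_size_class[logical >> BLOCKS_PER_UNIT_SHIFT];
}

int
main(void)
{
	build_table();
	/* With a zero cookie: logical blocks 0..127 are the two 16-byte
	 * units; block 128 starts the 32-byte units. */
	assert(size_class_for_block(0, 0) == 0);
	assert(size_class_for_block(128, 0) == 1);
	return 0;
}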
-static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t -nanov2_size_class_for_meta_index(nanozonev2_t *nanozone, nanov2_meta_index_t meta_index) -{ - // To get the size class, we need to get the block index from meta index - // and then convert it from real to logical, since the - // ptr_offset_to_size_class table is indexed by logical block. - nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index); - int logical_block_index = (int)(block_index ^ nanozone->aslr_cookie); - return ptr_offset_to_size_class[logical_block_index >> BLOCKS_PER_UNIT_SHIFT]; -} -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED -// Given a size class and an arena, returns a pointer to the metadata for the -// first block for that size class in the arena, ignoring the metadata block. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t * -nanov2_first_block_for_size_class_in_arena(nanozonev2_t *nanozone, - nanov2_size_class_t size_class, nanov2_arena_t *arena) -{ - int block_offset = first_block_offset_by_size_class[size_class]; - nanov2_arena_metablock_t *meta_blockp = - nanov2_metablock_address_for_ptr(nanozone, arena); - nanov2_block_index_t block_index = - (nanov2_block_index_t)(block_offset ^ nanozone->aslr_cookie); - nanov2_meta_index_t meta_index = nanov2_block_index_to_meta_index(block_index); - return &meta_blockp->arena_block_meta[meta_index]; -} - -// Given a pointer to the metadata for a block in a given size class, returns -// a pointer to the metadata for the next block, wrapping from the last block -// to the first if necessary. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t * -nanov2_next_block_for_size_class(nanozonev2_t *nanozone, - nanov2_size_class_t size_class, nanov2_block_meta_t *meta_blockp, - boolean_t *wrapped) -{ - // To find the next block, get the index of the current block, which is in - // the real address space, unscramble it to get a logical block number, - // add 1 to it, wrapping if necessary, then scramble the result. - nanov2_block_meta_t *base_meta_blockp = - (nanov2_block_meta_t *)(((uintptr_t)meta_blockp) & (NANOV2_BLOCK_ADDRESS_MASK)); - nanov2_meta_index_t meta_index = (int)(meta_blockp - base_meta_blockp); - nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index); - block_index ^= nanozone->aslr_cookie; // Unscramble - int last_offset = last_block_offset_by_size_class[size_class]; - if (wrapped) *wrapped = block_index == last_offset; - block_index = block_index == last_offset ? - first_block_offset_by_size_class[size_class] : block_index + 1; - block_index = (nanov2_block_index_t)(block_index ^ nanozone->aslr_cookie); - meta_index = nanov2_block_index_to_meta_index(block_index); - return &base_meta_blockp[meta_index]; -} - -// Given a pointer to the metadata for a block in a given size class, returns -// a pointer to the metadata for the previous block, wrapping from the first -// block to the last if necessary. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t * -nanov2_previous_block_for_size_class(nanozonev2_t *nanozone, - nanov2_size_class_t size_class, nanov2_block_meta_t *meta_blockp, - boolean_t *wrapped) -{ - // To find the previous block, get the index of the current block, which is - // in the real address space, unscramble it to get a logical block number, - // subtract 1 from it, wrapping if necessary, then scramble the result. 
- nanov2_block_meta_t *base_meta_blockp = - (nanov2_block_meta_t *)(((uintptr_t)meta_blockp) & (NANOV2_BLOCK_ADDRESS_MASK)); - nanov2_meta_index_t meta_index = (int)(meta_blockp - base_meta_blockp); - nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index); - block_index ^= nanozone->aslr_cookie; // Unscramble - int first_offset = first_block_offset_by_size_class[size_class]; - if (wrapped) *wrapped = block_index == first_offset; - block_index = block_index == first_offset ? - last_block_offset_by_size_class[size_class] : block_index - 1; - block_index = (nanov2_block_index_t)(block_index ^ nanozone->aslr_cookie); - meta_index = nanov2_block_index_to_meta_index(block_index); - return &base_meta_blockp[meta_index]; -} - -// Turns off the in-use bit in the meta data for a given block. -static MALLOC_ALWAYS_INLINE MALLOC_INLINE void -nanov2_turn_off_in_use(nanov2_block_meta_t *block_metap) -{ - // TODO: find a way to do this that is more efficient and readable. - static nanov2_block_meta_t mask = { - .in_use = 0, - .next_slot = ~0, - .free_count = ~0, - .gen_count = ~0, - }; - os_atomic_and((uint32_t *)block_metap, *(uint32_t *)&mask, relaxed); -} - -#pragma mark - -#pragma mark Policy Functions - -// Gets the index of the block in the zone's current_block array from which -// allocations should be made. This function should be replaced to implement -// a different strategy (e.g. for E- vs P-cores). -static MALLOC_ALWAYS_INLINE MALLOC_INLINE int -nanov2_get_allocation_block_index(void) -{ -#if CONFIG_NANO_USES_HYPER_SHIFT - if (os_likely(nano_common_max_magazines_is_ncpu)) { - // Default case is max magazines == physical number of CPUs, which - // must be > _os_cpu_number() >> hyper_shift, so the modulo - // operation is not required. - return _os_cpu_number() >> hyper_shift; - } -#else // CONFIG_NANO_USES_HYPER_SHIFT - if (os_likely(nano_common_max_magazines_is_ncpu)) { - // Default case is max magazines == logical number of CPUs, which - // must be > _os_cpu_number() so the modulo operation is not required. - return _os_cpu_number(); - } -#endif // CONFIG_NANO_USES_HYPER_SHIFT - - unsigned int shift = 0; -#if CONFIG_NANO_USES_HYPER_SHIFT - shift = hyper_shift; -#endif // CONFIG_NANO_USES_HYPER_SHIFT - - if (os_likely(_os_cpu_number_override == -1)) { - return (_os_cpu_number() >> shift) % nano_common_max_magazines; - } - return (_os_cpu_number_override >> shift) % nano_common_max_magazines; -} -#endif // OS_VARIANT_RESOLVED - -#pragma mark - -#pragma mark Allocator Initialization - -#if OS_VARIANT_NOTRESOLVED - -static const char madvise_policy_env[] = "MallocNanoMadvisePolicy"; -static const char madvise_policy_bootarg[] = "nanov2_madvise_policy"; -static const char madvise_immediate[] = "immediate"; -static const char madvise_warning[] = "warning"; -static const char madvise_critical[] = "critical"; - -static const char single_arena_env[] = "MallocNanoSingleArena"; -static const char single_arena_bootarg[] = "nanov2_single_arena"; - -static const char scan_policy_env[] = "MallocNanoScanPolicy"; -static const char scan_policy_bootarg[] = "nanov2_scan_policy"; - -static const char size_class_blocks_env[] = "MallocNanoSizeClassBlocks"; -static const char size_class_blocks_bootarg[] = "nanov2_size_class_blocks"; - -// Parse and set the madvise policy setting. If ptr is NULL, sets the default -// policy. 
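For orientation, the policy in nanov2_get_allocation_block_index() above boils down to a shift and an optional modulo. In this sketch, cpu_number stands in for _os_cpu_number() or its override, and hyper_shift folds hyperthread siblings onto one magazine when CONFIG_NANO_USES_HYPER_SHIFT is enabled (pass 0 otherwise):

static int
allocation_block_index(unsigned cpu_number, unsigned hyper_shift,
		unsigned max_magazines, int max_is_ncpu)
{
	if (max_is_ncpu) {
		/* Common case: one magazine per CPU, no modulo required. */
		return (int)(cpu_number >> hyper_shift);
	}
	return (int)((cpu_number >> hyper_shift) % max_magazines);
}

For example, with 8 logical CPUs, hyper_shift 1 and 4 magazines, CPU 5 maps to magazine (5 >> 1) % 4 == 2.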
-static void -nanov2_set_madvise_policy(const char *name, const char *ptr) -{ - nanov2_madvise_policy_t madvise_policy = NANO_MADVISE_IMMEDIATE; - if (ptr) { - if (!strncmp(ptr, madvise_immediate, sizeof(madvise_immediate) - 1)) { - madvise_policy = NANO_MADVISE_IMMEDIATE; - } else if (!strncmp(ptr, madvise_warning, sizeof(madvise_warning) - 1)) { - madvise_policy = NANO_MADVISE_WARNING_PRESSURE; - } else if (!strncmp(ptr, madvise_critical, sizeof(madvise_critical) - 1)) { - madvise_policy = NANO_MADVISE_CRITICAL_PRESSURE; - } else { - malloc_report(ASL_LEVEL_ERR, - "%s value (%s) invalid - ignored.\n", name, ptr); - } - } - nanov2_madvise_policy = madvise_policy; -} - -// Parse and set the list of size classes that are allowed only one arena. If -// ptr is NULL, no size classes are restricted to a single arena, -// Format is a list of sizes separated by colons (e.g. 16:240). Each size must -// be a multiple of NANO_REGIME_QUANTA_SIZE and must be between 16 and 256. -static void -nanov2_set_single_arena_size_classes(const char *name, const char *ptr) -{ - uint16_t single_arena_size_classes = 0; - if (ptr) { - const char *value = ptr; - const char *endp; - boolean_t failed = FALSE; - while (*ptr) { - long size = malloc_common_convert_to_long(ptr, &endp); - if (endp != ptr) { - if (*endp && *endp != ':') { - failed = TRUE; - break; - } - if (size > NANO_MAX_SIZE || size < NANO_REGIME_QUANTA_SIZE || - (size % NANO_REGIME_QUANTA_SIZE) != 0) { - failed = TRUE; - break; - } - single_arena_size_classes |= - 1 << ((size/NANO_REGIME_QUANTA_SIZE) - 1); - } else { - failed = true; - break; - } - if (!*endp) { - break; - } - ptr = endp + 1; - } - if (failed) { - malloc_report(ASL_LEVEL_ERR, - "%s value (%s) invalid - ignored.\n", name, value); - single_arena_size_classes = 0; - } - } - nanov2_policy_config.single_arena_size_classes = single_arena_size_classes; -} - -// Parse and set the block scan policy setting. If ptr is NULL, the default -// policy is used. Format is either "firstfit" or "minXX:maxYY:limZZ", where -// XX, YY and ZZ are numbers, XX and YY must be between 0 and 100 and XX must -// not be greater than YY. min, max and lim may appear in any order or may be -// omitted to get default values. 
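The bitmask produced by nanov2_set_single_arena_size_classes() above is one bit per size class, at bit index size/16 - 1. A worked example for MallocNanoSingleArena=16:240:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t mask = 0;
	int sizes[] = { 16, 240 };	/* parsed from "16:240" */
	for (int i = 0; i < 2; i++) {
		mask |= 1 << ((sizes[i] / 16) - 1);
	}
	/* Bit 0 for the 16-byte class, bit 14 for the 240-byte class. */
	assert(mask == 0x4001);
	return 0;
}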
-static void -nanov2_set_block_scan_policy(const char *name, const char *ptr) -{ - static char first_fit_key[] = "firstfit"; - static char min_key[] = "min"; - static char max_key[] = "max"; - static char lim_key[] = "lim"; - - nanov2_block_scan_policy_t block_scan_policy = NANO_SCAN_CAPACITY_BASED; - int scan_min_capacity = DEFAULT_SCAN_MIN_CAPACITY; - int scan_max_capacity = DEFAULT_SCAN_MAX_CAPACITY; - int scan_limit = DEFAULT_SCAN_LIMIT; - const char *endp; - boolean_t failed = FALSE; - boolean_t min_found = FALSE; - boolean_t max_found = FALSE; - boolean_t lim_found = FALSE; - const char *value = ptr; - - if (ptr) { - if (!strcmp(ptr, first_fit_key)) { - block_scan_policy = NANO_SCAN_FIRST_FIT; - } else { - while (!failed && ptr && *ptr) { - if (!strncmp(ptr, min_key, sizeof(min_key) - 1) && !min_found) { - min_found = TRUE; - ptr += sizeof(min_key) - 1; - long value = malloc_common_convert_to_long(ptr, &endp); - if (ptr != endp && value >= 0 && value <= 100) { - scan_min_capacity = (int)value; - ptr = endp; - } else { - failed = TRUE; - } - } else if (!strncmp(ptr, max_key, sizeof(max_key) - 1) - && !max_found) { - max_found = TRUE; - ptr += sizeof(max_key) - 1; - long value = malloc_common_convert_to_long(ptr, &endp); - if (ptr != endp && value >= 0 && value <= 100) { - scan_max_capacity = (int)value; - ptr = endp; - } else { - failed = TRUE; - } - } else if (!strncmp(ptr, lim_key, sizeof(lim_key) - 1) - && !lim_found) { - lim_found = TRUE; - ptr += sizeof(lim_key) - 1; - long value = malloc_common_convert_to_long(ptr, &endp); - if (ptr != endp && value >= 0) { - scan_limit = (int)value; - ptr = endp; - } else { - failed = TRUE; - } - } else { - failed = TRUE; - } - if (*ptr) { - if (*ptr == ':') { - ptr++; - } else { - failed = TRUE; - } - } - } - - if (!failed && scan_min_capacity > scan_max_capacity) { - failed = TRUE; - } - } - } - - if (!failed) { - nanov2_policy_config.block_scan_policy = block_scan_policy; - nanov2_policy_config.block_scan_min_capacity = scan_min_capacity; - nanov2_policy_config.block_scan_max_capacity = scan_max_capacity; - nanov2_policy_config.block_scan_limit = scan_limit; - } else { - malloc_report(ASL_LEVEL_ERR, "%s value (%s) invalid - ignored.\n", - name, value); - } -} - -// Configures the nanov2_blocks_by_size_class array. If ptr is not NULL and -// *ptr is not empty, it is expected to be a list of 16 positive integers -// separated by commas that sum to TOTAL_BLOCK_UNITS (which is currently 64). -// For example, as an environment variable: -// MallocNanoSizeClassBlocks=2,7,6,6,6,5,5,5,5,2,2,2,2,2,6,1 -// or as a boot argument: -// nanov2_size_class_blocks=2,7,6,6,6,5,5,5,5,2,2,2,2,2,6,1 -static void -nanov2_set_blocks_by_size_class(const char *name, const char *ptr) -{ - int new_total_block_units = 0; - int new_blocks_by_size_class[NANO_SIZE_CLASSES]; - MALLOC_STATIC_ASSERT( - sizeof(new_blocks_by_size_class) == sizeof(block_units_by_size_class), - "Size mismatch in nanov2_set_blocks_by_size_class()"); - const char *endp; - const char *sptr = ptr; - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - int count = (int)malloc_common_convert_to_long(ptr, &endp); - char separator = i == NANO_SIZE_CLASSES - 1 ? 
'\0' : ','; - if (endp == ptr || *endp != separator || count > TOTAL_BLOCK_UNITS) { - malloc_report(ASL_LEVEL_ERR, - "%s value invalid: [%s] - ignored.\n", name, sptr); - return; - } - new_blocks_by_size_class[i] = count; - new_total_block_units += count; - ptr = endp + 1; - } - - if (new_total_block_units != TOTAL_BLOCK_UNITS) { - malloc_report(ASL_LEVEL_ERR, - "%s value invalid - values must sum to %d, not %d - ignored.\n", - name, TOTAL_BLOCK_UNITS, new_total_block_units); - } else { - memcpy(block_units_by_size_class, new_blocks_by_size_class, - sizeof(block_units_by_size_class)); - } -} - -// First stage initialization. Called during libSystem initialization. -// Reads environment variables and boot arguments and sets the madvise policy, -// single arena list and the block scan policy. Environment variables override -// boot arguments. -void -nanov2_init(const char *envp[], const char *apple[], const char *bootargs) -{ - // Get and process the boot args and environment variables. - char value_buf[256]; - const char *value = _simple_getenv(envp, madvise_policy_env); - const char *name = madvise_policy_env; - if (!value) { - value = malloc_common_value_for_key(bootargs, madvise_policy_bootarg); - if (value) { - name = madvise_policy_bootarg; - } - } - nanov2_set_madvise_policy(name, value); - - name = single_arena_env; - value = _simple_getenv(envp, single_arena_env); - if (!value) { - value = malloc_common_value_for_key_copy(bootargs, single_arena_bootarg, - value_buf, sizeof(value_buf)); - if (value) { - name = single_arena_bootarg; - } - } - nanov2_set_single_arena_size_classes(name, value); - - name = scan_policy_env; - value = _simple_getenv(envp, scan_policy_env); - if (!value) { - value = malloc_common_value_for_key_copy(bootargs, scan_policy_bootarg, - value_buf, sizeof(value_buf)); - if (value) { - name = scan_policy_bootarg; - } - } - nanov2_set_block_scan_policy(name, value); - - name = size_class_blocks_env; - value = _simple_getenv(envp, size_class_blocks_env); - if (!value) { - value = malloc_common_value_for_key_copy(bootargs, size_class_blocks_bootarg, - value_buf, sizeof(value_buf)); - if (value) { - name = size_class_blocks_bootarg; - } - } - if (value) { - nanov2_set_blocks_by_size_class(name, value); - } -} - -static void -nanov2_configure_once(void *context MALLOC_UNUSED) -{ - // Check that the block_units_by_size_class table is consistent. - int total_blocks = 0; - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - total_blocks += block_units_by_size_class[i] * BLOCKS_PER_UNIT; - } - MALLOC_ASSERT(total_blocks == NANOV2_BLOCKS_PER_ARENA); - - // Build the first_block_offset_by_size_class and - // last_block_offset_by_size_class tables. The first entry is special - // because block 0 is reserved for the metadata block, so the first offset - // is 1 and the number of blocks allocated is reduced by 1. - int next_offset = 1; - first_block_offset_by_size_class[0] = next_offset; - next_offset = block_units_by_size_class[0] * BLOCKS_PER_UNIT; - last_block_offset_by_size_class[0] = next_offset - 1; - - for (int i = 1; i < NANO_SIZE_CLASSES; i++) { - first_block_offset_by_size_class[i] = next_offset; - next_offset += block_units_by_size_class[i] * BLOCKS_PER_UNIT; - last_block_offset_by_size_class[i] = next_offset - 1; - } - MALLOC_ASSERT(next_offset == NANOV2_BLOCKS_PER_ARENA); - - // Construct the ptr_offset_to_size_class map, which maps the part of the - // logical address that depends on size class to the corresponding size - // class. 
This would be a simple mask operation if all size classes were of - // equal size, but sadly they are not. - int next_index = 0; - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - int block_units = block_units_by_size_class[i]; - for (int j = 0; j < block_units; j++) { - ptr_offset_to_size_class[next_index++] = i; - } - } - MALLOC_ASSERT(next_index == NANOV2_BLOCKS_PER_ARENA/BLOCKS_PER_UNIT); -} - -static os_once_t nanov2_config_predicate; - -void -nanov2_configure(void) -{ - os_once(&nanov2_config_predicate, NULL, nanov2_configure_once); -} -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Zone Functions - -#if OS_VARIANT_RESOLVED -// Returns the allocation size for a pointer. Uses nanov2_pointer_size() to -// determine whether the pointer is for a Nano V2 allocation and, if not, -// delegates to the helper zone. Returns 0 if the pointer is not to memory -// allocated by Nano V2 or attributable to the helper zone. -MALLOC_NOEXPORT size_t -nanov2_size(nanozonev2_t *nanozone, const void *ptr) -{ - size_t size = nanov2_pointer_size(nanozone, (void *)ptr, FALSE); - return size ? size : nanozone->helper_zone->size(nanozone->helper_zone, ptr); -} - -MALLOC_NOEXPORT void * -nanov2_malloc(nanozonev2_t *nanozone, size_t size) -{ - size_t rounded_size = _nano_common_good_size(size); - if (rounded_size <= NANO_MAX_SIZE) { - void *ptr = nanov2_allocate(nanozone, rounded_size, FALSE); - if (ptr) { - if (os_unlikely(size && (nanozone->debug_flags & MALLOC_DO_SCRIBBLE))) { - memset(ptr, SCRIBBLE_BYTE, size); - } - return ptr; - } - } - - // If we reach this point, we couldn't allocate, so delegate to the - // helper zone. - return nanozone->helper_zone->malloc(nanozone->helper_zone, size); -} - -MALLOC_NOEXPORT void -nanov2_free_definite_size(nanozonev2_t *nanozone, void *ptr, size_t size) -{ - // Check whether it's a Nano pointer and get the size. We should only get - // here if it is and furthermore we already know that "size" is the actual - // rounded size, so don't waste time rechecking that. This is just a - // sanity check. - if (ptr && nanov2_has_valid_signature(ptr)) { - if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) { - memset(ptr, SCRABBLE_BYTE, size); - } - nanov2_free_to_block(nanozone, ptr, nanov2_size_class_from_size(size)); - return; - } - return nanozone->helper_zone->free_definite_size(nanozone->helper_zone, ptr, - size); -} - -MALLOC_NOEXPORT void -nanov2_free(nanozonev2_t *nanozone, void *ptr) -{ - if (ptr && nanov2_has_valid_signature(ptr)) { - // Check whether it's a Nano pointer and get the size. If it's not - // Nano, pass it to the helper zone. - size_t size = nanov2_pointer_size(nanozone, ptr, FALSE); - if (size) { - if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) { - memset(ptr, SCRABBLE_BYTE, size); - } - nanov2_free_to_block(nanozone, ptr, nanov2_size_class_from_size(size)); - return; - } - } - return nanozone->helper_zone->free(nanozone->helper_zone, ptr); -} - -MALLOC_NOEXPORT void * -nanov2_calloc(nanozonev2_t *nanozone, size_t num_items, size_t size) -{ - size_t total_bytes; - if (calloc_get_size(num_items, size, 0, &total_bytes)) { - return NULL; - } - size_t rounded_size = _nano_common_good_size(total_bytes); - if (total_bytes <= NANO_MAX_SIZE) { - void *ptr = nanov2_allocate(nanozone, rounded_size, TRUE); - if (ptr) { - return ptr; - } - } - - // If we reach this point, we couldn't allocate, so delegate to the - // helper zone. 
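- // Illustrative arithmetic (quantum and ceiling assumed: 16-byte quanta, - // 256-byte Nano maximum): calloc(3, 40) yields total_bytes == 120, which - // rounds to 128 and is served from Nano; calloc(3, 100) yields 300 > 256 - // and falls through to the helper zone below.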
- return nanozone->helper_zone->calloc(nanozone->helper_zone, 1, total_bytes); -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED -static void * -nanov2_valloc(nanozonev2_t *nanozone, size_t size) -{ - // Always delegate this to the helper zone. - return nanozone->helper_zone->valloc(nanozone->helper_zone, size); -} -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED -MALLOC_NOEXPORT void * -nanov2_realloc(nanozonev2_t *nanozone, void *ptr, size_t new_size) -{ - // If we are given a NULL pointer, just allocate memory of the requested - // size. - if (ptr == NULL) { - return nanov2_malloc(nanozone, new_size); - } - - size_t old_size = nanov2_pointer_size(nanozone, ptr, FALSE); - if (!old_size) { - // Not a Nano pointer - let the helper deal with it - return nanozone->helper_zone->realloc(nanozone->helper_zone, ptr, new_size); - } - - void *new_ptr; - if (new_size > NANO_MAX_SIZE) { - // Too large for Nano. Try to allocate from the helper zone. - new_ptr = nanozone->helper_zone->malloc(nanozone->helper_zone, new_size); - if (!new_ptr) { - // Failed to allocate - leave the existing allocation alone. - return NULL; - } - } else if (!new_size) { - // Resizing to zero. Free the existing memory and explicitly allocate - // zero bytes. - nanov2_free(nanozone, ptr); - return nanov2_malloc(nanozone, 0); - } else { - size_t new_good_size = _nano_common_good_size(new_size); - if (new_good_size > old_size || new_good_size <= old_size/2) { - // Growing or shrinking to less than half size - we need to - // reallocate. - new_ptr = nanov2_malloc(nanozone, new_good_size); - if (!new_ptr) { - // Failed to allocate - leave the existing allocation alone. - return NULL; - } - } else { - // Same size or shrinking by less than half size. Keep the same - // allocation and clear the area that's being released. - if (new_size != old_size) { - MALLOC_ASSERT(new_size < old_size); - if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) { - memset(ptr + new_size, SCRABBLE_BYTE, old_size - new_size); - } - } - return ptr; - } - } - - // If we reach this point, we allocated new memory. Copy the existing - // content to the new location and release the old allocation. - MALLOC_ASSERT(new_ptr); - memcpy(new_ptr, ptr, MIN(old_size, new_size)); - nanov2_free(nanozone, ptr); - - return new_ptr; -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED -static void -nanov2_destroy(nanozonev2_t *nanozone) -{ - nanozone->helper_zone->destroy(nanozone->helper_zone); - nano_common_deallocate_pages((void *)nanozone, NANOZONEV2_ZONE_PAGED_SIZE, - nanozone->debug_flags); -} -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED -MALLOC_NOEXPORT boolean_t -nanov2_claimed_address(nanozonev2_t *nanozone, void *ptr) -{ - return nanov2_pointer_size(nanozone, ptr, TRUE) - || malloc_zone_claimed_address(nanozone->helper_zone, ptr); -} - -MALLOC_NOEXPORT unsigned -nanov2_batch_malloc(nanozonev2_t *nanozone, size_t size, void **results, - unsigned count) -{ - unsigned allocated = 0; - size_t rounded_size = _nano_common_good_size(size); - if (rounded_size <= NANO_MAX_SIZE) { - while (allocated < count) { - void *ptr = nanov2_allocate(nanozone, rounded_size, FALSE); - if (!ptr) { - break; - } - - *results++ = ptr; - allocated++; - } - if (allocated == count) { - // Allocated everything. - return allocated; - } - } - - // We could not allocate everything. Let the helper zone do the rest. 
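- // Example with assumed counts: for count == 8 when Nano can supply only - // five slots, the loop above fills results[0..4] and the helper zone is - // then asked for the remaining count - allocated == 3 pointers, so the - // caller sees a single combined result.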
- return allocated + nanozone->helper_zone->batch_malloc( - nanozone->helper_zone, size, results, count - allocated); -} - -MALLOC_NOEXPORT void -nanov2_batch_free(nanozonev2_t *nanozone, void **to_be_freed, unsigned count) -{ - if (count) { - while (count--) { - void *ptr = to_be_freed[count]; - if (ptr) { - nanov2_free(nanozone, ptr); - } - } - } -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED -static void * -nanov2_memalign(nanozonev2_t *nanozone, size_t alignment, size_t size) -{ - // Always delegate this to the helper zone. - return nanozone->helper_zone->memalign(nanozone->helper_zone, alignment, - size); -} -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED - -size_t -nanov2_pressure_relief(nanozonev2_t *nanozone, size_t goal) -{ - if (nanov2_madvise_policy != NANO_MADVISE_WARNING_PRESSURE - && nanov2_madvise_policy != NANO_MADVISE_CRITICAL_PRESSURE) { - // In the current implementation, we only get called on warning, so - // act if the policy is either warning or critical. We would need to - // add a new zone entry point to respond to critical. - return 0; - } - const char *name = nanozone->basic_zone.zone_name; - MAGMALLOC_PRESSURERELIEFBEGIN((void *)nanozone, name, (int)goal); - MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_START, - (uint64_t)nanozone, goal, 0, 0); - size_t total = 0; - - // Loop over all arenas madvising blocks that are marked as madvisable, - // until we reach our goal. - nanov2_region_t *region = nanozone->first_region_base; - nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone); - while (region) { - nanov2_arena_t *arena = nanov2_first_arena_for_region(region); - nanov2_arena_t *arena_after_region = nanov2_limit_arena_for_region(nanozone, region); - while (arena < arena_after_region) { - // Scan all of the blocks in the arena, skipping the metadata block. - nanov2_arena_metablock_t *meta_blockp = - nanov2_metablock_address_for_ptr(nanozone, arena); - nanov2_block_meta_t *block_metap = &meta_blockp->arena_block_meta[0]; - - // We need to hold the zone madvise lock to madvise. We could take - // it for the duration of this function, but that might hold up - // ongoing allocation and free operations for too long. So just - // lock and unlock for each arena. - _malloc_lock_lock(&nanozone->madvise_lock); - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; - i++, block_metap++) { - if (i != metablock_meta_index) { - nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed); - if (meta.next_slot == SLOT_CAN_MADVISE) { - nanov2_block_t *blockp = nanov2_block_address_from_meta_index( - nanozone, arena, i); - if (nanov2_madvise_block(nanozone, block_metap, - blockp, nanov2_size_class_for_ptr(nanozone, blockp))) { - total += NANOV2_BLOCK_SIZE; - } - } - } - } - _malloc_lock_unlock(&nanozone->madvise_lock); - if (goal && total >= goal) { - goto done; - } - arena++; - } - region = nanov2_next_region_for_region(nanozone, region); - } - -done: - MAGMALLOC_PRESSURERELIEFEND((void *)nanozone, name, (int)goal, (int)total); - MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_END, - (uint64_t)nanozone, goal, total, 0); - - return total; -} -#endif // OS_VARIANT_RESOLVED - -#pragma mark - -#pragma mark Zone Introspection - -#if OS_VARIANT_NOTRESOLVED - -// NOTE: in the code that follows, addresses that we obtain from the Nano -// structures are relative to the target process. They need to be translated -// before they can be used to read the mapping in this process.
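-// For example (addresses assumed for illustration): if a region sits at -// 0x280000000 in the target task and reader() maps it locally at -// 0x108000000, the offset is 0x178000000; the macros below translate a -// zone pointer to its mapped counterpart by subtracting that offset, and -// back again by adding it.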
- -#define NANOV2_ZONE_PTR_TO_MAPPED_PTR(type, zone_ptr, offset) \ - (type)((mach_vm_address_t)zone_ptr - (mach_vm_offset_t)offset) -#define NANOV2_MAPPED_PTR_TO_ZONE_PTR(type, mapped_ptr, offset) \ - (type)((mach_vm_address_t)mapped_ptr + (mach_vm_offset_t)offset) - -static kern_return_t -nanov2_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, - vm_address_t zone_address, memory_reader_t reader, - vm_range_recorder_t recorder) -{ - // Ensure that we have configured enough of the allocator to be able to - // examine its data structures. In tools that do not directly use Nano, we - // won't have done this yet. nanov2_configure() runs the initialization - // only once. - nanov2_configure(); - - // Only MALLOC_PTR_IN_USE_RANGE_TYPE and MALLOC_PTR_REGION_RANGE_TYPE have - // meaning for Nano. Anything else returns immediately. - if (!(type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE|MALLOC_PTR_REGION_RANGE_TYPE))) { - return 0; - } - - // Read the zone data. - nanozonev2_t *nanozone; - nanozonev2_t zone_copy; - kern_return_t kr; - bitarray_t slots; - - if (!reader) { - reader = nano_common_default_reader; - } - - kr = reader(task, zone_address, sizeof(nanozonev2_t), (void **)&nanozone); - if (kr) { - return kr; - } - boolean_t self_zone = (nanozonev2_t *)zone_address == nanozone; - memcpy(&zone_copy, nanozone, sizeof(zone_copy)); - nanozone = &zone_copy; - nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone); - - // Process the zone one region at a time. Report each in-use block as a - // pointer range and each in-use slot as a pointer. - nanov2_region_t *region = nanozone->first_region_base; - while (region) { - mach_vm_address_t vm_addr = (mach_vm_address_t)NULL; - kern_return_t kr = reader(task, (vm_address_t)region, NANOV2_REGION_SIZE, (void **)&vm_addr); - if (kr) { - return kr; - } - - // ptr_offset is the difference between an address in the target process - // and its mapped address in this process. - mach_vm_offset_t ptr_offset = (mach_vm_address_t)region - vm_addr; - nanov2_arena_t *arena = nanov2_first_arena_for_region(region); - nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region(nanozone, region); - vm_range_t ptr_range; - while (arena < limit_arena) { - // Find the metadata block and process every entry, apart from the - // one for the metadata block itself. - nanov2_arena_metablock_t *arena_meta_blockp = - NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_arena_metablock_t *, - nanov2_metablock_address_for_ptr(nanozone, arena), - ptr_offset); - nanov2_block_meta_t *block_metap = &arena_meta_blockp->arena_block_meta[0]; - - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++, block_metap++) { - if (i == metablock_meta_index) { - // Skip the metadata block. - continue; - } - nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed); - if (!nanov2_is_block_active(meta)) { - continue; - } - - nanov2_block_t *blockp = nanov2_block_address_from_meta_index( - nanozone, arena, i); - if (type_mask & MALLOC_PTR_REGION_RANGE_TYPE) { - // Report this block as an in-use range. - ptr_range.address = (vm_address_t)blockp; - ptr_range.size = NANOV2_BLOCK_SIZE; - recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); - } - if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { - // Report all of the pointers in the block that are not on - // the free list. 
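- // Worked example (counts assumed): for a block whose size class holds - // slot_count == 100 slots with meta.free_count == 9, SLOT_BUMP means the - // free list is empty, so the 100 - 9 - 1 == 90 slots at indices 0..89 are - // in use; SLOT_FULL reports all 100; any other next_slot value requires - // the free-list walk below.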
- nanov2_size_class_t size_class = nanov2_size_class_for_ptr( - nanozone, blockp); - int slot_size = nanov2_size_from_size_class(size_class); - int slot_count = slots_by_size_class[size_class]; - vm_range_t ranges[NANOV2_MAX_SLOTS_PER_BLOCK]; - int range_count = 0; - if (meta.next_slot == SLOT_BUMP || meta.next_slot == SLOT_FULL) { - // Either the block is full or the freelist is empty. If - // it's full, everything is in use. If the free list is - // empty, everything up to slot_count - meta.free_count - 1 - // is in use. - range_count = meta.next_slot == SLOT_BUMP ? - slot_count - meta.free_count - 1 : slot_count; - for (int i = 0; i < range_count; i++) { - ranges[i].address = (vm_address_t)nanov2_slot_in_block_ptr(blockp, size_class, i); - ranges[i].size = slot_size; - } - } else { - // We need to scan the freelist to see what's in use. - int log_size = 64 - __builtin_clzl(slot_count); - if (self_zone) { - // Don't allocate from ourselves! - slots = nanozone->helper_zone->calloc(nanozone->helper_zone, - 1, bitarray_size(log_size)); - } else { - slots = bitarray_create(log_size); - } - for (int i = 0; i < slot_count; i++) { - bitarray_set(slots, log_size, i); - } - - int next_slot = meta.next_slot; - int free_list_count = 0; - while (next_slot != SLOT_BUMP) { - next_slot--; // meta.next_slot is 1-based. - if (next_slot < 0 || next_slot >= slot_count || - !bitarray_get(slots, log_size, next_slot)) { - // Out of range or already seen?? We may have - // snapshotted the block while it was updating. - // Don't go any further to avoid an infinite loop. - break; - } - bitarray_zap(slots, log_size, next_slot); - void *ptr = nanov2_slot_in_block_ptr(blockp, size_class, next_slot); - nanov2_free_slot_t *slotp = NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_free_slot_t *, ptr, ptr_offset); - next_slot = slotp->next_slot; - free_list_count++; - } - // Add a range for each slot that is not on the freelist, - // unless that slot has never been allocated. - int block_free_count = meta.free_count + 1; // actual free count. - int in_use_count = slot_count - block_free_count; - int slots_used_count = in_use_count + free_list_count; - index_t index; - while (bitarray_zap_first_set(slots, log_size, &index)) { - if (index >= slots_used_count) { - // Reached the end of the slots that have been - // allocated at some point. - break; - } - ranges[range_count].address = (vm_address_t)nanov2_slot_in_block_ptr(blockp, size_class, index); - ranges[range_count].size = slot_size; - range_count++; - } - free(slots); - } - if (range_count) { - // Notify the in-use pointers that we found. - recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, ranges, range_count); - } - } - } - arena++; - } - - // We have to manually handle the linkage to the next region because - // of the address slide between this process and the target. - nanov2_region_linkage_t *region_linkagep = - nanov2_region_linkage_for_region(nanozone, region); - nanov2_region_linkage_t *mapped_region_linkagep = - NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_region_linkage_t *, - region_linkagep, ptr_offset); - int offset = mapped_region_linkagep->next_region_offset; - region = offset ? region + offset : NULL; - } - return 0; -} - -static size_t -nanov2_good_size(nanozonev2_t *nanozone, size_t size) -{ - if (size <= NANO_MAX_SIZE) { - return _nano_common_good_size(size); - } - return nanozone->helper_zone->introspect->good_size(nanozone->helper_zone, - size); -} - -static boolean_t -nanov2_check(nanozonev2_t *nanozone) -{ - // Does nothing, just like Nano V1. 
- return 1; -} - -static void -nanov2_print(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - // Ensure that we have configured enough of the allocator to be able to - // examine its data structures. In tools that do not directly use Nano, we - // won't have done this yet. nanov2_configure() runs the initialization - // only once. - nanov2_configure(); - - nanozonev2_t *mapped_nanozone; - if (reader(task, (vm_address_t)zone_address, sizeof(nanozonev2_t), - (void **)&mapped_nanozone)) { - printer("Failed to map nanozonev2_s at %p\n", zone_address); - return; - } - - // Zone-wide statistics - malloc_statistics_t stats; - nanov2_statistics_task_printer(task, zone_address, reader, printer, &stats); - nanov2_statistics_t *nano_stats = &mapped_nanozone->statistics; - printer("Nanozonev2 %p: blocks in use: %llu, size in use: %llu " - "allocated size: %llu, allocated regions: %d, region holes: %d\n", - zone_address, (uint64_t)stats.blocks_in_use, - (uint64_t)stats.size_in_use, (uint64_t)stats.size_allocated, - nano_stats->allocated_regions, nano_stats->region_address_clashes); - -#if DEBUG_MALLOC - // Per-size class statistics - printer("\nPer size-class statistics:\n"); - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - nanov2_size_class_statistics *cs = &nano_stats->size_class_statistics[i]; - printer(" Class %d: ", i); - printer("total alloc: %llu, total frees: %llu, madvised blocks: %llu, " - "madvise races: %llu", - cs->total_allocations, cs->total_frees, cs->madvised_blocks, - cs->madvise_races); - printer("\n"); - } -#endif // DEBUG_MALLOC - - // Per-context block pointers. - printer("Current Allocation Blocks By Size Class/Context [CPU]\n"); - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - printer(" Class %d: ", i); - for (int j = 0; j < MAX_CURRENT_BLOCKS; j++) { - if (mapped_nanozone->current_block[i][j]) { - printer("%d: %p; ", j, mapped_nanozone->current_block[i][j]); - } - } - printer("\n"); - } - - nanov2_meta_index_t metablock_meta_index = - nanov2_metablock_meta_index(mapped_nanozone); - nanov2_region_t *region = mapped_nanozone->first_region_base; - int region_index = 0; - while (region) { - printer("\nRegion %d: base address %p\n", region_index, region); - nanov2_region_t *mapped_region; - if (reader(task, (vm_address_t)region, sizeof(nanov2_region_t), - (void **)&mapped_region)) { - printer("Failed to map nanov2 region at %p\n", region); - return; - } - off_t region_offset = (uintptr_t)mapped_region - (uintptr_t)region; - - nanov2_arena_t *arena = nanov2_first_arena_for_region(region); - nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region( - mapped_nanozone, region); - int arena_index = 0; - while (arena < limit_arena) { - // Find the metadata block and process every entry, apart from the - // one for the metadata block itself. - nanov2_arena_metablock_t *arena_meta_blockp = - nanov2_metablock_address_for_ptr(mapped_nanozone, arena); - nanov2_arena_metablock_t *mapped_arena_meta_blockp = - (nanov2_arena_metablock_t *)((uintptr_t)arena_meta_blockp + region_offset); - nanov2_block_meta_t *block_metap = &arena_meta_blockp->arena_block_meta[0]; - nanov2_block_meta_t *mapped_block_metap = &mapped_arena_meta_blockp->arena_block_meta[0]; - - int active_blocks = 0; - int madvisable_blocks = 0; - int unused_blocks = 0; - int madvised_blocks = 0; - int madvising_blocks = 0; - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) { - if (i == metablock_meta_index) { - // Skip the metadata block. 
- continue; - } - nanov2_block_meta_t meta = mapped_block_metap[i]; - switch (meta.next_slot) { - case SLOT_NULL: - unused_blocks++; - break; - case SLOT_MADVISED: - madvised_blocks++; - break; - case SLOT_MADVISING: - madvising_blocks++; - break; - case SLOT_CAN_MADVISE: - madvisable_blocks++; - break; - default: - active_blocks++; - break; - } - } - printer("Arena #%d: base address %p. Blocks - active: %d, " - "madvisable: %d, madvising: %d, madvised: %d, unused: %d\n", - arena_index, arena, active_blocks, madvisable_blocks, - madvising_blocks, madvised_blocks, unused_blocks); - - // Print which size classes have blocks allocated in this arena. - int non_empty_size_classes[NANO_SIZE_CLASSES]; - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - non_empty_size_classes[i] = 0; - } - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) { - if (i == metablock_meta_index) { - // Skip the metadata block. - continue; - } - nanov2_block_meta_t meta = mapped_block_metap[i]; - nanov2_size_class_t size_class = - nanov2_size_class_for_meta_index(mapped_nanozone, i); - switch (meta.next_slot) { - case SLOT_FULL: - case SLOT_BUMP: - default: - non_empty_size_classes[size_class]++; - break; - case SLOT_NULL: - case SLOT_CAN_MADVISE: - case SLOT_MADVISING: - case SLOT_MADVISED: - // Do not count these. - break; - } - } - printer("Size classes with allocated blocks: "); - for (int i = 0; i < NANO_SIZE_CLASSES; i++) { - if (non_empty_size_classes[i]) { - printer("%d ", i); - } - } - printer("\n"); - - if (level >= MALLOC_VERBOSE_PRINT_LEVEL) { - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) { - if (i == metablock_meta_index) { - // Skip the metadata block. - continue; - } - nanov2_block_meta_t meta = mapped_block_metap[i]; - if (!nanov2_is_block_active(meta)) { - continue; - } - nanov2_size_class_t size_class = - nanov2_size_class_for_meta_index(mapped_nanozone, i); - char *slot_text; - switch (meta.next_slot) { - case SLOT_NULL: - slot_text = "NOT USED"; - break; - case SLOT_FULL: - slot_text = "FULL"; - break; - case SLOT_CAN_MADVISE: - slot_text = "CAN MADVISE"; - break; - case SLOT_MADVISING: - slot_text = "MADVISING"; - break; - case SLOT_MADVISED: - slot_text = "MADVISED"; - break; - default: - slot_text = NULL; - break; - } - printer(" Block %d: base %p; metadata: %p, size %d " - "(class %d) in-use: %d ", - i, nanov2_block_address_from_meta_index(mapped_nanozone, arena, i), - &block_metap[i], nanov2_size_from_size_class(size_class), - size_class, meta.in_use); - if (slot_text) { - printer("%s\n", slot_text); - } else { - int allocated = slots_by_size_class[size_class] - meta.free_count - 1; - if (meta.next_slot == SLOT_BUMP) { - printer("BUMP (free list empty)"); - } else { - printer("next_slot (1-based) = %d", meta.next_slot); - - } - printer(", allocated slots: %d, free slots = %d, " - "occupancy: %d%%\n", - allocated, meta.free_count + 1, - (100 * allocated)/slots_by_size_class[size_class]); - } - } - } - arena++; - arena_index++; - } - - region = nanov2_next_region_for_region_offset(mapped_nanozone, region, - region_offset); - region_index++; - } -} - -static void -nanov2_print_self(nanozonev2_t *nanozone, boolean_t verbose) -{ - nanov2_print(mach_task_self(), verbose ? 
MALLOC_VERBOSE_PRINT_LEVEL : 0, - (vm_address_t)nanozone, _malloc_default_reader, malloc_report_simple); -} - -static void -nanov2_print_task(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - nanov2_print(task, level, zone_address, reader, printer); -} - -static void -nanov2_log(malloc_zone_t *zone, void *log_address) -{ - // Does nothing, just like Nano V1. -} - -static void -nanov2_force_lock(nanozonev2_t *nanozone) -{ - // Nothing to do - Nano V2 does not have a zone lock. -} - -static void -nanov2_force_unlock(nanozonev2_t *nanozone) -{ - // Nothing to do - Nano V2 does not have a zone lock. -} - -static void -nanov2_reinit_lock(nanozonev2_t *nanozone) -{ - // Nothing to do - Nano V2 does not have a zone lock. -} - -static boolean_t -nanov2_locked(nanozonev2_t *nanozone) -{ - // Nothing to do - Nano V2 does not have a zone lock. - return FALSE; -} - -static void -null_printer(const char __unused *fmt, ...) -{ -} - -static kern_return_t -nanov2_statistics(task_t task, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer, - malloc_statistics_t *stats) -{ - printer = printer ? printer : null_printer; - reader = !reader && task == mach_task_self() ? _malloc_default_reader : reader; - - kern_return_t err; - - // Ensure that we have configured enough of the allocator to be able to - // examine its data structures. In tools that do not directly use Nano, we - // won't have done this yet. nanov2_configure() runs the initialization - // only once. - nanov2_configure(); - - memset(stats, '\0', sizeof(*stats)); - - nanozonev2_t *mapped_nanozone; - err = reader(task, (vm_address_t)zone_address, sizeof(nanozonev2_t), - (void **)&mapped_nanozone); - if (err) { - printer("Failed to map nanozonev2_s at %p\n", zone_address); - return err; - } - - nanov2_region_t *region; - nanov2_arena_t *arena; - nanov2_meta_index_t metadata_block_index = - nanov2_metablock_meta_index(mapped_nanozone); - - // Iterate over each arena in each region. Within each region, add - // statistics for each slot in each block, excluding the meta data block. - for (region = mapped_nanozone->first_region_base; region;) { - nanov2_region_t *mapped_region; - err = reader(task, (vm_address_t)region, sizeof(nanov2_region_t), (void **)&mapped_region); - if (err) { - printer("Failed to map nanov2 region at %p\n", region); - return err; - } - off_t region_offset = (uintptr_t)mapped_region - (uintptr_t)region; - for (arena = nanov2_first_arena_for_region(region); - arena < nanov2_limit_arena_for_region(mapped_nanozone, region); - arena++) { - nanov2_arena_metablock_t *meta_block = - nanov2_metablock_address_for_ptr(mapped_nanozone, arena); - nanov2_arena_metablock_t *mapped_meta_block = - (nanov2_arena_metablock_t *)((uintptr_t)meta_block + region_offset); - for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) { - if (i == metadata_block_index) { - // Skip the metadata block. - continue; - } - - nanov2_block_meta_t *mapped_block_metap = &mapped_meta_block->arena_block_meta[i]; - nanov2_size_class_t size_class = - nanov2_size_class_for_meta_index(mapped_nanozone, i); - int slot_size = nanov2_size_from_size_class(size_class); - - nanov2_block_meta_t meta = - os_atomic_load(mapped_block_metap, relaxed); - int slots_in_use = 0; - switch (meta.next_slot) { - case SLOT_NULL: - // FALLTHRU - case SLOT_CAN_MADVISE: - // FALLTHRU - case SLOT_MADVISING: - // FALLTHRU - case SLOT_MADVISED: - // These blocks have no active content. 
- break; - case SLOT_FULL: - slots_in_use = slots_by_size_class[size_class]; - break; - case SLOT_BUMP: - // FALLTHRU - default: - slots_in_use = slots_by_size_class[size_class] - meta.free_count - 1; - break; - } - - // We can't report max_size_in_use because we don't have the - // metadata to do so. - if (slots_in_use) { - stats->blocks_in_use += slots_in_use; - stats->size_in_use += slots_in_use * slot_size; - stats->size_allocated += NANOV2_BLOCK_SIZE; - } - } - } - region = nanov2_next_region_for_region_offset(mapped_nanozone, - region, region_offset); - } - return KERN_SUCCESS; -} - -static void -nanov2_statistics_self(nanozonev2_t *nanozone, malloc_statistics_t *stats) -{ - nanov2_statistics(mach_task_self(), (vm_address_t)nanozone, - _malloc_default_reader, malloc_report_simple, stats); -} - -static kern_return_t -nanov2_statistics_task_printer(task_t task, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer, - malloc_statistics_t *stats) -{ - return nanov2_statistics(task, zone_address, reader, printer, stats); -} - -static kern_return_t -nanov2_statistics_task(task_t task, vm_address_t zone_address, memory_reader_t reader, malloc_statistics_t *stats) -{ - return nanov2_statistics(task, zone_address, reader, NULL, stats); -} - - -static const struct malloc_introspection_t nanov2_introspect = { - .enumerator = (void *)nanov2_ptr_in_use_enumerator, - .good_size = (void *)nanov2_good_size, - .check = (void *)nanov2_check, - .print = (void *)nanov2_print_self, - .log = (void *)nanov2_log, - .force_lock = (void *)nanov2_force_lock, - .force_unlock = (void *)nanov2_force_unlock, - .statistics = (void *)nanov2_statistics_self, - .zone_locked = (void *)nanov2_locked, - .enable_discharge_checking = NULL, - .disable_discharge_checking = NULL, -#ifdef __BLOCKS__ - .enumerate_discharged_pointers = NULL, -#else // __BLOCKS__ - .enumerate_unavailable_without_blocks = NULL, -#endif // __BLOCKS__ - .reinit_lock = (void *)nanov2_reinit_lock, - .print_task = (void *)nanov2_print_task, - .task_statistics = (void*)nanov2_statistics_task, -}; - -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Utility Functions - -#if OS_VARIANT_RESOLVED - -// Given a pointer that may be to Nano V2-allocated memory, returns the size of -// the allocation, or 0 if the pointer does not correspond to an active -// allocation. If allow_inner is true, the pointer need not point to the start -// of the allocation. -size_t -nanov2_pointer_size(nanozonev2_t *nanozone, void *ptr, boolean_t allow_inner) -{ - // First check the address signature. - if (!nanov2_has_valid_signature((void *)ptr)) { - return 0; - } - - // Check for proper alignment, unless we could have an inner pointer. - if (!allow_inner && ((uintptr_t)ptr) & NANO_QUANTA_MASK) { - return 0; - } - - // Bounds check against the active address space. - if (ptr < (void *)nanozone->first_region_base || - ptr > (void *)nanozone->current_region_next_arena) { - return 0; - } - -#if NANOV2_MULTIPLE_REGIONS - // Need to check that the region part is valid because there could be holes. - // Do this only if we know there is a hole. - // NOTE: in M2 convergence, use a hashed structure to make this more - // efficient. 
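- // Cost sketch: the walk below is linear in the number of allocated - // regions, but it runs only when region_address_clashes is non-zero, - // i.e. when at least one region failed to land at its preferred address - // and the region address space may therefore contain holes.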
- if (nanozone->statistics.region_address_clashes) { - nanov2_region_t *ptr_region = nanov2_region_address_for_ptr(ptr); - nanov2_region_t *region = nanozone->first_region_base; - while (region) { - if (ptr_region == region) { - break; - } - region = nanov2_next_region_for_region(nanozone, region); - } - if (!region) { - // Reached the end of the region list without matching - not a - // valid Nano V2 pointer. - return 0; - } - } -#endif // NANOV2_MULTIPLE_REGIONS - - // Get the size class for the pointer and the address of its meta block - // header. - nanov2_size_class_t size_class = nanov2_size_class_for_ptr(nanozone, ptr); - nanov2_block_meta_t *block_metap = nanov2_meta_ptr_for_ptr(nanozone, ptr); - - // Reject if the block is not active, or it doesn't have any allocations. - nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed); - if (!nanov2_is_block_active(meta) || (meta.next_slot != SLOT_FULL && - meta.free_count == slots_by_size_class[size_class] - 1)) { - return 0; - } - - size_t size = nanov2_size_from_size_class(size_class); - nanov2_addr_t addr = { .addr = ptr }; - if (!allow_inner && (addr.fields.nano_offset % size)) { - return 0; - } - - // The only reasonable way to check whether the pointer is free is to - // inspect the canary value at the start of the slot, since we cannot take - // a huge hit for walking the free list. - nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr; - uintptr_t guard = os_atomic_load(&slotp->double_free_guard, relaxed); - if ((guard ^ nanozone->slot_freelist_cookie) == (uintptr_t)ptr) { - return 0; - } - - return size; -} - -#pragma mark - -#pragma mark Madvise Management - -// Given a pointer to a block and its metadata, calls madvise() on that block -// if it is in state SLOT_CAN_MADVISE. Returns true on success, false if the -// block is not in the correct state or if the state changed during the -// operation. -// -// This function must be called with the zone's madvise_lock held -boolean_t -nanov2_madvise_block(nanozonev2_t *nanozone, nanov2_block_meta_t *block_metap, - nanov2_block_t *blockp, nanov2_size_class_t size_class) -{ - _malloc_lock_assert_owner(&nanozone->madvise_lock); - - boolean_t madvised = FALSE; - nanov2_block_meta_t old_meta = os_atomic_load(block_metap, relaxed); - if (old_meta.next_slot == SLOT_CAN_MADVISE) { - // Nobody raced with us. We can safely madvise this block. First change - // the state to SLOT_MADVISING so that other threads don't try to - // grab the block for new allocations. - nanov2_block_meta_t new_meta = { - .next_slot = SLOT_MADVISING, - .gen_count = old_meta.gen_count + 1, - }; - if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta, - relaxed)) { - // Somebody else tampered with this block. This can happen if - // another thread raced with us to allocate in this block. Count - // the contended access. 
- nanozone->statistics.size_class_statistics[size_class].madvise_races++; - return false; - } - - if (mvm_madvise_free(nanozone, nanov2_region_address_for_ptr(blockp), - (uintptr_t)blockp, (uintptr_t)(blockp + 1), NULL, FALSE)) { - malloc_zone_error(0, false, "Failed to madvise block at blockp: %p, error: %d\n", blockp, errno); - } else { - nanozone->statistics.size_class_statistics[size_class].madvised_blocks++; - madvised = TRUE; - } - - nanov2_block_meta_t final_meta = { - .next_slot = SLOT_MADVISED, - .gen_count = new_meta.gen_count + 1, - }; - - if (!os_atomic_cmpxchgv(block_metap, new_meta, final_meta, &old_meta, - relaxed)) { - // This should not happen since we should have exclusive interest - // in this block. - malloc_zone_error(nanozone->debug_flags, false, - "Failed when changing state from MADVISING to MADVISED, " - "block_metap = %p, blockp = %p\n", block_metap, blockp); - } - } - return madvised; -} - -#endif // OS_VARIANT_RESOLVED - -#pragma mark - -#pragma mark Region Management - -#if OS_VARIANT_NOTRESOLVED - -#if NANOV2_MULTIPLE_REGIONS -static nanov2_addr_t nanov2_max_region_base = { - .fields.nano_signature = NANOZONE_SIGNATURE, - .fields.nano_region = NANOV2_MAX_REGION_NUMBER -}; -#endif // NANOV2_MULTIPLE_REGIONS - -// Attempts to allocate VM space for a region at a given address and returns -// whether the allocation succeeded. -static boolean_t -nanov2_allocate_region(nanov2_region_t *region) -{ - MALLOC_TRACE(TRACE_nanov2_region_allocation | DBG_FUNC_START, - (uint64_t)region, 0, 0, 0); - boolean_t result = nano_common_allocate_vm_space((mach_vm_address_t)region, - NANOV2_REGION_SIZE); - MALLOC_TRACE(TRACE_nanov2_region_allocation | DBG_FUNC_END, - (uint64_t)region, result, 0, 0); - return result; -} - -// Allocates a new region adjacent to the current one. If the allocation fails, -// keep sliding up by the size of a region until we either succeed or run out of -// address space. The caller must own the Nanozone regions lock. -MALLOC_NOEXPORT boolean_t -nanov2_allocate_new_region(nanozonev2_t *nanozone) -{ -#if NANOV2_MULTIPLE_REGIONS - boolean_t result = FALSE; - - _malloc_lock_assert_owner(&nanozone->regions_lock); - nanov2_region_t *current_region = nanozone->current_region_base; - nanov2_region_t *next_region = (nanov2_region_t *)nanozone->current_region_limit; - while ((void *)next_region <= nanov2_max_region_base.addr) { - if (nanov2_allocate_region(next_region)) { - nanozone->current_region_base = next_region; - nanozone->current_region_next_arena = (nanov2_arena_t *)next_region; - nanozone->current_region_limit = next_region + 1; - nanozone->statistics.allocated_regions++; - result = TRUE; - break; - } - next_region++; - nanozone->statistics.region_address_clashes++; - } - - if (result) { - // Link this region to the previous one. - nanov2_region_linkage_t *current_region_linkage = - nanov2_region_linkage_for_region(nanozone, current_region); - nanov2_region_linkage_t *next_region_linkage = - nanov2_region_linkage_for_region(nanozone, next_region); - uint16_t offset = next_region - current_region; - current_region_linkage->next_region_offset = offset; - next_region_linkage->next_region_offset = 0; - } - - return result; -#else // NANOV2_MULTIPLE_REGIONS - // On iOS, only one region is supported, so we fail since the first - // region is allocated separately. 
- return FALSE; -#endif // NANOV2_MULTIPLE_REGIONS -} -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Allocation - -#if OS_VARIANT_RESOLVED - -// Allocates memory from the block that corresponds to a given block meta data -// pointer. The memory is taken from the free list if possible, or from the -// unused region of the block if not. If the block is no longer in use or is -// full, NULL is returned and the caller is expected to find another block to -// allocate from. -MALLOC_NOEXPORT -void * -nanov2_allocate_from_block(nanozonev2_t *nanozone, - nanov2_block_meta_t *block_metap, nanov2_size_class_t size_class) -{ - nanov2_block_meta_view_t old_meta_view; - old_meta_view.meta = os_atomic_load(block_metap, relaxed); - - // Calculating blockp and ptr is relatively expensive. Do both lazily to - // minimize the time in the block starting with "again:" and ending with the - // atomic update so that we lose as little time as possible if we have to - // repeat that loop due to contention. This should also reduce the risk of - // contention. - nanov2_block_t *blockp = NULL; - -again: - if (!nanov2_can_allocate_from_block(old_meta_view.meta)) { - // Move along, nothing to allocate here... - return NULL; - } - - int slot; - void *ptr = NULL; - boolean_t from_free_list = FALSE; - nanov2_block_meta_t new_meta = { - .in_use = 1, - .free_count = old_meta_view.meta.free_count - 1, - .gen_count = old_meta_view.meta.gen_count + 1 - }; - - // Grab a slot from the free list or get the next unused slot. We know there - // should be one because the block is not full. - boolean_t slot_full = old_meta_view.meta.free_count == 0; - if (old_meta_view.meta.next_slot == SLOT_BUMP - || old_meta_view.meta.next_slot == SLOT_CAN_MADVISE) { - // Free list empty, grab the next unused slot. - new_meta.next_slot = slot_full ? SLOT_FULL : SLOT_BUMP; - slot = slots_by_size_class[size_class] - old_meta_view.meta.free_count - 1; - } else { - // Grab the first item from the free list. - from_free_list = TRUE; - if (!blockp) { - blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap); - } - slot = old_meta_view.meta.next_slot - 1; // meta.next_slot is 1-based. - ptr = nanov2_slot_in_block_ptr(blockp, size_class, slot); - nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr; - new_meta.next_slot = slot_full ? SLOT_FULL : slotp->next_slot; - } - - // Write the updated meta data; try again if we raced with another thread. - if (!os_atomic_cmpxchgv(block_metap, old_meta_view.meta, new_meta, - &old_meta_view.meta, dependency)) { - if (old_meta_view.meta.next_slot == SLOT_CAN_MADVISE || - old_meta_view.meta.next_slot == SLOT_MADVISING || - old_meta_view.meta.next_slot == SLOT_MADVISED) { - _malloc_lock_lock(&nanozone->madvise_lock); - if (old_meta_view.meta.next_slot == SLOT_MADVISED) { - // We raced against another thread madvising this block. We need - // to redo the madvise because we may have touched it when - // reading the next pointer in the freelist.
- if (!blockp) { - blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap); - } - if (mvm_madvise_free(nanozone, nanov2_region_address_for_ptr(blockp), - (uintptr_t)blockp, (uintptr_t)(blockp + 1), NULL, FALSE)) { - malloc_zone_error(0, false, - "Failed to remadvise block at blockp: %p, error: %d\n", blockp, errno); - } - } - _malloc_lock_unlock(&nanozone->madvise_lock); - } - goto again; - } - - if (!ptr) { - if (!blockp) { - blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap); - } - ptr = nanov2_slot_in_block_ptr(blockp, size_class, slot); - } - - nanov2_free_slot_t *slotp = - (nanov2_free_slot_t *)os_atomic_force_dependency_on(ptr, - (unsigned long)old_meta_view.bits); - if (from_free_list) { - // We grabbed the item from the free list. Check the free list canary - // and crash if it's not valid. We can't do this check before the - // cmpxchgv because another thread may race with us, claim the slot and - // write to it. - uintptr_t guard = os_atomic_load(&slotp->double_free_guard, relaxed); - if ((guard ^ nanozone->slot_freelist_cookie) != (uintptr_t)ptr) { - malloc_zone_error(MALLOC_ABORT_ON_CORRUPTION, false, - "Heap corruption detected, free list is damaged at %p\n" - "*** Incorrect guard value: %lu\n", ptr, guard); - __builtin_unreachable(); - } - } - -#if DEBUG_MALLOC - nanozone->statistics.size_class_statistics[size_class].total_allocations++; -#endif // DEBUG_MALLOC - - return ptr; -} - -// Finds a block for allocation in an arena and returns a pointer to its -// metadata header. The search begins from the block with metadata pointer -// start_block (which must not be NULL). If no acceptable block was found, -// NULL is returned and it is expected that the caller will take appropriate -// action (typically allocate a new arena). -// -// The search starts with start_block. If this is in-use and not full, that -// block is returned. Otherwise, a scan for a usable block is initiated. The -// search starts from start_block and initially works backward towards the -// start of the arena. If this does not succeed, a forward search from -// start_block is made. -// -// A block is considered a candidate if it is not in use. As the scan proceeds, -// we remember blocks which have been madvisable, blocks which have been -// madvised or never used and those blocks which still have allocated slots -// but which fall within the reuse criteria (i.e. their occupancy is within the -// max/min occupancy range). -// -// If the scan policy is NANO_SCAN_FIRST_FIT, we just return the first block -// from the above list that we find. This is the fastest option, but likely -// maximises fragmentation. -// -// Otherwise, the scan policy is NANO_SCAN_CAPACITY_BASED. If we find a block -// that fits the reuse criteria, we return it immediately. Otherwise, we -// continue to scan until we find such a block, or we find a less ideal block -// and we reach the scan limit or exhaust the arena. At that point, we return -// one of the candidate blocks that we found, choosing based on the state of -// that block: -// - blocks that have allocations that are greater than the minimum capacity -// are preferred. -// - failing that, return an unused or madvise'd block. -// - failing that, return a block that is waiting to be madvised. -// -// In order to avoid races, this function must be called with the -// current_block_lock for the calling context [CPU] and size class locked. 
-// On return, the selected block has been marked as in-use, so the caller must -// either assign it as the active allocation block for the calling context or -// clear the in-use bit. -// -MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t * -nanov2_find_block_in_arena(nanozonev2_t *nanozone, - nanov2_arena_t *arena, nanov2_size_class_t size_class, - nanov2_block_meta_t *start_block) -{ - // If we don't have a starting point, start with the first block in the - // arena for the given size class. This is the case where we are looking for - // the first allocation block for a new context (i.e probably a new CPU, so - // take the first fit to avoid having to scan the whole size class for this - // very common start up case.) - boolean_t use_first_fit = !start_block || - nanov2_policy_config.block_scan_policy == NANO_SCAN_FIRST_FIT; - nanov2_block_meta_t *first_block = nanov2_first_block_for_size_class_in_arena( - nanozone, size_class, arena); - boolean_t scanning_backwards; - if (!start_block) { - start_block = first_block; - } - int slots_in_block = slots_by_size_class[size_class]; - nanov2_block_meta_t old_meta; - nanov2_block_meta_t *this_block; - nanov2_block_meta_t *found_block; - nanov2_block_meta_t *madvisable_block; - nanov2_block_meta_t *free_block; - nanov2_block_meta_t *fallback_block; - boolean_t fallback_below_max; - int scan_limit; - - // Check all of the blocks in the size class until we find one that we can - // use, based on nanov2_block_scan_policy. -retry: - this_block = start_block; - found_block = NULL; - madvisable_block = NULL; - free_block = NULL; - fallback_block = NULL; - fallback_below_max = FALSE; - scan_limit = nanov2_policy_config.block_scan_limit; - scanning_backwards = TRUE; - - do { - old_meta = os_atomic_load(this_block, relaxed); - if (!old_meta.in_use && old_meta.next_slot != SLOT_FULL - && old_meta.next_slot != SLOT_MADVISING) { - if (old_meta.next_slot == SLOT_CAN_MADVISE) { - if (!madvisable_block) { - // We can use this block as a last-ditch fallback. - madvisable_block = this_block; - } - } else if (old_meta.next_slot == SLOT_NULL - || old_meta.next_slot == SLOT_MADVISED) { - if (!free_block) { - free_block = this_block; - } - } else if (use_first_fit) { - found_block = this_block; - } else { - MALLOC_ASSERT(nanov2_policy_config.block_scan_policy == NANO_SCAN_CAPACITY_BASED); - int percent_used = (100 * old_meta.free_count)/slots_in_block; - if (percent_used >= nanov2_policy_config.block_scan_min_capacity - && percent_used <= nanov2_policy_config.block_scan_max_capacity) { - // Within specified limits -- take this one. - found_block = this_block; - } else if (percent_used >= nanov2_policy_config.block_scan_min_capacity) { - if (!fallback_block || fallback_below_max) { - // More full than we want, but still acceptable as a - // fallback. - fallback_block = this_block; - } - } else if (!fallback_block - && percent_used < nanov2_policy_config.block_scan_min_capacity) { - // Less full than we want. Keep it as a backup, but set - // fallback_below_max to allow a block that's above max to - // be preferred. The rationale behind this is to allow - // blocks that have low occupancy to drain so that they can - // be madvised. - fallback_block = this_block; - fallback_below_max = TRUE; - } else if (!free_block) { - // Not ideal, but we could use it. - free_block = this_block; - } - } - if (use_first_fit && (found_block || fallback_block || free_block)) { - // Take whatever we got. 
- break; - } - } - - if (scan_limit > 0) { - // Only enforce the scan limit once we have a candidate. - if ((fallback_block || free_block) && --scan_limit == 0) { - break; - } - } - - if (scanning_backwards) { - boolean_t wrapped; - nanov2_block_meta_t *prev_block = nanov2_previous_block_for_size_class( - nanozone, size_class, this_block, &wrapped); - if (wrapped) { - // We wrapped. Scan forward from the start block instead. - scan_limit = nanov2_policy_config.block_scan_limit; - scanning_backwards = FALSE; - this_block = start_block; - } else { - this_block = prev_block; - } - } else { - // Move to the next block, wrapping when we reach the last one for - // this size class. Stop once we get to the block where we started. - this_block = nanov2_next_block_for_size_class(nanozone, size_class, - this_block, NULL); - if (this_block == start_block) { - break; - } - } - } while (!found_block); - - if (!found_block) { - if (fallback_block) { - found_block = fallback_block; - } else if (free_block) { - found_block = free_block; - } else if (madvisable_block) { - found_block = madvisable_block; - } - } - - if (found_block) { - // Now we need to activate the block. If this fails, we look for - // another block. - // If we are bringing a block that is draining back into use, we - // just need to set in_use to 1. Otherwise, we fully initialize it. - old_meta = os_atomic_load(found_block, relaxed); - if (old_meta.next_slot == SLOT_MADVISING) { - goto retry; - } - boolean_t reset_slot = old_meta.next_slot == SLOT_NULL - || old_meta.next_slot == SLOT_CAN_MADVISE - || old_meta.next_slot == SLOT_MADVISED; - nanov2_block_meta_t new_meta = { - .in_use = 1, - .free_count = reset_slot ? slots_in_block - 1 : old_meta.free_count, - .next_slot = reset_slot ? SLOT_BUMP : old_meta.next_slot, - .gen_count = reset_slot ? 0 : old_meta.gen_count + 1, - }; - if (!os_atomic_cmpxchgv(found_block, old_meta, new_meta, &old_meta, - relaxed)) { - goto retry; - } - } - - return found_block; -} - -// Finds a block to allocate from and allocates memory from it. The search -// for a block starts from *block_metapp if not NULL, otherwise from the first -// arena in the first region (which is the case when the first block is allocated -// for a size class for a CPU). -// If none of the blocks for a size class in the current arena can be used, a -// new arena is allocated and, if necessary, a new region is added. -// -// The address of the allocated memory is returned and its metadata pointer is -// stored in *block_metapp. If a new region is required and it can't be -// allocated, NULL is returned and *block_metapp is unmodified. -// -// On success, the returned block is marked as in-use and the block originally -// pointed to by *block_metapp has its in-use bit cleared. -// -// In order to avoid races, this function must be called with the -// current_block_lock for the calling context [CPU] and size class locked. -MALLOC_NOEXPORT MALLOC_NOINLINE void * -nanov2_find_block_and_allocate(nanozonev2_t *nanozone, - nanov2_size_class_t size_class, nanov2_block_meta_t **block_metapp) -{ - nanov2_arena_t *arena; - nanov2_block_meta_t *start_block = os_atomic_load(block_metapp, relaxed); - nanov2_block_meta_t *orig_block = start_block; - if (start_block) { - // Use the arena for the starting block. - arena = nanov2_arena_address_for_ptr(start_block); - } else { - // Start from the first arena.
- arena = nanov2_arena_address_for_ptr(nanozone->first_region_base); - } - - nanov2_region_t *start_region; -retry: - start_region = nanov2_region_address_for_ptr(arena); - nanov2_arena_t *start_arena = arena; - nanov2_region_t *region = start_region; - nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region(nanozone, start_region); - nanov2_arena_t *initial_region_next_arena = nanozone->current_region_next_arena; - do { - nanov2_block_meta_t *block_metap = nanov2_find_block_in_arena(nanozone, - arena, size_class, start_block); - if (block_metap) { - // Try to allocate from this block and return if it succeeds. Note - // that the block is now marked as in-use, so effectively belongs - // to the calling context. - void *ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class); - if (ptr) { - // Make the new block the current one for the calling context. - os_atomic_store(block_metapp, block_metap, relaxed); - - // Turn off in-use in old block_metap, if there is one. - if (orig_block) { - // Turn off in-use in the original current block. - nanov2_turn_off_in_use(orig_block); - } - return ptr; - } - - // We found a block but failed to allocate from it, probably because - // it became full. Look for a new block, using the one that we just - // failed with as the starting point. First, we need to turn off the - // in-use bit for the block that we just failed to allocate from. - nanov2_turn_off_in_use(block_metap); - - start_block = block_metap; - goto retry; - } - - // Try the next arena. If this is the last arena in the region, try the - // next region. - start_block = NULL; - arena++; - if (arena >= limit_arena) { - region = nanov2_next_region_for_region(nanozone, region); - if (!region) { - // Reached the last region -- loop back to the first. - region = nanozone->first_region_base; - } - arena = nanov2_first_arena_for_region(region); - limit_arena = nanov2_limit_arena_for_region(nanozone, region); - } - } while (arena != start_arena); - - // If we get to this point, we need to allocate a new arena and possibly - // a new region. If we are not permitted to do so by policy, return NULL. - if (nanov2_policy_config.single_arena_size_classes & (1 << size_class)) { - return NULL; - } - - // Allocate a new arena and maybe a new region. To do either of those - // things, we need to take the regions_lock. After doing so, check that - // the state is unchanged. If it has, just assume that we might have some - // new space to allocate into and try again. - boolean_t failed = FALSE; - arena = initial_region_next_arena; - _malloc_lock_lock(&nanozone->regions_lock); - if (nanozone->current_region_next_arena == arena) { - if ((void *)arena >= nanozone->current_region_limit) { - // Reached the end of the region. Allocate a new one, if we can. - if (nanov2_allocate_new_region(nanozone)) { - arena = nanozone->current_region_next_arena++; - } else { - failed = TRUE; - } - } else { - // Assign the new arena, in the same region. - nanozone->current_region_next_arena = arena + 1; - } - } - _malloc_lock_unlock(&nanozone->regions_lock); - - if (!failed) { - // Now allocate from the new arena. Since we updated the nanozone, it's - // possible that some other thread has already raced with us to allocate - // some space from it, so just use the normal allocation path to avoid - // assumptions. It's a little more expensive, but this path is rare. - start_block = NULL; - goto retry; - } - - // We need more space and we can't get it. We'll delegate to the helper. 
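- // To summarize the fallback order implemented above: the starting arena's - // blocks, then the remaining arenas and regions, then a newly allocated - // arena (and, if needed, region); only when that expansion also fails do - // we return NULL, which causes nanov2_allocate() to route this size class - // to the helper zone.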
- return NULL; -} - -// Allocates memory of a given size (which must be a multiple of the Nano -// quantum size) and optionally clears it (for calloc). -// -// Allocation is attempted first from the block last used for the caller's -// context (which is initially the physical CPU by default). If there is no -// last block, or the block is full or now out of use, find another one, if -// possible. See the comments for nanov2_get_allocation_block() for the details. -// -// If the allocation fails, NULL is returned. -void * -nanov2_allocate(nanozonev2_t *nanozone, size_t rounded_size, boolean_t clear) -{ - void *ptr = NULL; - nanov2_size_class_t size_class = nanov2_size_class_from_size(rounded_size); - MALLOC_ASSERT(size_class < NANO_SIZE_CLASSES); - MALLOC_ASSERT(rounded_size != 0); - nanov2_block_meta_t *block_metap; - nanov2_block_meta_t **block_metapp; - - // Get the index of the pointer to the block from which we should be - // allocating. This currently depends on the physical CPU number. - int allocation_index = nanov2_get_allocation_block_index() & MAX_CURRENT_BLOCKS_MASK; - - // Get the current allocation block meta data pointer. If this is NULL, - // we need to find a new allocation block. - block_metapp = &nanozone->current_block[size_class][allocation_index]; - block_metap = os_atomic_load(block_metapp, relaxed); - if (block_metap) { - // Fast path: we have a block -- try to allocate from it. - ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class); - if (ptr) { - goto done; - } - } - - // No current allocation block, or we were unable to allocate. We need to - // get a new block. Before doing so, delegate to the helper allocator if - // the size class was full and has not released enough memory yet. - if (nanozone->delegate_allocations & (1 << size_class)) { - ptr = nanozone->helper_zone->malloc(nanozone->helper_zone, rounded_size); - goto done; - } - - // Before we try to get another block, lock and try another allocation, - // which may succeed because another thread may have beaten us to it, or - // some space may have freed up in the current block. - _malloc_lock_s *lock = &nanozone->current_block_lock[size_class][allocation_index]; - _malloc_lock_lock(lock); - - block_metap = os_atomic_load(block_metapp, relaxed); - if (block_metap) { - ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class); - if (ptr) { - // Good to go - keep the current block. - goto unlock; - } - } - - // At this point, we do not have a current allocation block and the old one, - // if there was one, has been marked as not in use. We need to find and - // assign a new block. Since we have the lock, nobody else can change the - // current_block pointer. - ptr = nanov2_find_block_and_allocate(nanozone, size_class, block_metapp); - -unlock: - _malloc_lock_unlock(lock); - - if (!ptr) { - // We could not find a block to allocate from -- make future - // allocations for this size class go to the helper zone until - // we have enough free space. - _malloc_lock_lock(&nanozone->delegate_allocations_lock); - nanozone->delegate_allocations |= 1 << size_class; - _malloc_lock_unlock(&nanozone->delegate_allocations_lock); - } - -done: - if (ptr) { - if (clear) { - memset(ptr, '\0', rounded_size); - } else { - // Always clear the double-free guard so that we can recognize that - // this block is not on the free list.
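- // Guard sketch: a slot on the free list stores (slot_freelist_cookie ^ - // slot address) in double_free_guard, so storing zero here ensures the - // freeness test in nanov2_pointer_size() -- (guard ^ cookie) == ptr -- - // fails for this live allocation, barring the (assumed negligible) case - // where the cookie equals the pointer value.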
- nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr; - os_atomic_store(&slotp->double_free_guard, 0, relaxed); - } - } - return ptr; -} - -#pragma mark - -#pragma mark Freeing - -// Frees an allocation to its owning block and updates the block's state. -// If the block becomes empty, it is marked as SLOT_CAN_MADVISE and is -// madvised immediately if the policy is NANO_MADVISE_IMMEDIATE. -void -nanov2_free_to_block(nanozonev2_t *nanozone, void *ptr, - nanov2_size_class_t size_class) -{ - nanov2_block_t *blockp = nanov2_block_address_for_ptr(ptr); - nanov2_block_meta_t *block_metap = nanov2_meta_ptr_for_ptr(nanozone, ptr); - - // Release the slot memory onto the block's freelist. - nanov2_block_meta_t old_meta = os_atomic_load(block_metap, relaxed); - int slot_count = slots_by_size_class[size_class]; - nanov2_block_meta_t new_meta; - boolean_t was_full; - -again: - was_full = old_meta.next_slot == SLOT_FULL; - new_meta.free_count = old_meta.free_count + 1; - new_meta.in_use = old_meta.in_use; - new_meta.gen_count = old_meta.gen_count + 1; - boolean_t freeing_last_active_slot = !was_full && - new_meta.free_count == slots_by_size_class[size_class] - 1; - if (freeing_last_active_slot) { - // Releasing the last active slot onto the free list. Mark the block as - // ready to be madvised if it's not in use, otherwise reset next_slot - // to SLOT_BUMP. - new_meta.next_slot = new_meta.in_use ? SLOT_BUMP : SLOT_CAN_MADVISE; - // Write the updated meta data; try again if we raced with another thread. - if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta, relaxed)) { - goto again; - } - - // If the block is now empty and it's not in use, madvise it if the policy - // is to do so immediately. - if (new_meta.next_slot == SLOT_CAN_MADVISE && - nanov2_madvise_policy == NANO_MADVISE_IMMEDIATE) { - _malloc_lock_lock(&nanozone->madvise_lock); - nanov2_madvise_block(nanozone, block_metap, blockp, size_class); - _malloc_lock_unlock(&nanozone->madvise_lock); - } - } else { - int slot_index = nanov2_slot_index_in_block(blockp, size_class, ptr); - new_meta.next_slot = slot_index + 1; // meta.next_slot is 1-based - nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr; - slotp->next_slot = was_full ? SLOT_BUMP : old_meta.next_slot; - os_atomic_store(&slotp->double_free_guard, - nanozone->slot_freelist_cookie ^ (uintptr_t)ptr, relaxed); - - // The double_free_guard change must be visible when the os_atomic_cmpxchgv - // completes. - // Write the updated meta data; try again if we raced with another thread. - if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta, release)) { - goto again; - } - } - - // If this size class has been marked as full and this block is below an - // acceptable level of occupancy, turn off delegation to the helper. Do this - // only if the block is not in-use, because an in-use block cannot be a - // candidate when searching for a new block. 
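// (Worked example, assuming a 16-byte size class with 16 KB blocks, i.e.
// slot_count == 16384 / 16 == 1024: delegation is turned off below once
// free_count >= 0.75 * 1024 == 768 free slots, that is, once the block is
// at most 25% occupied.)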
- uint16_t class_mask = 1 << size_class; - if (!new_meta.in_use && (nanozone->delegate_allocations & class_mask) && - (new_meta.free_count >= 0.75 * slot_count)) { - _malloc_lock_lock(&nanozone->delegate_allocations_lock); - nanozone->delegate_allocations &= ~class_mask; - _malloc_lock_unlock(&nanozone->delegate_allocations_lock); - } - -#if DEBUG_MALLOC - nanozone->statistics.size_class_statistics[size_class].total_frees++; -#endif // DEBUG_MALLOC -} - -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Zone Operations - -malloc_zone_t * -nanov2_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags) -{ - // Note: It is important that nanov2_create_zone resets _malloc_engaged_nano - // if it is unable to enable the nanozone (and chooses not to abort), because - // several functions rely on _malloc_engaged_nano to determine whether they - // should manipulate the nanozone, and these should not run if we failed - // to create the zone. - MALLOC_ASSERT(_malloc_engaged_nano == NANO_V2); - - // Get memory for the zone and disable Nano if we fail. - nanozonev2_t *nanozone = nano_common_allocate_based_pages( - NANOZONEV2_ZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC, 0); - if (!nanozone) { - _malloc_engaged_nano = NANO_NONE; - return NULL; - } - - // Set up the basic_zone portion of the nanozonev2 structure - nanozone->basic_zone.version = 12; - nanozone->basic_zone.size = OS_RESOLVED_VARIANT_ADDR(nanov2_size); - nanozone->basic_zone.malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_malloc); - nanozone->basic_zone.calloc = OS_RESOLVED_VARIANT_ADDR(nanov2_calloc); - nanozone->basic_zone.valloc = (void *)nanov2_valloc; - nanozone->basic_zone.free = OS_RESOLVED_VARIANT_ADDR(nanov2_free); - nanozone->basic_zone.realloc = OS_RESOLVED_VARIANT_ADDR(nanov2_realloc); - nanozone->basic_zone.destroy = (void *)nanov2_destroy; - nanozone->basic_zone.batch_malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_batch_malloc); - nanozone->basic_zone.batch_free = OS_RESOLVED_VARIANT_ADDR(nanov2_batch_free); - nanozone->basic_zone.introspect = - (struct malloc_introspection_t *)&nanov2_introspect; - nanozone->basic_zone.memalign = (void *)nanov2_memalign; - nanozone->basic_zone.free_definite_size = OS_RESOLVED_VARIANT_ADDR(nanov2_free_definite_size); - nanozone->basic_zone.pressure_relief = OS_RESOLVED_VARIANT_ADDR(nanov2_pressure_relief); - nanozone->basic_zone.claimed_address = OS_RESOLVED_VARIANT_ADDR(nanov2_claimed_address); - - // Set these both to zero as required by CFAllocator. - nanozone->basic_zone.reserved1 = 0; - nanozone->basic_zone.reserved2 = 0; - - // Prevent overwriting the function pointers in basic_zone. - mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ); - - // Nano V2 zone does not support MALLOC_ADD_GUARD_PAGES - if (debug_flags & MALLOC_ADD_GUARD_PAGES) { - malloc_report(ASL_LEVEL_INFO, "nano does not support guard pages\n"); - debug_flags &= ~MALLOC_ADD_GUARD_PAGES; - } - - // Set up the remainder of the nanozonev2 structure - nanozone->debug_flags = debug_flags; - nanozone->helper_zone = helper_zone; - - // Initialize the cookies used to detect double freeing and for the ASLR - // scramble mapping.
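// (Worked example of the derivation below, with a hypothetical entropy
// value: only bits 16..47 survive COOKIE_ENTROPY_MASK, so
//
//     0x123456789abcdef0 & 0x0000ffffffff0000 == 0x000056789abc0000
//
// The fallbacks exist because a zero cookie would make the XOR-based
// double-free guard in free slots useless.)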
-#define COOKIE_ENTROPY_MASK 0x0000ffffffff0000ULL -#define DEFAULT_ENTROPY_BITS 0x0000DEADDEAD0000ULL - uintptr_t cookie = (uintptr_t)malloc_entropy[0] & COOKIE_ENTROPY_MASK; - if (!cookie) { - cookie = malloc_entropy[1] & COOKIE_ENTROPY_MASK; - if (!cookie) { - // The cookie can't be zero, because it's used to compute the guard - // value in free slots, so make sure we have a non-zero value. Using - // a fixed value allows us to recognize that it isn't real entropy. - cookie = DEFAULT_ENTROPY_BITS; - } - } - nanozone->slot_freelist_cookie = cookie; - - // For the ASLR cookie, we take the top 12 bits of malloc_entropy[1] and - // align it to the block field of a Nano address. - nanozone->aslr_cookie = malloc_entropy[1] >> (64 - NANOV2_BLOCK_BITS); - nanozone->aslr_cookie_aligned = nanozone->aslr_cookie << NANOV2_OFFSET_BITS; - - _malloc_lock_init(&nanozone->blocks_lock); - _malloc_lock_init(&nanozone->regions_lock); - _malloc_lock_init(&nanozone->madvise_lock); - - // Allocate the initial region. If this does not succeed, we disable Nano. - nanov2_addr_t p = {.fields.nano_signature = NANOZONE_SIGNATURE}; - nanov2_region_t *region = (nanov2_region_t *)p.addr; - boolean_t result = nanov2_allocate_region(region); - if (!result) { - nano_common_deallocate_pages(nanozone, NANOZONEV2_ZONE_PAGED_SIZE, 0); - _malloc_engaged_nano = NANO_NONE; - malloc_report(ASL_LEVEL_NOTICE, "nano zone abandoned due to inability " - "to preallocate reserved vm space.\n"); - return NULL; - } - nanov2_region_linkage_t *region_linkage = - nanov2_region_linkage_for_region(nanozone, region); - region_linkage->next_region_offset = 0; - - // Install the first region and pre-allocate the first arena. - nanozone->first_region_base = region; - nanozone->current_region_base = region; - nanozone->current_region_next_arena = ((nanov2_arena_t *)region) + 1; - nanozone->current_region_limit = region + 1; - nanozone->statistics.allocated_regions = 1; - - return (malloc_zone_t *)nanozone; -} -#endif // OS_VARIANT_NOTRESOLVED - -#pragma mark - -#pragma mark Zone Fork Handling - -// Nanomalloc assumes that after a fork, it would be dangerous to rely on -// the integrity of the zone data. During a fork, some of the zone handlers are -// switched to the versions below, which do the following: -// 1. Delegate all new allocation to the helper zone. -// 2. Do nothing when asked to free memory that Nano allocated. There will be a -// leak, but this is better than possibly crashing. - -#if OS_VARIANT_RESOLVED -MALLOC_NOEXPORT void * -nanov2_forked_malloc(nanozonev2_t *nanozone, size_t size) -{ - // Just hand to the helper zone. - return nanozone->helper_zone->malloc(nanozone->helper_zone, size); -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED - -static void * -nanov2_forked_calloc(nanozonev2_t *nanozone, size_t num_items, size_t size) -{ - // Just hand to the helper zone. - return nanozone->helper_zone->calloc(nanozone->helper_zone, num_items, - size); -} - -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED - -MALLOC_NOEXPORT void -nanov2_forked_free(nanozonev2_t *nanozone, void *ptr) -{ - if (!ptr) { - return; // Protect against malloc_zone_free() passing NULL. - } - - // exhausting a slot may result in a pointer with - // the nanozone prefix being given to nano_free via malloc_zone_free. Calling - // vet_and_size here, instead of in _nano_free_check_scribble means we can - // early-out into the helper_zone if it turns out nano does not own this ptr. 
- size_t sz = nanov2_pointer_size(nanozone, ptr, FALSE); - if (sz || nanov2_has_valid_signature(ptr)) { - /* Drop it on the floor as nanozone metadata could be fouled by fork. */ - return; - } else { - nanozone->helper_zone->free(nanozone->helper_zone, ptr); - return; - } - /* NOTREACHED */ -} - -MALLOC_NOEXPORT void -nanov2_forked_free_definite_size(nanozonev2_t *nanozone, void *ptr, size_t size) -{ - nanov2_forked_free(nanozone, ptr); -} - -MALLOC_NOEXPORT void * -nanov2_forked_realloc(nanozonev2_t *nanozone, void *ptr, size_t new_size) -{ - // could occur through malloc_zone_realloc() path - if (!ptr) { - // If ptr is a null pointer, realloc() shall be equivalent to malloc() - // for the specified size. - return nanov2_forked_malloc(nanozone, new_size); - } - - size_t old_size = nanov2_pointer_size(nanozone, ptr, FALSE); - if (!old_size) { - // not-nano pointer, hand down to helper zone - malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone); - return zone->realloc(zone, ptr, new_size); - } else { - if (!new_size) { - // If size is 0 and ptr is not a null pointer, the object pointed to - // is freed. However as nanozone metadata could be fouled by fork, - // we'll intentionally leak it. - - // If size is 0, either a null pointer or a unique pointer that can - // be successfully passed to free() shall be returned. - return nanov2_forked_malloc(nanozone, 1); - } - - void *new_ptr = nanozone->helper_zone->malloc(nanozone->helper_zone, - new_size); - if (new_ptr) { - size_t valid_size = MIN(old_size, new_size); - memcpy(new_ptr, ptr, valid_size); - // Original pointer is intentionally leaked as nanozone metadata - // could be fouled by fork. - return new_ptr; - } else { - // Original ptr is left intact - return NULL; - } - /* NOTREACHED */ - } - /* NOTREACHED */ -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED - -static unsigned -nanov2_forked_batch_malloc(nanozonev2_t *nanozone, size_t size, void **results, - unsigned count) -{ - // Just pass this to the helper zone. - return nanozone->helper_zone->batch_malloc(nanozone->helper_zone, size, - results, count); -} -#endif // OS_VARIANT_NOTRESOLVED - -#if OS_VARIANT_RESOLVED - -MALLOC_NOEXPORT void -nanov2_forked_batch_free(nanozonev2_t *nanozone, void **to_be_freed, - unsigned count) -{ - if (!count) { - return; - } - - while (count--) { - void *ptr = to_be_freed[count]; - if (ptr) { - nanov2_forked_free(nanozone, ptr); - } - } -} -#endif // OS_VARIANT_RESOLVED - -#if OS_VARIANT_NOTRESOLVED - -static boolean_t -nanov2_forked_claimed_address(struct _malloc_zone_t *zone, void *ptr) -{ - // This does not operate after fork - default to true to avoid - // false negatives. - return true; -} - -void -nanov2_forked_zone(nanozonev2_t *nanozone) -{ - // Hobble the nano zone in the child of a fork prior to an exec since - // the state of the zone can be made inconsistent by a parent thread while - // the fork is underway. All new allocations will be referred to the helper - // zone (which is more stable.) All free()'s of existing nano objects will - // be leaked. 
- mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ | PROT_WRITE); - - nanozone->basic_zone.size = OS_RESOLVED_VARIANT_ADDR(nanov2_size); // Unchanged - nanozone->basic_zone.malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_malloc); - nanozone->basic_zone.calloc = (void *)nanov2_forked_calloc; - nanozone->basic_zone.valloc = (void *)nanov2_valloc; // Unchanged - nanozone->basic_zone.free = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_free); - nanozone->basic_zone.realloc = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_realloc); - nanozone->basic_zone.destroy = (void *)nanov2_destroy; // Unchanged - nanozone->basic_zone.batch_malloc = (void *)nanov2_forked_batch_malloc; - nanozone->basic_zone.batch_free = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_batch_free); - nanozone->basic_zone.introspect = - (struct malloc_introspection_t *)&nanov2_introspect;// Unchanged - nanozone->basic_zone.memalign = (void *)nanov2_memalign; // Unchanged - nanozone->basic_zone.free_definite_size = - OS_RESOLVED_VARIANT_ADDR(nanov2_forked_free_definite_size); - nanozone->basic_zone.claimed_address = nanov2_forked_claimed_address; - mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ); -} - -#endif // OS_VARIANT_NOTRESOLVED - -#endif // CONFIG_NANOZONE diff --git a/src/libmalloc/src/nanov2_malloc.h b/src/libmalloc/src/nanov2_malloc.h deleted file mode 100644 index bb85ff70d..000000000 --- a/src/libmalloc/src/nanov2_malloc.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __NANOV2_MALLOC_H -#define __NANOV2_MALLOC_H - -// Forward declaration for the nanozonev2 structure. -typedef struct nanozonev2_s nanozonev2_t; - -MALLOC_NOEXPORT -void -nanov2_init(const char *envp[], const char *apple[], const char *bootargs); - -MALLOC_NOEXPORT -void -nanov2_configure(void); - -MALLOC_NOEXPORT -malloc_zone_t * -nanov2_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags); - -MALLOC_NOEXPORT -void -nanov2_forked_zone(nanozonev2_t *nanozone); - -#endif // __NANOV2_MALLOC_H diff --git a/src/libmalloc/src/nanov2_zone.h b/src/libmalloc/src/nanov2_zone.h deleted file mode 100644 index 9fe0cedf6..000000000 --- a/src/libmalloc/src/nanov2_zone.h +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __NANOV2_ZONE_H -#define __NANOV2_ZONE_H - -#if CONFIG_NANOZONE - -#pragma mark - -#pragma mark Address Structure - -#if TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT - -#define NANOV2_REGION_BITS 15 -#define NANOV2_ARENA_BITS 3 -#define NANOV2_BLOCK_BITS 12 -#define NANOV2_OFFSET_BITS 14 - -#else // TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT - -#define NANOV2_REGION_BITS 0 -#define NANOV2_ARENA_BITS 3 -#define NANOV2_BLOCK_BITS 12 -#define NANOV2_OFFSET_BITS 14 - -#endif // TARGET_OS_OSX || TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT - -#if NANOV2_REGION_BITS > 0 -#define NANOV2_MULTIPLE_REGIONS 1 -#else // NANOV2_REGION_BITS > 0 -#define NANOV2_MULTIPLE_REGIONS 0 -#endif // NANOV2_REGION_BITS > 0 - -// Size of a block (currently 16KB) -#define NANOV2_BLOCK_SIZE (1 << NANOV2_OFFSET_BITS) - -// Size of an arena (currently 64MB) -#define NANOV2_ARENA_SIZE (64 * 1024 * 1024) - -// Size of a region (currently 512MB) -#define NANOV2_REGION_SIZE (512 * 1024 * 1024) - -// Number of blocks per arena (currently 4096) -#define NANOV2_BLOCKS_PER_ARENA (NANOV2_ARENA_SIZE/NANOV2_BLOCK_SIZE) - -// Number of arenas per region (currently 8) -#define NANOV2_ARENAS_PER_REGION (NANOV2_REGION_SIZE/NANOV2_ARENA_SIZE) - -// Maximum number of slots per block -#define NANOV2_MAX_SLOTS_PER_BLOCK (NANOV2_BLOCK_SIZE/NANO_REGIME_QUANTA_SIZE) - -// Highest region number. -#if NANOV2_MULTIPLE_REGIONS -#define NANOV2_MAX_REGION_NUMBER ((1 << NANOV2_REGION_BITS) - 1) -#else // NANOV2_MULTIPLE_REGIONS -#define NANOV2_MAX_REGION_NUMBER 0 -#endif // NANOV2_MULTIPLE_REGIONS - -// clang-format really dislikes the bitfields here -// clang-format off -#if defined(__BIG_ENDIAN__) - -// Nano V2 address structure. -struct nanov2_addr_s { - uintptr_t nano_signature : NANOZONE_SIGNATURE_BITS; -#if NANOV2_MULTIPLE_REGIONS - uintptr_t nano_region: NANOV2_REGION_BITS; -#endif // NANOV2_MULTIPLE_REGIONS - uintptr_t nano_arena : NANOV2_ARENA_BITS; - uintptr_t nano_block : NANOV2_BLOCK_BITS; - uintptr_t nano_offset : NANOV2_OFFSET_BITS; -}; -MALLOC_STATIC_ASSERT(sizeof(struct nanov2_addr_s) == sizeof(uintptr_t), - "Wrong size for nanov2_addr_s"); - -#else // defined(__BIG_ENDIAN__) - -// least significant bits declared first -struct nanov2_addr_s { - uintptr_t nano_offset : NANOV2_OFFSET_BITS; - uintptr_t nano_block : NANOV2_BLOCK_BITS; - uintptr_t nano_arena : NANOV2_ARENA_BITS; -#if NANOV2_MULTIPLE_REGIONS - uintptr_t nano_region: NANOV2_REGION_BITS; -#endif // NANOV2_MULTIPLE_REGIONS - uintptr_t nano_signature : NANOZONE_SIGNATURE_BITS; -}; -MALLOC_STATIC_ASSERT(sizeof(struct nanov2_addr_s) == sizeof(uintptr_t), - "Wrong size for nanov2_addr_s"); - -#endif // defined(__BIG_ENDIAN__) - -// clang-format on - -// Union that allows easy extraction of the fields in a Nano V2 address. 
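// (Usage sketch for the union defined below: field extraction is a struct
// copy rather than explicit shift/mask arithmetic, e.g.
//
//     nanov2_addr_t a = { .addr = ptr };
//     unsigned block = a.fields.nano_block; // block index within its arena
//
// The NANOV2_*_ADDRESS_MASK macros at the end of this header serve the same
// purpose when only the base address of a block, arena or region is needed.)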
-typedef union { - void *addr; - struct nanov2_addr_s fields; -} nanov2_addr_t; - -// Typedef that tags a size class value. Range is 0 to NANO_SIZE_CLASSES - 1. -typedef unsigned nanov2_size_class_t; - -#pragma mark - -#pragma mark Block Definitions - -// A block is a chunk of NANOV2_BLOCK_SIZE bytes of memory. -typedef struct { - unsigned char content[NANOV2_BLOCK_SIZE]; -} nanov2_block_t; - -MALLOC_STATIC_ASSERT(sizeof(nanov2_block_t) == NANOV2_BLOCK_SIZE, - "nanov2_block_t must be the same size as a block"); - -#pragma mark - -#pragma mark Arena and Block Definitions - -// An arena is an array of NANOV2_BLOCKS_PER_ARENA blocks. -typedef struct { - nanov2_block_t blocks[NANOV2_BLOCKS_PER_ARENA]; -} nanov2_arena_t; - -MALLOC_STATIC_ASSERT(sizeof(nanov2_arena_t) == NANOV2_BLOCK_SIZE * NANOV2_BLOCKS_PER_ARENA, - "nanov2_arena_t must be the same size as its blocks"); - -// Per-block header structure, embedded in the arena metadata block. -typedef struct { - uint32_t next_slot : 11; // Next slot on free list, 1-based. - uint32_t free_count : 10; // Free slots in this block - 1 - uint32_t gen_count : 10; // A-B-A count - uint32_t in_use : 1; // Being used for allocations. -} nanov2_block_meta_t; -MALLOC_STATIC_ASSERT(sizeof(nanov2_block_meta_t) == sizeof(uint32_t), - "Incorrect size for nanov2_block_meta_t"); - -// Distinguished values of next_slot -#define SLOT_NULL 0 // Slot has never been used. -#define SLOT_BUMP 0x7fb // Marks the end of the free list -#define SLOT_FULL 0x7fc // Slot is full (no free slots) -#define SLOT_CAN_MADVISE 0x7fd // Block can be madvised (and in_use == 0) -#define SLOT_MADVISING 0x7fe // Block is being madvised. Do not touch -#define SLOT_MADVISED 0x7ff // Block has been madvised. - -// View of the per-block header structure that allows it to be used where a -// primitive type is required. -typedef union { - nanov2_block_meta_t meta; - uint32_t bits; -} nanov2_block_meta_view_t; - -// Structure overlaid onto an arena's metadata block. This must be exactly -// the same size as a block. -typedef struct { - nanov2_block_meta_t arena_block_meta[NANOV2_BLOCKS_PER_ARENA]; -} nanov2_arena_metablock_t; - -MALLOC_STATIC_ASSERT(sizeof(nanov2_arena_metablock_t) == NANOV2_BLOCK_SIZE, - "nanov2_arena_metablock_t must be the same size as a block"); - -// Structure overlaid on slots that are on the block freelist. -typedef struct { - uint64_t double_free_guard; - uint16_t next_slot; -} nanov2_free_slot_t; - -MALLOC_STATIC_ASSERT( - sizeof(nanov2_free_slot_t) <= NANO_REGIME_QUANTA_SIZE, - "nanov2_free_slot_t too large"); - -// Type for the index of a block in its hosting arena. -typedef unsigned nanov2_block_index_t; - -// Type for the index of a block meta structure in its hosting metadata block. -typedef unsigned nanov2_meta_index_t; - -#pragma mark - -#pragma mark Region Definitions - -// A region is an array of NANOV2_ARENAS_PER_REGION arenas. -typedef struct { - nanov2_arena_t arenas[NANOV2_ARENAS_PER_REGION]; -} nanov2_region_t; - -// Linkage between regions. Overlays the nanov2_block_meta_t that corresponds -// to the arena metadata block, so must be the same size as nanov2_block_meta_t. 
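// (Sketch of how the offset is used: since pointer arithmetic on
// nanov2_region_t * advances in whole 512MB regions, the successor region
// would be located as
//
//     nanov2_region_t *next = region + linkage->next_region_offset;
//
// with an offset of 0 marking the end of the chain, as when the first
// region is installed by nanov2_create_zone.)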
-typedef struct { - uint16_t next_region_offset; // Offset to next region in 512MB blocks - uint16_t unused; -} nanov2_region_linkage_t; - -MALLOC_STATIC_ASSERT( - sizeof (nanov2_block_meta_t) == sizeof(nanov2_region_linkage_t), - "nanov2_block_meta_t must be the same size as nanov2_region_linkage_t"); - -#pragma mark - -#pragma mark Statistics - -typedef struct { - uint64_t total_allocations; - uint64_t total_frees; - uint64_t madvised_blocks; // Does not reduce when reused. - uint64_t madvise_races; // Reused while being madvised. -} nanov2_size_class_statistics; - -typedef struct { - // Number of allocated regions - unsigned allocated_regions; - - // Number of times a region could not be placed at its preferred location - unsigned region_address_clashes; - - // Statistics collected by size class - nanov2_size_class_statistics size_class_statistics[NANO_SIZE_CLASSES]; -} nanov2_statistics_t; - -#pragma mark - -#pragma mark Zone Definitions - -// Maximum number of currently active allocation blocks per size class. -// Initially, the default is for each physical CPU to have a dedicated block. -#define MAX_CURRENT_BLOCKS 64 -#define MAX_CURRENT_BLOCKS_MASK (MAX_CURRENT_BLOCKS - 1) -MALLOC_STATIC_ASSERT(MAX_CURRENT_BLOCKS > 1 && - !(MAX_CURRENT_BLOCKS & MAX_CURRENT_BLOCKS_MASK), - "MAX_CURRENT_BLOCKS must be a power of 2"); - -typedef struct nanozonev2_s { - // first page will be given read-only protection - malloc_zone_t basic_zone; - uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)]; - - // Current allocation blocks. Indexed by a factor that may change in the - // future (and may be tuneable). Initially indexed by physical CPU number. - nanov2_block_meta_t *current_block[NANO_SIZE_CLASSES][MAX_CURRENT_BLOCKS]; - - // Locks for the current allocation blocks. - _malloc_lock_s current_block_lock[NANO_SIZE_CLASSES][MAX_CURRENT_BLOCKS]; - - // Lock for delegate_allocations. - _malloc_lock_s delegate_allocations_lock; - - // Mask of size classes for which allocation should be delegated when a new - // block is needed and the class has become full. - uint16_t delegate_allocations; - - // Zone debug flags. - unsigned debug_flags; - - // Cookie used for ASLR within an arena. - uint64_t aslr_cookie; - uint64_t aslr_cookie_aligned; - - // cookie used to protect linkage on the block freelist - uintptr_t slot_freelist_cookie; - - // The zone to which allocations that cannot be satisfied by Nano V2 - // will be handed off. - malloc_zone_t *helper_zone; - - // Lock used to serialize access to current_block. - _malloc_lock_s blocks_lock; - - // Lock used to protect current_region_base, current_region_next_arena and - // current_region_limit. - _malloc_lock_s regions_lock; - - // Base address of the first region. Fixed once set. - nanov2_region_t *first_region_base; - - // Base address of the current region. Always the most recently allocated - // region and therefore the one with the highest base address. - nanov2_region_t *current_region_base; - - // Address to use for the next arena. Always between current_region_base - // and current_region_limit. - nanov2_arena_t *current_region_next_arena; - - // Limit address of the current region (first byte after the region). - void *current_region_limit; - - // Lock used when madvising. 
- _malloc_lock_s madvise_lock; - - // Global and per-size class statistics - nanov2_statistics_t statistics; -} nanozonev2_t; - -#define NANOZONEV2_ZONE_PAGED_SIZE mach_vm_round_page(sizeof(nanozonev2_t)) - -#pragma mark - -#pragma mark Address Manipulation - -#define NANOV2_BLOCK_ADDRESS_MASK ~((1ULL << (NANOV2_OFFSET_BITS)) - 1) -#define NANOV2_ARENA_ADDRESS_MASK \ - ~((1ULL << (NANOV2_BLOCK_BITS + NANOV2_OFFSET_BITS)) - 1) -#define NANOV2_REGION_ADDRESS_MASK \ - ~((1ULL << (NANOV2_ARENA_BITS + NANOV2_BLOCK_BITS + NANOV2_OFFSET_BITS)) - 1) - -#endif // CONFIG_NANOZONE - -#endif // __NANOV2_ZONE_H - diff --git a/src/libmalloc/src/platform.h b/src/libmalloc/src/platform.h deleted file mode 100644 index 25d18b155..000000000 --- a/src/libmalloc/src/platform.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __PLATFORM_H -#define __PLATFORM_H - -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR -#define MALLOC_TARGET_IOS 1 -#else // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR -#define MALLOC_TARGET_IOS 0 -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -#ifdef __LP64__ -#define MALLOC_TARGET_64BIT 1 -#else // __LP64__ -#define MALLOC_TARGET_64BIT 0 -#endif - -// -#if MALLOC_TARGET_IOS -# define CONFIG_MADVISE_PRESSURE_RELIEF 0 -#else // MALLOC_TARGET_IOS -# define CONFIG_MADVISE_PRESSURE_RELIEF 1 -#endif // MALLOC_TARGET_IOS - -// -#if MALLOC_TARGET_IOS -# define CONFIG_RECIRC_DEPOT 1 -# define CONFIG_AGGRESSIVE_MADVISE 1 -#else // MALLOC_TARGET_IOS -# define CONFIG_RECIRC_DEPOT 1 -# define CONFIG_AGGRESSIVE_MADVISE 0 -#endif // MALLOC_TARGET_IOS - -// -#define CONFIG_RELAXED_INVARIANT_CHECKS 1 - -// -#define CONFIG_MADVISE_STYLE MADV_FREE_REUSABLE - -#if MALLOC_TARGET_64BIT -#define CONFIG_NANOZONE 1 -#define CONFIG_ASLR_INTERNAL 0 -#else // MALLOC_TARGET_64BIT -#define CONFIG_NANOZONE 0 -#define CONFIG_ASLR_INTERNAL 1 -#endif // MALLOC_TARGET_64BIT - -// enable nano checking for corrupt free list -#define NANO_FREE_DEQUEUE_DILIGENCE 1 - -// This governs a last-free cache of 1 that bypasses the free-list for each region size -#define CONFIG_TINY_CACHE 1 -#define CONFIG_SMALL_CACHE 1 -#define CONFIG_MEDIUM_CACHE 1 - -// medium allocator enabled or disabled -#if MALLOC_TARGET_64BIT -#if MALLOC_TARGET_IOS -#define CONFIG_MEDIUM_ALLOCATOR 0 -#else // MALLOC_TARGET_IOS -#define CONFIG_MEDIUM_ALLOCATOR 1 -#endif // MALLOC_TARGET_IOS -#else // MALLOC_TARGET_64BIT -#define CONFIG_MEDIUM_ALLOCATOR 0 -#endif // MALLOC_TARGET_64BIT - -// The large last-free cache (aka. 
death row cache) -#if MALLOC_TARGET_IOS -#define CONFIG_LARGE_CACHE 0 -#else -#define CONFIG_LARGE_CACHE 1 -#endif - -#if MALLOC_TARGET_IOS -// The VM system on iOS forces malloc-tagged memory to never be marked as -// copy-on-write; this would include calls we make to vm_copy. Given that the -// kernel would just be doing a memcpy, we force it to happen in userspace. -#define CONFIG_REALLOC_CAN_USE_VMCOPY 0 -#else -#define CONFIG_REALLOC_CAN_USE_VMCOPY 1 -#endif - -// memory resource exception handling -#if MALLOC_TARGET_IOS || TARGET_OS_SIMULATOR -#define ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING 0 -#else -#define ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING 1 -#endif - -// presence of commpage memsize -#define CONFIG_HAS_COMMPAGE_MEMSIZE 1 - -// presence of commpage number of cpu count -#define CONFIG_HAS_COMMPAGE_NCPUS 1 - -// Use of hyper-shift for magazine selection. -#define CONFIG_NANO_USES_HYPER_SHIFT 0 -#define CONFIG_TINY_USES_HYPER_SHIFT 0 -#define CONFIG_SMALL_USES_HYPER_SHIFT 0 - -#endif // __PLATFORM_H diff --git a/src/libmalloc/src/printf.h b/src/libmalloc/src/printf.h deleted file mode 100644 index 5f87a25a4..000000000 --- a/src/libmalloc/src/printf.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#define MALLOC_REPORT_NOLOG 0x10 -#define MALLOC_REPORT_NOPREFIX 0x20 -#define MALLOC_REPORT_CRASH 0x40 -#define MALLOC_REPORT_DEBUG 0x80 - -// Most internal logging should use malloc_report() or malloc_vreport(). The -// flags argument should be a combination of the MALLOC_REPORT_xxx values and -// an optional log level encoded using the ASL_LEVEL_xxx constants. The log -// level is ignored if MALLOC_REPORT_NOLOG is set. -// -// The flags do the following: -// MALLOC_REPORT_NOLOG: -// Does not send the text to _simple_asl_log(). -// MALLOC_REPORT_NOPREFIX: -// Does not write the program name, pid and thread identifier before -// the report text. -// MALLOC_REPORT_DEBUG: -// Includes text suggesting that a breakpoint could be set -// on malloc_error_break() to debug this kind of error. -// MALLOC_REPORT_CRASH: -// Same as MALLOC_REPORT_DEBUG, but crashes after writing the report -// message. -// -// In addition, if MALLOC_REPORT_CRASH or MALLOC_REPORT_DEBUG are specified, -// this function will sleep for an hour or send a SIGSTOP signal to the -// process, depending on whether the MallocErrorSleep and MallocErrorStop -// environment variables were set, and the report text will include a message -// indicating that this is happening. In the case of MALLOC_REPORT_CRASH, the -// crash occurs after all of the other actions have completed.
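// (Usage sketch, not from the original file: a log level and flags are ORed
// into the single flags argument, so a fatal report might look like
//
//     malloc_report(ASL_LEVEL_ERR | MALLOC_REPORT_CRASH,
//         "pointer %p was not allocated\n", ptr);
//
// which logs the message with the standard prefix and then crashes once the
// report has been written.)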
-MALLOC_NOEXPORT MALLOC_NOINLINE void -malloc_report(uint32_t flags, const char *fmt, ...) __printflike(2,3); - -// Like malloc_report(), but does not send the text to _simple_asl_log() and -// does not write the program name, pid and thread identifier before the report -// text. Equivalent to malloc_report(MALLOC_REPORT_NOLOG|MALLOC_REPORT_NOPREFIX) -MALLOC_NOEXPORT MALLOC_NOINLINE void -malloc_report_simple(const char *fmt, ...) __printflike(1,2); - -// Like malloc_report(), but precedes the output message with prefix_msg -// as a format string using prefix_arg as a single substitution parameter, -// allows the length of time to sleep while reporting an error to be -// specified and passes the arguments to the fmt parameter in a va_list. -MALLOC_NOEXPORT MALLOC_NOINLINE void -malloc_vreport(uint32_t flags, unsigned sleep_time, const char *prefix_msg, - const void *prefix_arg, const char *fmt, va_list ap); - -// Higher-level functions used by zone implementations to report errors. -MALLOC_NOEXPORT MALLOC_NOINLINE void -malloc_zone_error(uint32_t flags, bool is_corruption, const char *fmt, ...) __printflike(3,4); - -MALLOC_NOEXPORT MALLOC_NOINLINE void -malloc_zone_check_fail(const char *msg, const char *fmt, ...) __printflike(2,3); - -// Configures where malloc logging goes based on environment variables. By -// default, goes to stderr if it's a tty, and is otherwise dropped. -MALLOC_NOEXPORT void -malloc_print_configure(bool restricted); diff --git a/src/libmalloc/src/purgeable_malloc.c b/src/libmalloc/src/purgeable_malloc.c deleted file mode 100644 index 8b5172dee..000000000 --- a/src/libmalloc/src/purgeable_malloc.c +++ /dev/null @@ -1,426 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - - -#include "internal.h" - -// -// purgeable zones have their own "large" allocation pool, but share "tiny" and "small" -// heaps with a helper_zone identified in the call to create_purgeable_zone() -// -static size_t -purgeable_size(szone_t *szone, const void *ptr) -{ - // Only claim our large allocations, leave the shared tiny/small for the helper zone to claim.
- return szone_size_try_large(szone, ptr); -} - -static void * -purgeable_malloc(szone_t *szone, size_t size) -{ - if (size <= LARGE_THRESHOLD(szone)) { - return szone_malloc(szone->helper_zone, size); - } else { - return szone_malloc(szone, size); - } -} - -static void * -purgeable_calloc(szone_t *szone, size_t num_items, size_t size) -{ - size_t total_bytes; - - if (calloc_get_size(num_items, size, 0, &total_bytes)) { - return NULL; - } - - if (total_bytes <= LARGE_THRESHOLD(szone)) { - return szone_calloc(szone->helper_zone, 1, total_bytes); - } else { - return szone_calloc(szone, 1, total_bytes); - } -} - -static void * -purgeable_valloc(szone_t *szone, size_t size) -{ - if (size <= LARGE_THRESHOLD(szone)) { - return szone_valloc(szone->helper_zone, size); - } else { - return szone_valloc(szone, size); - } -} - -static void -purgeable_free(szone_t *szone, void *ptr) -{ - large_entry_t *entry; - - SZONE_LOCK(szone); - entry = large_entry_for_pointer_no_lock(szone, ptr); - SZONE_UNLOCK(szone); - if (entry) { - return free_large(szone, ptr); - } else { - return szone_free(szone->helper_zone, ptr); - } -} - -static void -purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size) -{ - if (size <= LARGE_THRESHOLD(szone)) { - return szone_free_definite_size(szone->helper_zone, ptr, size); - } else { - return szone_free_definite_size(szone, ptr, size); - } -} - -static void * -purgeable_realloc(szone_t *szone, void *ptr, size_t new_size) -{ - size_t old_size; - - if (NULL == ptr) { - // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. - return purgeable_malloc(szone, new_size); - } else if (0 == new_size) { - // If size is 0 and ptr is not a null pointer, the object pointed to is freed. - purgeable_free(szone, ptr); - // If size is 0, either a null pointer or a unique pointer that can be successfully passed - // to free() shall be returned. - return purgeable_malloc(szone, 1); - } - - old_size = purgeable_size(szone, ptr); // Now ptr can be safely size()'d - if (!old_size) { - old_size = szone_size(szone->helper_zone, ptr); - } - - if (!old_size) { - malloc_zone_error(szone->debug_flags, true, "pointer %p being reallocated was not allocated\n", ptr); - return NULL; - } - - // Distinguish 4 cases: {oldsize, newsize} x { <= , > large_threshold } - // and deal with the allocation crossing from the purgeable zone to the helper zone and vice versa. 
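// (Summary of the four cases handled below, writing T for
// LARGE_THRESHOLD(szone):
//     old <= T, new <= T: stays in the helper zone, plain szone_realloc
//     old <= T, new > T: copy helper -> purgeable, free the helper copy
//     old > T, new <= T: copy purgeable -> helper, free the purgeable copy
//     old > T, new > T: purgeable -> purgeable, copying min(old, new))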
- if (old_size <= LARGE_THRESHOLD(szone)) { - if (new_size <= LARGE_THRESHOLD(szone)) { - return szone_realloc(szone->helper_zone, ptr, new_size); - } else { - // allocation crosses from helper to purgeable zone - void *new_ptr = purgeable_malloc(szone, new_size); - if (new_ptr) { - memcpy(new_ptr, ptr, old_size); - szone_free_definite_size(szone->helper_zone, ptr, old_size); - } - return new_ptr; // in state VM_PURGABLE_NONVOLATILE - } - } else { - if (new_size <= LARGE_THRESHOLD(szone)) { - // allocation crosses from purgeable to helper zone - void *new_ptr = szone_malloc(szone->helper_zone, new_size); - if (new_ptr) { - memcpy(new_ptr, ptr, new_size); - purgeable_free_definite_size(szone, ptr, old_size); - } - return new_ptr; - } else { - void *new_ptr = purgeable_malloc(szone, new_size); - if (new_ptr) { - memcpy(new_ptr, ptr, MIN(old_size, new_size)); - purgeable_free_definite_size(szone, ptr, old_size); - } - return new_ptr; // in state VM_PURGABLE_NONVOLATILE - } - } - /* NOTREACHED */ -} - -static void -purgeable_destroy(szone_t *szone) -{ - /* destroy large entries */ - size_t index = szone->num_large_entries; - large_entry_t *large; - vm_range_t range_to_deallocate; - - while (index--) { - large = szone->large_entries + index; - if (large->address) { - // we deallocate_pages, including guard pages - mvm_deallocate_pages((void *)(large->address), large->size, szone->debug_flags); - } - } - large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); - if (range_to_deallocate.size) { - mvm_deallocate_pages((void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); - } - - /* Now destroy the separate szone region */ - mvm_deallocate_pages((void *)szone, SZONE_PAGED_SIZE, 0); -} - -static unsigned -purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) -{ - return szone_batch_malloc(szone->helper_zone, size, results, count); -} - -static void -purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count) -{ - return szone_batch_free(szone->helper_zone, to_be_freed, count); -} - -static void * -purgeable_memalign(szone_t *szone, size_t alignment, size_t size) -{ - if (size <= LARGE_THRESHOLD(szone)) { - return szone_memalign(szone->helper_zone, alignment, size); - } else { - return szone_memalign(szone, alignment, size); - } -} - -static kern_return_t -purgeable_ptr_in_use_enumerator(task_t task, - void *context, - unsigned type_mask, - vm_address_t zone_address, - memory_reader_t reader, - vm_range_recorder_t recorder) -{ - szone_t *szone; - kern_return_t err; - - if (!reader) { - reader = _malloc_default_reader; - } - - err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); - if (err) { - return err; - } - - err = large_in_use_enumerator(task, context, type_mask, - (vm_address_t)szone->large_entries, szone->num_large_entries, - reader, recorder); - return err; -} - -static size_t -purgeable_good_size(szone_t *szone, size_t size) -{ - if (size <= LARGE_THRESHOLD(szone)) { - return szone_good_size(szone->helper_zone, size); - } else { - return szone_good_size(szone, size); - } -} - -static boolean_t -purgeable_check(szone_t *szone) -{ - return 1; -} - -static void -purgeable_print(task_t task, unsigned level MALLOC_UNUSED, - vm_address_t zone_address, memory_reader_t reader, - print_task_printer_t printer) -{ - szone_t *szone; - if (reader(task, zone_address, sizeof(szone_t), (void **)&szone)) { - printer("Purgeable zone %p: inUse=%u(%y) flags=%d\n", zone_address, - 
szone->num_large_objects_in_use, - (int)szone->num_bytes_in_large_objects, - szone->debug_flags); - } -} - -static void -purgeable_print_self(szone_t *szone, boolean_t verbose) -{ - purgeable_print(mach_task_self(), verbose ? MALLOC_VERBOSE_PRINT_LEVEL : 0, - (vm_address_t)szone, _malloc_default_reader, malloc_report_simple); -} - -static void -purgeable_print_task(task_t task, unsigned level, vm_address_t zone_address, - memory_reader_t reader, print_task_printer_t printer) -{ - purgeable_print(task, level, zone_address, reader, printer); -} - -static void -purgeable_log(malloc_zone_t *zone, void *log_address) -{ - szone_t *szone = (szone_t *)zone; - - szone->log_address = log_address; -} - -static void -purgeable_force_lock(szone_t *szone) -{ - SZONE_LOCK(szone); -} - -static void -purgeable_force_unlock(szone_t *szone) -{ - SZONE_UNLOCK(szone); -} - -static void -purgeable_reinit_lock(szone_t *szone) -{ - SZONE_REINIT_LOCK(szone); -} - -static void -purgeable_statistics(szone_t *szone, malloc_statistics_t *stats) -{ - stats->blocks_in_use = szone->num_large_objects_in_use; - stats->size_in_use = stats->max_size_in_use = stats->size_allocated = szone->num_bytes_in_large_objects; -} - -static boolean_t -purgeable_locked(szone_t *szone) -{ - int tookLock; - - tookLock = SZONE_TRY_LOCK(szone); - if (tookLock == 0) { - return 1; - } - SZONE_UNLOCK(szone); - return 0; -} - -static size_t -purgeable_pressure_relief(szone_t *szone, size_t goal) -{ - return szone_pressure_relief(szone, goal) + szone_pressure_relief(szone->helper_zone, goal); -} - -static const struct malloc_introspection_t purgeable_introspect = { - (void *)purgeable_ptr_in_use_enumerator, - (void *)purgeable_good_size, - (void *)purgeable_check, - (void *)purgeable_print_self, - (void *)purgeable_log, - (void *)purgeable_force_lock, - (void *)purgeable_force_unlock, - (void *)purgeable_statistics, - (void *)purgeable_locked, - NULL, NULL, NULL, NULL, /* Zone enumeration version 7 and forward. */ - (void *)purgeable_reinit_lock, // reinit_lock version 9 and forward - (void *)purgeable_print_task, // print_task version 11 and forward -}; // marked as const to spare the DATA section - - -static boolean_t -purgeable_claimed_address(szone_t *szone, void *ptr) -{ - return szone_claimed_address(szone->helper_zone, ptr); -} - -malloc_zone_t * -create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags) -{ - szone_t *szone; - uint64_t hw_memsize = 0; - - /* get memory for the zone. */ - szone = mvm_allocate_pages(SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC); - if (!szone) { - return NULL; - } - - /* set up the szone structure */ -#if 0 -#warning LOG enabled - szone->log_address = ~0; -#endif - -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__) - hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE; -#else - size_t uint64_t_size = sizeof(hw_memsize); - - sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0); -#endif - - rack_init(&szone->tiny_rack, RACK_TYPE_TINY, 0, debug_flags | MALLOC_PURGEABLE); - rack_init(&szone->small_rack, RACK_TYPE_SMALL, 0, debug_flags | MALLOC_PURGEABLE); - -#if CONFIG_LARGE_CACHE - // madvise(..., MADV_REUSABLE) death-row arrivals above this threshold [~0.1%] - szone->large_entry_cache_reserve_limit = (size_t)(hw_memsize >> 10); - - /* Reset protection when returning a previous large allocation? 
*/ - int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System"); - if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */) { - szone->large_legacy_reset_mprotect = TRUE; - } else { - szone->large_legacy_reset_mprotect = FALSE; - } -#endif - - szone->basic_zone.version = 11; - szone->basic_zone.size = (void *)purgeable_size; - szone->basic_zone.malloc = (void *)purgeable_malloc; - szone->basic_zone.calloc = (void *)purgeable_calloc; - szone->basic_zone.valloc = (void *)purgeable_valloc; - szone->basic_zone.free = (void *)purgeable_free; - szone->basic_zone.realloc = (void *)purgeable_realloc; - szone->basic_zone.destroy = (void *)purgeable_destroy; - szone->basic_zone.batch_malloc = (void *)purgeable_batch_malloc; - szone->basic_zone.batch_free = (void *)purgeable_batch_free; - szone->basic_zone.introspect = (struct malloc_introspection_t *)&purgeable_introspect; - szone->basic_zone.memalign = (void *)purgeable_memalign; - szone->basic_zone.free_definite_size = (void *)purgeable_free_definite_size; - szone->basic_zone.pressure_relief = (void *)purgeable_pressure_relief; - szone->basic_zone.claimed_address = (void *)purgeable_claimed_address; - - szone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */ - szone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */ - mprotect(szone, sizeof(szone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */ - - szone->debug_flags = debug_flags | MALLOC_PURGEABLE; - - /* Purgeable zone does not support MALLOC_ADD_GUARD_PAGES. */ - if (szone->debug_flags & MALLOC_ADD_GUARD_PAGES) { - malloc_report(ASL_LEVEL_INFO, "purgeable zone does not support guard pages\n"); - szone->debug_flags &= ~MALLOC_ADD_GUARD_PAGES; - } - - _malloc_lock_init(&szone->large_szone_lock); - - szone->helper_zone = (struct szone_s *)malloc_default_zone; - - CHECK(szone, __PRETTY_FUNCTION__); - return (malloc_zone_t *)szone; -} diff --git a/src/libmalloc/src/purgeable_malloc.h b/src/libmalloc/src/purgeable_malloc.h deleted file mode 100644 index 20e6705ff..000000000 --- a/src/libmalloc/src/purgeable_malloc.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __PURGEABLE_MALLOC_H -#define __PURGEABLE_MALLOC_H - -/* Create a new zone that supports malloc_make{un}purgeable() discipline. 
*/ -MALLOC_NOEXPORT -malloc_zone_t * -create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags); - -#endif // __PURGEABLE_MALLOC_H diff --git a/src/libmalloc/src/thresholds.h b/src/libmalloc/src/thresholds.h deleted file mode 100644 index 554603891..000000000 --- a/src/libmalloc/src/thresholds.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __THRESHOLDS_H -#define __THRESHOLDS_H - -/* - * The actual threshold boundaries between allocators. These boundaries are - * where the next allocator will take over and they are *inclusive* of the - * limit value. That is, a TINY limit of X implies that an X-byte - * allocation will come from TINY. - * - * The LARGE allocator cuts in at whatever the last boundary limit is. So, when - * the medium allocator is compiled out, or not engaged, then large starts at - * the limit of SMALL. - */ -#if MALLOC_TARGET_64BIT -#define TINY_LIMIT_THRESHOLD (1008) -#else // MALLOC_TARGET_64BIT -#define TINY_LIMIT_THRESHOLD (496) -#endif // MALLOC_TARGET_64BIT - -#if MALLOC_TARGET_IOS -#define SMALL_LIMIT_THRESHOLD (15 * 1024) -#else // MALLOC_TARGET_IOS -#define SMALL_LIMIT_THRESHOLD (32 * 1024) -#endif // MALLOC_TARGET_IOS -#define MEDIUM_LIMIT_THRESHOLD (8 * 1024 * 1024) - -/* - * Tiny region size definitions; these are split into quanta of 16 bytes, - * 64504 blocks is the magical value of how many quanta we can fit in a 1mb - * region including the region trailer and metadata. - */ -#define SHIFT_TINY_QUANTUM 4ull -#define SHIFT_TINY_CEIL_BLOCKS 16 // ceil(log2(NUM_TINY_BLOCKS)) -#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM) -#define NUM_TINY_BLOCKS 64504 -#define NUM_TINY_CEIL_BLOCKS (1 << SHIFT_TINY_CEIL_BLOCKS) -#define NUM_TINY_SLOTS (TINY_LIMIT_THRESHOLD >> SHIFT_TINY_QUANTUM) - -#if MALLOC_TARGET_64BIT -#define TINY_BITMAP_RANGE_LIMIT 63 -#else -#define TINY_BITMAP_RANGE_LIMIT 31 -#endif - -/* - * Small region size definitions. - * - * We can only represent up to 1<<15 for msize; but we choose to stay - * even below that to avoid the convention msize=0 => msize = (1<<15) - */ -#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9 -#define SHIFT_SMALL_CEIL_BLOCKS 14 // ceil(log2(NUM_SMALL_BLOCKs)) -#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes -#define SMALL_BLOCKS_ALIGN (SHIFT_SMALL_CEIL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23 -#define NUM_SMALL_BLOCKS 16319 -#define NUM_SMALL_CEIL_BLOCKS (1 << SHIFT_SMALL_CEIL_BLOCKS) -#define NUM_SMALL_SLOTS (SMALL_LIMIT_THRESHOLD >> SHIFT_SMALL_QUANTUM) - -/* - * Medium region size definitions. 
- * - * We can only represent up to 1<<15 for msize; but we choose to stay - * even below that to avoid the convention msize=0 => msize = (1<<15) - */ -#define SHIFT_MEDIUM_QUANTUM (SHIFT_SMALL_QUANTUM + 6) // 15 -#define SHIFT_MEDIUM_CEIL_BLOCKS 12ull // ceil(log2(NUM_MEDIUM_BLOCKS)) -#define MEDIUM_QUANTUM ((uint64_t)(1 << SHIFT_MEDIUM_QUANTUM)) // 32kbytes -#define MEDIUM_BLOCKS_ALIGN (SHIFT_MEDIUM_CEIL_BLOCKS + SHIFT_MEDIUM_QUANTUM) // 27 -#define NUM_MEDIUM_BLOCKS 4095 -#define NUM_MEDIUM_CEIL_BLOCKS (1ull << SHIFT_MEDIUM_CEIL_BLOCKS) -#define NUM_MEDIUM_SLOTS (MEDIUM_LIMIT_THRESHOLD >> SHIFT_MEDIUM_QUANTUM) -#define MEDIUM_ACTIVATION_THRESHOLD (32ull * 1024 * 1024 * 1024) -#define MEDIUM_CONDITIONAL_MADVISE_LIMIT (2 * 1024 * 1024) -#define MEDIUM_MADVISE_SHIFT 4 -#define MEDIUM_MADVISE_MIN ((3 * 1024 * 1024) / 2) // 1.5 megabytes - -/* - * When performing a realloc() that must fall back to creating a new allocation - * and copying the previous contents to the new allocation, vm_copy is used if - * the allocation is greater than a given size. - * - * This threshold must be set such that all eligible allocations would have - * come from a page-sized, page-aligned allocator (so, medium or large). - * - * Note: iOS disables this threshold because the VM forces non-sharing from - * malloc-tagged allocations. - */ -#define VM_COPY_THRESHOLD (2 * 1024 * 1024) - -/* - * Extremely old versions of Microsoft Word - * (and, subsequently, versions of Adobe apps) required the Leopard behaviour - * where LARGE allocations were zero-filled prior to returning them to the - * caller. - * - * We've always used LARGE_THRESHOLD to denote this boundary but as we keep - * moving it around it's better to fix it at the point it was originally. - */ -#define LEGACY_ZEROING_THRESHOLD (127 * 1024) - -/* - * Large entry cache (death row) sizes. The large cache is bounded with - * an overall top limit size; each entry is allowed a given slice of - * that limit. - */ - -#define LARGE_CACHE_EXPANDED_THRESHOLD (32ull * 1024 * 1024 * 1024) - -#if MALLOC_TARGET_64BIT -#define LARGE_ENTRY_CACHE_SIZE_HIGH 64 -#define LARGE_ENTRY_SIZE_ENTRY_LIMIT_HIGH (512 * 1024 * 1024) -// lowmem config -#define LARGE_ENTRY_CACHE_SIZE_LOW 16 -#define LARGE_ENTRY_SIZE_ENTRY_LIMIT_LOW (128 * 1024 * 1024) -#else // MALLOC_TARGET_64BIT -#define LARGE_ENTRY_CACHE_SIZE_HIGH 8 -#define LARGE_ENTRY_SIZE_ENTRY_LIMIT_HIGH (32 * 1024 * 1024) -// lowmem config same as "highmem" -#define LARGE_ENTRY_CACHE_SIZE_LOW LARGE_ENTRY_CACHE_SIZE_HIGH -#define LARGE_ENTRY_SIZE_ENTRY_LIMIT_LOW LARGE_ENTRY_SIZE_ENTRY_LIMIT_HIGH -#endif // MALLOC_TARGET_64BIT - -/* - * Large entry cache (death row) "flotsam" limits. Until the large cache - * contains at least "high" bytes, the cache is not cleaned under memory - * pressure. After that, memory pressure notifications cause cache cleaning - * until the large cache drops below the "low" limit. - */ -#define SZONE_FLOTSAM_THRESHOLD_LOW (1024 * 512) -#define SZONE_FLOTSAM_THRESHOLD_HIGH (1024 * 1024) - -/* - * The magazine freelist array must be large enough to accommodate the allocation - * granularity of the tiny, small and medium allocators. In addition, the last - * slot in the list is special and reserved for coalesced regions bigger than - * the overall max allocation size of the allocator.
- */ -#define MAGAZINE_FREELIST_SLOTS (NUM_MEDIUM_SLOTS + 1) -#define MAGAZINE_FREELIST_BITMAP_WORDS ((MAGAZINE_FREELIST_SLOTS + 31) >> 5) - -/* - * Density threshold used in determining the level of emptiness before - * moving regions to the recirc depot. - */ -#define DENSITY_THRESHOLD(a) \ - ((a) - ((a) >> 2)) // "Emptiness" f = 0.25, so "Density" is (1 - f)*a. Generally: ((a) - ((a) >> -log2(f))) - -/* - * Minimum number of regions to retain in a recirc depot. - */ -#define DEFAULT_RECIRC_RETAINED_REGIONS 2 - -/* Sanity checks. */ - -// Tiny performs an ffsl of a uint64_t in order to determine how big an -// allocation is. Therefore, the total allocation size of tiny cannot exceed -// 63-bits worth of 16-byte quanta (64-bits but minus one for the start of the -// allocation itself). -MALLOC_STATIC_ASSERT((TINY_LIMIT_THRESHOLD / TINY_QUANTUM) <= TINY_BITMAP_RANGE_LIMIT, - "TINY_LIMIT_THRESHOLD cannot exceed TINY_BITMAP_RANGE_LIMIT-bits worth of metadata"); - -// Check that the given threshold limits are a round multiple of their -// allocator's quantum size. -MALLOC_STATIC_ASSERT((TINY_LIMIT_THRESHOLD % TINY_QUANTUM) == 0, - "TINY_LIMIT_THRESHOLD must be a multiple of TINY_QUANTUM"); - -MALLOC_STATIC_ASSERT((SMALL_LIMIT_THRESHOLD % SMALL_QUANTUM) == 0, - "SMALL_LIMIT_THRESHOLD must be a multiple of SMALL_QUANTUM"); - -MALLOC_STATIC_ASSERT((MEDIUM_LIMIT_THRESHOLD % MEDIUM_QUANTUM) == 0, - "MEDIUM_LIMIT_THRESHOLD must be a multiple of MEDIUM_QUANTUM"); - -// All the "slot" counts are calculated as a shift of thresholds now but -// in case someone decides to try hand-crafted values, make sure they adhere to -// the basic expectation that slot count must account for all valid sizes of -// allocations. -MALLOC_STATIC_ASSERT(NUM_TINY_SLOTS >= TINY_LIMIT_THRESHOLD >> SHIFT_TINY_QUANTUM, - "NUM_TINY_SLOTS must allow a free list for every valid TINY allocation"); - -MALLOC_STATIC_ASSERT(NUM_SMALL_SLOTS >= SMALL_LIMIT_THRESHOLD >> SHIFT_SMALL_QUANTUM, - "NUM_SMALL_SLOTS must allow a free list for every valid SMALL allocation"); - -MALLOC_STATIC_ASSERT(NUM_MEDIUM_SLOTS >= MEDIUM_LIMIT_THRESHOLD >> SHIFT_MEDIUM_QUANTUM, - "NUM_MEDIUM_SLOTS must allow a free list for every valid MEDIUM allocation"); - -// MAGAZINE_FREELIST_SLOTS cannot be dynamically selected by the MAX() of all -// three allocators, so it must match (at least) the maximum slot count of the -// allocator with the largest range. -// -// Additionally, each allocator assumes that there is one additional free-list -// slot above their maximum allocation size. This allows each allocator to -// store an unordered list of maximally-sized free list entries. -MALLOC_STATIC_ASSERT(NUM_TINY_SLOTS < MAGAZINE_FREELIST_SLOTS, - "NUM_TINY_SLOTS must be less than MAGAZINE_FREELIST_SLOTS"); - -MALLOC_STATIC_ASSERT(NUM_SMALL_SLOTS < MAGAZINE_FREELIST_SLOTS, - "NUM_SMALL_SLOTS must be less than MAGAZINE_FREELIST_SLOTS"); - -MALLOC_STATIC_ASSERT(NUM_MEDIUM_SLOTS < MAGAZINE_FREELIST_SLOTS, - "NUM_MEDIUM_SLOTS must be less than MAGAZINE_FREELIST_SLOTS"); - -MALLOC_STATIC_ASSERT(VM_COPY_THRESHOLD >= SMALL_LIMIT_THRESHOLD, - "VM_COPY_THRESHOLD must be larger than SMALL_LIMIT_THRESHOLD"); - -#endif // __THRESHOLDS_H diff --git a/src/libmalloc/src/trace.h b/src/libmalloc/src/trace.h deleted file mode 100644 index 02226b499..000000000 --- a/src/libmalloc/src/trace.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved.
- * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __TRACE_H -#define __TRACE_H - -// defines these two subclasses for us: -// DBG_UMALLOC_EXTERNAL - for external entry points into malloc -// DBG_UMALLOC_INTERNAL - for tracing internal malloc state - -#ifndef _MALLOC_BUILDING_CODES_ -#include -#define MALLOC_TRACE(code,arg1,arg2,arg3,arg4) \ - { if (malloc_tracing_enabled) { kdebug_trace(code, arg1, arg2, arg3, arg4); } } -#define TRACE_CODE(name, subclass, code) \ - static const int TRACE_##name = KDBG_EVENTID(DBG_UMALLOC, subclass, code) -#else -# define DBG_UMALLOC 51 -# define DBG_UMALLOC_EXTERNAL 0x1 -# define DBG_UMALLOC_INTERNAL 0x2 -# define STR(x) #x -# define TRACE_CODE(name, subclass, code) \ - printf("0x%x\t%s\n", ((DBG_UMALLOC << 24) | ((subclass & 0xff) << 16) | ((code & 0x3fff) << 2)), STR(name)) -#endif - -// "external" trace points -TRACE_CODE(malloc, DBG_UMALLOC_EXTERNAL, 0x01); -TRACE_CODE(free, DBG_UMALLOC_EXTERNAL, 0x02); -TRACE_CODE(realloc, DBG_UMALLOC_EXTERNAL, 0x03); -TRACE_CODE(memalign, DBG_UMALLOC_EXTERNAL, 0x04); -TRACE_CODE(calloc, DBG_UMALLOC_EXTERNAL, 0x05); -TRACE_CODE(valloc, DBG_UMALLOC_EXTERNAL, 0x06); - -// "internal" trace points -TRACE_CODE(nano_malloc, DBG_UMALLOC_INTERNAL, 0x1); -TRACE_CODE(tiny_malloc, DBG_UMALLOC_INTERNAL, 0x2); -TRACE_CODE(small_malloc, DBG_UMALLOC_INTERNAL, 0x3); -TRACE_CODE(large_malloc, DBG_UMALLOC_INTERNAL, 0x4); -TRACE_CODE(nano_free, DBG_UMALLOC_INTERNAL, 0x5); -TRACE_CODE(tiny_free, DBG_UMALLOC_INTERNAL, 0x6); -TRACE_CODE(small_free, DBG_UMALLOC_INTERNAL, 0x7); -TRACE_CODE(large_free, DBG_UMALLOC_INTERNAL, 0x8); -TRACE_CODE(malloc_memory_pressure, DBG_UMALLOC_INTERNAL, 0x9); -TRACE_CODE(nano_memory_pressure, DBG_UMALLOC_INTERNAL, 0xa); -TRACE_CODE(madvise, DBG_UMALLOC_INTERNAL, 0xb); -TRACE_CODE(medium_malloc, DBG_UMALLOC_INTERNAL, 0xc); -TRACE_CODE(medium_free, DBG_UMALLOC_INTERNAL, 0xd); - -TRACE_CODE(nanov2_region_allocation, DBG_UMALLOC_INTERNAL, 0x10); - -#endif // __TRACE_H diff --git a/src/libmalloc/src/vm.c b/src/libmalloc/src/vm.c deleted file mode 100644 index bd56221ec..000000000 --- a/src/libmalloc/src/vm.c +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. 
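For orientation, the TRACE_CODE table above produces ordinary 32-bit kdebug event IDs. A minimal standalone sketch (constants restated from the #else branch above; the helper name is invented, and this is not part of the removed sources):

    #include <assert.h>
    #include <stdint.h>

    #define DBG_UMALLOC          51
    #define DBG_UMALLOC_EXTERNAL 0x1

    /* Mirrors the (class << 24) | (subclass << 16) | (code << 2) layout. */
    static uint32_t
    umalloc_eventid(uint32_t subclass, uint32_t code)
    {
        return ((uint32_t)DBG_UMALLOC << 24) | ((subclass & 0xff) << 16) | ((code & 0x3fff) << 2);
    }

    int
    main(void)
    {
        /* TRACE_CODE(malloc, DBG_UMALLOC_EXTERNAL, 0x01) packs to 0x33010004;
         * the low two bits stay clear for DBG_FUNC_START/DBG_FUNC_END. */
        assert(umalloc_eventid(DBG_UMALLOC_EXTERNAL, 0x01) == 0x33010004);
        return 0;
    }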
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -static volatile uintptr_t entropic_address = 0; -static volatile uintptr_t entropic_limit = 0; - -MALLOC_NOEXPORT -uint64_t malloc_entropy[2] = {0, 0}; - -#define ENTROPIC_KABILLION 0x10000000 /* 256Mb */ - -// align 64bit ARM shift to 32MB PTE entries -#if MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT -#define ENTROPIC_SHIFT 25 -#else // MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT -#define ENTROPIC_SHIFT SMALL_BLOCKS_ALIGN -#endif - -void -mvm_aslr_init(void) -{ - // Prepare ASLR -#if __i386__ || __x86_64__ || __arm64__ || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) -#if __i386__ - uintptr_t stackbase = 0x8fe00000; - int entropic_bits = 3; -#elif __x86_64__ - uintptr_t stackbase = USRSTACK64; - int entropic_bits = 16; -#elif __arm64__ -#if __LP64__ - uintptr_t stackbase = USRSTACK64; - int entropic_bits = 7; -#else // __LP64__ - uintptr_t stackbase = USRSTACK; - int entropic_bits = 3; -#endif -#else - uintptr_t stackbase = USRSTACK; - int entropic_bits = 3; -#endif - // assert(((1 << entropic_bits) - 1) << SMALL_BLOCKS_ALIGN < (stackbase - MAXSSIZ - ENTROPIC_KABILLION)); - - if (mvm_aslr_enabled()) { - if (0 == entropic_address) { - uintptr_t t = stackbase - MAXSSIZ - ((uintptr_t)(malloc_entropy[1] & ((1 << entropic_bits) - 1)) << ENTROPIC_SHIFT); - OSAtomicCompareAndSwapLong(0, t, (volatile long *)&entropic_limit); - OSAtomicCompareAndSwapLong(0, t - ENTROPIC_KABILLION, (volatile long *)&entropic_address); - } - } else { - // zero slide when ASLR has been disabled by boot-arg. Eliminate cloaking. - malloc_entropy[0] = 0; - malloc_entropy[1] = 0; - } -#else // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR -#error ASLR unhandled on this platform -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR -} - -void * -mvm_allocate_pages(size_t size, unsigned char align, unsigned debug_flags, int vm_page_label) -{ - boolean_t add_guard_pages = debug_flags & MALLOC_ADD_GUARD_PAGES; - boolean_t purgeable = debug_flags & MALLOC_PURGEABLE; - mach_vm_address_t vm_addr; - uintptr_t addr; - mach_vm_size_t allocation_size = round_page_quanta(size); - mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1; - int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label); - kern_return_t kr; - - if (!allocation_size) { - allocation_size = vm_page_quanta_size; - } - if (add_guard_pages) { - if (align > vm_page_quanta_shift) { - /* alignment greater than pagesize needs more work */ - allocation_size += (1 << align) + vm_page_quanta_size; - } else { - allocation_size += 2 * vm_page_quanta_size; - } - } - - if (purgeable) { - alloc_flags |= VM_FLAGS_PURGABLE; - } - if (allocation_size < size) { // size_t arithmetic wrapped! 
- return NULL; - } - - vm_addr = vm_page_quanta_size; - kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - if (kr) { - malloc_zone_error(debug_flags, false, "can't allocate region\n*** mach_vm_map(size=%lu) failed (error code=%d)\n", size, kr); - return NULL; - } - addr = (uintptr_t)vm_addr; - - if (add_guard_pages) { - if (align > vm_page_quanta_shift) { - /* calculate the first address inside the alignment padding - * where we can place the guard page and still be aligned. - * - * |-----------------------------------------------------------| - * |leading|gp| alloc |gp| t | - * |-----------------------------------------------------------| - */ - uintptr_t alignaddr = ((addr + vm_page_quanta_size) + (1 << align) - 1) & ~((1 << align) - 1); - size_t leading = alignaddr - addr - vm_page_quanta_size; - size_t trailing = (1 << align) - vm_page_quanta_size - leading; - - /* Unmap the excess area. */ - kr = mach_vm_deallocate(mach_task_self(), addr, leading); - if (kr) { - malloc_zone_error(debug_flags, false, "can't unmap excess guard region\n" - "*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)\n", - (void *)addr, leading, kr); - return NULL; - } - - kr = mach_vm_deallocate(mach_task_self(), addr + allocation_size - trailing, trailing); - if (kr) { - malloc_zone_error(debug_flags, false, "can't unmap excess trailing guard region\n" - "*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)\n", - (void *)(addr + allocation_size - trailing), trailing, kr); - return NULL; - } - - addr = alignaddr; - } else { - addr += vm_page_quanta_size; - } - mvm_protect((void *)addr, size, PROT_NONE, debug_flags); - } - return (void *)addr; -} - -void * -mvm_allocate_pages_securely(size_t size, unsigned char align, int vm_page_label, uint32_t debug_flags) -{ - mach_vm_address_t vm_addr; - uintptr_t addr; - mach_vm_size_t allocation_size = round_page_quanta(size); - mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1; - int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label); - kern_return_t kr; - - if (debug_flags & DISABLE_ASLR) { - return mvm_allocate_pages(size, align, 0, vm_page_label); - } - - if (!allocation_size) { - allocation_size = vm_page_quanta_size; - } - if (allocation_size < size) { // size_t arithmetic wrapped! - return NULL; - } - -retry: - vm_addr = entropic_address; - kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - if (kr == KERN_NO_SPACE) { - vm_addr = vm_page_quanta_size; - kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - } - if (kr) { - malloc_zone_error(debug_flags, false, "can't allocate region securely\n", - "*** mach_vm_map(size=%lu) failed (error code=%d)\n", size, kr); - return NULL; - } - addr = (uintptr_t)vm_addr; - - // Don't allow allocation to rise above entropic_limit (for tidiness). - if (addr + allocation_size > entropic_limit) { // Exhausted current range? 
- uintptr_t t = entropic_address; - uintptr_t u = t - ENTROPIC_KABILLION; - - if (u < t) { // provided we don't wrap, deallocate and retry, in the expanded entropic range - mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size); - OSAtomicCompareAndSwapLong(t, u, (volatile long *)&entropic_address); // Just one reduction please - goto retry; - } - // fall through to use what we got - } - - if (addr < entropic_address) { // we wrapped to find this allocation, expand the entropic range - uintptr_t t = entropic_address; - uintptr_t u = t - ENTROPIC_KABILLION; - if (u < t) { - OSAtomicCompareAndSwapLong(t, u, (volatile long *)&entropic_address); // Just one reduction please - } - // fall through to use what we got - } - return (void *)addr; -} - -void -mvm_deallocate_pages(void *addr, size_t size, unsigned debug_flags) -{ - boolean_t add_guard_pages = debug_flags & MALLOC_ADD_GUARD_PAGES; - mach_vm_address_t vm_addr = (mach_vm_address_t)addr; - mach_vm_size_t allocation_size = size; - kern_return_t kr; - - if (add_guard_pages) { - vm_addr -= vm_page_quanta_size; - allocation_size += 2 * vm_page_quanta_size; - } - kr = mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size); - if (kr) { - malloc_zone_error(debug_flags, false, "Can't deallocate_pages region at %p\n", addr); - } -} - -void -mvm_protect(void *address, size_t size, unsigned protection, unsigned debug_flags) -{ - kern_return_t err; - - if (!(debug_flags & MALLOC_DONT_PROTECT_PRELUDE)) { - err = mprotect((void *)((uintptr_t)address - vm_page_quanta_size), vm_page_quanta_size, protection); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** can't mvm_protect(%u) region for prelude guard page at %p\n", protection, - (void *)((uintptr_t)address - vm_page_quanta_size)); - } - } - if (!(debug_flags & MALLOC_DONT_PROTECT_POSTLUDE)) { - err = mprotect((void *)(round_page_quanta(((uintptr_t)address + size))), vm_page_quanta_size, protection); - if (err) { - malloc_report(ASL_LEVEL_ERR, "*** can't mvm_protect(%u) region for postlude guard page at %p\n", protection, - (void *)((uintptr_t)address + size)); - } - } -} - -int -mvm_madvise_free(void *rack, void *r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last, boolean_t scribble) -{ - if (pgHi > pgLo) { - size_t len = pgHi - pgLo; - - if (scribble) { - memset((void *)pgLo, SCRUBBLE_BYTE, len); // Scribble on MADV_FREEd memory - } - -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - if (last) { - if (*last == pgLo) { - return 0; - } - - *last = pgLo; - } -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - - MAGMALLOC_MADVFREEREGION(rack, r, (void *)pgLo, (int)len); // DTrace USDT Probe - if (-1 == madvise((void *)pgLo, len, CONFIG_MADVISE_STYLE)) { - /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */ -#if DEBUG_MADVISE - malloc_zone_error(NULL, false, - "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed for %p, length=%d\n", - (void *)pgLo, len); -#endif - return 1; - } else { - MALLOC_TRACE(TRACE_madvise, (uintptr_t)r, (uintptr_t)pgLo, len, CONFIG_MADVISE_STYLE); - } - } - return 0; -} diff --git a/src/libmalloc/src/vm.h b/src/libmalloc/src/vm.h deleted file mode 100644 index 337fc327e..000000000 --- a/src/libmalloc/src/vm.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). 
You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - - -#ifndef __VM_H -#define __VM_H - -static inline bool -mvm_aslr_enabled(void) -{ - return _dyld_get_image_slide((const struct mach_header *)_NSGetMachExecuteHeader()) != 0; -} - -MALLOC_NOEXPORT -void -mvm_aslr_init(void); - -MALLOC_NOEXPORT -void * -mvm_allocate_pages(size_t size, unsigned char align, unsigned debug_flags, int vm_page_label); - -MALLOC_NOEXPORT -void * -mvm_allocate_pages_securely(size_t size, unsigned char align, int vm_page_label, uint32_t debug_flags); - -MALLOC_NOEXPORT -void -mvm_deallocate_pages(void *addr, size_t size, unsigned debug_flags); - -MALLOC_NOEXPORT -int -mvm_madvise_free(void *szone, void *r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last, boolean_t scribble); - -MALLOC_NOEXPORT -void -mvm_protect(void *address, size_t size, unsigned protection, unsigned debug_flags); - -#endif // __VM_H diff --git a/src/libmalloc/tools/malloc_replay.cpp b/src/libmalloc/tools/malloc_replay.cpp deleted file mode 100644 index fb2e8f411..000000000 --- a/src/libmalloc/tools/malloc_replay.cpp +++ /dev/null @@ -1,1012 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "malloc_replay.h" -#include -#include -#include -#include -#include - -#define capture_thread_counters(x, c) \ - if (c & (CONFIG_REC_COUNTERS | CONFIG_REC_STATS)) { \ - x = thread_instruction_count(); \ - } - -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -// Maximum size to map when reading replay file chunks -#define MAX_REPLAY_FILE_CHUNK_SIZE (100 * 1024 * 1024) - -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - -static void (*s_funcMagSetThreadIndex)(unsigned int index); - -extern "C" int thread_selfcounts(int type, void *buf, size_t nbytes); - -// -//Store counter values for each (call, size) tuple. 
-// -typedef std::pair<uint8_t, uint32_t> CallSizePair; -typedef std::vector<uint16_t, ReplayAllocator<uint16_t>> ReplayVector; -static std::map<CallSizePair, ReplayVector, std::less<CallSizePair>, ReplayAllocator<std::pair<const CallSizePair, ReplayVector>>> s_counterDistributions; -static std::map<uint64_t, uint64_t, std::less<uint64_t>, ReplayAllocator<std::pair<const uint64_t, uint64_t> > > s_addressMap; - -static uint64_t s_totalEvents = 0; -static uint64_t s_totalLibMallocEvents = 0; -static uint64_t s_totalMallocEvents = 0; -static uint64_t s_totalMalignEvents = 0; -static uint64_t s_totalCallocEvents = 0; -static uint64_t s_totalReallocEvents = 0; -static uint64_t s_totalFreeEvents = 0; -static uint64_t s_totalVallocEvents = 0; -static uint64_t s_totalFailedFreeEvents = 0; -static uint64_t s_totalFailedReallocEvents = 0; - -uint64_t call_ins_retired[operation_count] = {0}; -uint64_t call_count[operation_count] = {0}; - -static const char *_DefaultFragMetricName = "DefaultZoneFragmentation"; -static const char *_DefaultNanoZone = "DefaultMallocZone"; - -enum { - CONFIG_REC_COUNTERS = 1 << 0, - CONFIG_REC_STATS = 1 << 1, - CONFIG_RUN_REPLAY = 1 << 2, - CONFIG_CONVERT_FILE = 1 << 3, - CONFIG_PAUSE = 1 << 4, -}; -typedef uint8_t replay_config_t; - -// -//Our allocator to allocate from a specific zone. -// -malloc_zone_t* s_zone = NULL; - -// The magazine number to use for non-replayed allocations. -#define NON_REPLAY_MAGAZINE 0 - -static void -configure_ktrace_session(ktrace_session_t s) -{ - ktrace_set_execnames_enabled(s, KTRACE_FEATURE_DISABLED); - ktrace_set_walltimes_enabled(s, KTRACE_FEATURE_DISABLED); - ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED); - ktrace_set_thread_groups_enabled(s, KTRACE_FEATURE_DISABLED); -} - -static uint64_t -thread_instruction_count(void) -{ - uint64_t instrCounts[2] = {}; - int err; - err = thread_selfcounts(1, &instrCounts, sizeof(instrCounts)); - return instrCounts[0]; -} - -//////////////////////////////////////////////////////////////////////////////// -// -// run_ktrace - Takes a nullable input ktrace file path and an output file path. -// If the input file is NULL, this will set up a ktrace recording -// session targeted at a file in the output file path. If an input -// ktrace file path is provided, this will convert the ktrace file -// to the compressed mtrace format, targeted at the output file -// path.
-// -//////////////////////////////////////////////////////////////////////////////// - -const int chunk_buffer_size = 16 * 1024 * 1024; - -typedef union { - struct compressed_alloc alloc; - struct compressed_calloc calloc; - struct compressed_realloc realloc; - struct compressed_free free; - struct compressed_memalign memalign; -} compressed_op_params; - -static bool -run_ktrace(const char* inputFile, const char* outputFile) -{ - __block uint32_t blockBytesWritten = 0; - - ktrace_file_t output_file = ktrace_file_create(NULL, outputFile); - if (!output_file) { - printf("Couldn't create output file: %s\n", outputFile); - return false; - } - - ktrace_session_t s = ktrace_session_create(); - if (inputFile) { - if (ktrace_set_file(s, inputFile)) { - printf("Couldn't open file: %s\n", inputFile); - ktrace_file_close(output_file); - ktrace_session_destroy(s); - return false; - } - } else { - assert(outputFile); - ktrace_set_signal_handler(s); - } - - configure_ktrace_session(s); - - ktrace_chunk_t events_chunk = ktrace_file_append_start(output_file, - MALLOC_EVENTS_TAG, MALLOC_EVENTS_V_MAJOR, MALLOC_EVENTS_V_MINOR); - if (!events_chunk) { - ktrace_file_close(output_file); - ktrace_session_destroy(s); - return false; - } - - void *buffer = malloc(chunk_buffer_size); - if (!buffer) { - printf("Could not allocate buffer for events\n"); - ktrace_file_close(output_file); - ktrace_session_destroy(s); - return false; - } - - __block void *next_ptr = buffer; - __block size_t space_left = chunk_buffer_size; - - dispatch_group_t g = dispatch_group_create(); - dispatch_queue_t q = dispatch_queue_create("Read Source File", DISPATCH_QUEUE_SERIAL); - - ktrace_events_subclass(s, DBG_UMALLOC, DBG_UMALLOC_EXTERNAL, (^(ktrace_event_t event) { - s_totalEvents++; - if (space_left < sizeof(compressed_operation) + sizeof(compressed_op_params)) { - ktrace_file_append_data(output_file, events_chunk, buffer, - chunk_buffer_size - space_left); - blockBytesWritten += chunk_buffer_size - space_left; - next_ptr = buffer; - space_left = chunk_buffer_size; - } - unsigned int debugid = event->debugid; - size_t entry_size = 0; - - struct compressed_operation *operation = (struct compressed_operation *)next_ptr; - operation->core = (uint8_t)event->cpuid; - operation->opcode = (uint8_t)KDBG_EXTRACT_CODE(debugid); - - switch (debugid) { - case TRACE_malloc|DBG_FUNC_END: - case TRACE_valloc|DBG_FUNC_END: { - s_totalLibMallocEvents++; - s_totalMallocEvents++; - - struct compressed_alloc *allocp = (struct compressed_alloc *)operation->body; - allocp->size = (uint32_t)event->arg2; - allocp->address = event->arg3; - entry_size = sizeof(compressed_operation) + sizeof(struct compressed_alloc); - break; - } - case TRACE_calloc|DBG_FUNC_END: { - s_totalLibMallocEvents++; - s_totalCallocEvents++; - - struct compressed_calloc *callocp = (struct compressed_calloc *)operation->body; - callocp->count = (uint32_t)event->arg2; - callocp->size = (uint32_t)event->arg3; - callocp->address = event->arg4; - entry_size = sizeof(compressed_operation) + sizeof(struct compressed_calloc); - break; - } - case TRACE_memalign|DBG_FUNC_END: { - s_totalLibMallocEvents++; - s_totalMalignEvents++; - - struct compressed_memalign *malignp = (struct compressed_memalign *)operation->body; - malignp->alignment = (uint32_t)event->arg2; - malignp->size = (uint32_t)event->arg3; - malignp->address = event->arg4; - entry_size = sizeof(compressed_operation) + sizeof(struct compressed_memalign); - break; - } - case TRACE_realloc|DBG_FUNC_END: { - s_totalLibMallocEvents++; - 
s_totalReallocEvents++; - - struct compressed_realloc *reallocp = (struct compressed_realloc *)operation->body; - reallocp->oldAddress = event->arg2; - reallocp->size = (uint32_t)event->arg3; - reallocp->newAddress = event->arg4; - entry_size = sizeof(compressed_operation) + sizeof(struct compressed_realloc); - break; - } - case TRACE_free: { - s_totalLibMallocEvents++; - s_totalFreeEvents++; - - struct compressed_free *freep = (struct compressed_free *)operation->body; - freep->address = event->arg2; - entry_size = sizeof(compressed_operation) + sizeof(struct compressed_free); - break; - } - } - if (entry_size) { - next_ptr = (char *)next_ptr + entry_size; - space_left -= entry_size; - } - })); - ktrace_set_completion_handler(s, ^{ - dispatch_group_leave(g); - }); - dispatch_group_enter(g); - - if (!ktrace_start(s, q)) { - dispatch_group_wait(g, DISPATCH_TIME_FOREVER); - } else { - dispatch_group_leave(g); - } - dispatch_release(g); - dispatch_release(q); - - // Write out any remaining data - if (space_left < chunk_buffer_size) { - ktrace_file_append_data(output_file, events_chunk, buffer, - chunk_buffer_size - space_left); - blockBytesWritten += chunk_buffer_size - space_left; - } - free(buffer); - - if (ktrace_file_append_finish(output_file, events_chunk)) { - printf("Failed to write events to %s\n", outputFile); - } - ktrace_file_close(output_file); - ktrace_session_destroy(s); - - // - //Dump out data about how many events we saw. - // - printf("TotalMalloc: %16llu\n" - "TotalCalloc: %16llu\n" - "TotalRealloc: %16llu\n" - "TotalMalign: %16llu\n" - "TotalFree: %16llu\n" - "\n" - "TotalEvents: %16llu\n" - "TotalLibMalloc: %16llu\n" - "\n" - "TotalBytesWritten: %16u\n", - s_totalMallocEvents, - s_totalCallocEvents, - s_totalReallocEvents, - s_totalMalignEvents, - s_totalFreeEvents, - s_totalEvents, - s_totalLibMallocEvents, - blockBytesWritten - ); - - return true; -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// dirty_memory - Writes the minimum number of bytes to dirty a range of memory. -// -//////////////////////////////////////////////////////////////////////////////// - -static void -dirty_memory(uint8_t* memory, size_t size) -{ - *memory = 0xFF; - - uint8_t* current = (uint8_t*)round_page_kernel((uint64_t)memory); - size_t good_size = malloc_good_size(size); - while (current < (memory + good_size)) { - *current = 0xFF; - current += vm_kernel_page_size; - } -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// run_event - Decodes an operation into its actual event type and then calls the -// proper libmalloc function. Returns the size of the event type so -// the caller can move to the next compressed_operation.
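run_event leans on the fact that every record in the stream is a two-byte packed header followed by an opcode-specific payload. A standalone sketch of the cursor arithmetic (structs restated from malloc_replay.h, which appears later in this patch; the helper name is invented):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Restated from malloc_replay.h for illustration. */
    struct compressed_operation { uint8_t opcode; uint8_t core; uint32_t body[]; } __attribute__((packed));
    struct compressed_free { uint64_t address; } __attribute__((packed));

    /* A reader advances by header size plus the payload size for the opcode. */
    static size_t
    stride_for_free_record(void)
    {
        return sizeof(struct compressed_operation) + sizeof(struct compressed_free);
    }

    int
    main(void)
    {
        assert(stride_for_free_record() == 2 + 8); /* packed, so no padding anywhere */
        /* e.g. next = (struct compressed_operation *)((char *)cur + stride); */
        return 0;
    }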
-// -//////////////////////////////////////////////////////////////////////////////// - -static size_t -run_event(const struct compressed_operation* currentOperation, - size_t remainingMapping, replay_config_t config) -{ - void* event = (void *)currentOperation->body; - size_t bytesRead = sizeof(compressed_operation); - remainingMapping -= sizeof(compressed_operation); - - if (s_funcMagSetThreadIndex){ - s_funcMagSetThreadIndex(currentOperation->core); - } - - uint64_t preICount = 0; - uint64_t postICount = 0; - uint32_t reqAllocSize = 0; - - //printf("EVENT : %llx\n", event); - switch (currentOperation->opcode) { - case op_malloc: { - if (remainingMapping < sizeof(struct compressed_alloc)) { - return 0; - } - struct compressed_alloc* alloc = (struct compressed_alloc*)event; - reqAllocSize = alloc->size; - capture_thread_counters(preICount, config); - uint64_t* allocation = (uint64_t*)malloc(alloc->size); - capture_thread_counters(postICount, config); - os_assert(allocation); - dirty_memory((uint8_t*)allocation, alloc->size); - s_addressMap.insert(std::make_pair(alloc->address, (uint64_t)allocation)); - s_totalMallocEvents++; - - bytesRead += sizeof(struct compressed_alloc); - break; - } - case op_calloc: { - if (remainingMapping < sizeof(struct compressed_calloc)) { - return 0; - } - - struct compressed_calloc* alloc = (struct compressed_calloc*)event; - reqAllocSize = alloc->size * alloc->count; - capture_thread_counters(preICount, config); - uint64_t allocation = (uint64_t)calloc(alloc->count, alloc->size); - capture_thread_counters(postICount, config); - os_assert(allocation); - dirty_memory((uint8_t*)allocation, alloc->size * alloc->count); - s_addressMap.insert(std::make_pair(alloc->address, allocation)); - s_totalCallocEvents++; - - bytesRead += sizeof(struct compressed_calloc); - break; - } - case op_memalign: { - if (remainingMapping < sizeof(struct compressed_memalign)) { - return 0; - } - - struct compressed_memalign* alloc = (struct compressed_memalign*)event; - reqAllocSize = alloc->size; - uint64_t allocation = 0; - capture_thread_counters(preICount, config); - posix_memalign((void**)&allocation, alloc->alignment, alloc->size); - capture_thread_counters(postICount, config); - os_assert(allocation); - dirty_memory((uint8_t*)allocation, alloc->size); - s_addressMap.insert(std::make_pair(alloc->address, allocation)); - s_totalMalignEvents++; - - bytesRead += sizeof(struct compressed_memalign); - break; - } - case op_valloc: { - if (remainingMapping < sizeof(struct compressed_alloc)) { - return 0; - } - - struct compressed_alloc* alloc = (struct compressed_alloc*)event; - reqAllocSize = alloc->size; - capture_thread_counters(preICount, config); - uint64_t allocation = (uint64_t)valloc(alloc->size); - capture_thread_counters(postICount, config); - os_assert(allocation); - dirty_memory((uint8_t*)allocation, alloc->size); - s_addressMap.insert(std::make_pair(alloc->address, allocation)); - s_totalVallocEvents++; - - bytesRead += sizeof(struct compressed_alloc); - break; - } - case op_free: { - if (remainingMapping < sizeof(struct compressed_free)) { - return 0; - } - - bytesRead += sizeof(struct compressed_free); - - struct compressed_free* freed = (struct compressed_free*)event; - auto iter = s_addressMap.find(freed->address); - if (iter == s_addressMap.end()) { - s_totalFailedFreeEvents++; - break; - } - capture_thread_counters(preICount, config); - free((void*)iter->second); - capture_thread_counters(postICount, config); - s_addressMap.erase(iter); - s_totalFreeEvents++; - break; 
- } - case op_realloc: { - if (remainingMapping < sizeof(struct compressed_realloc)) { - return 0; - } - - bytesRead += sizeof(struct compressed_realloc); - - struct compressed_realloc* alloc = (struct compressed_realloc*)event; - reqAllocSize = alloc->size; - auto iter = s_addressMap.find(alloc->oldAddress); - if (iter == s_addressMap.end()) { - s_totalFailedReallocEvents++; - break; - } - - uint64_t oldAddress = iter->second; - capture_thread_counters(preICount, config); - uint64_t newAddress = (uint64_t)realloc((void*)oldAddress, alloc->size); - capture_thread_counters(postICount, config); - os_assert(newAddress); - dirty_memory((uint8_t*)newAddress, alloc->size); - s_addressMap.erase(iter); - s_addressMap.insert(std::make_pair(alloc->newAddress, newAddress)); - s_totalReallocEvents++; - break; - } - default: - __builtin_trap(); - break; - }; - - if (s_funcMagSetThreadIndex){ - s_funcMagSetThreadIndex(NON_REPLAY_MAGAZINE); - } - - if (config & (CONFIG_REC_COUNTERS | CONFIG_REC_STATS)) { - uint64_t diff = postICount - preICount; - uint16_t instrCount = diff <= UINT16_MAX ? diff : UINT16_MAX; - if (config & CONFIG_REC_STATS) { - call_ins_retired[currentOperation->opcode - 1] += instrCount; - call_count[currentOperation->opcode - 1]++; - } - if ((config & CONFIG_REC_COUNTERS) && reqAllocSize > 0) { - auto lookup = CallSizePair(currentOperation->opcode, reqAllocSize); - auto iter = s_counterDistributions.find(lookup); - if (iter != s_counterDistributions.end()) { - iter->second.push_back(instrCount); - } else { - s_counterDistributions.insert({lookup, ReplayVector(1, instrCount)}); - } - } - } - - return bytesRead; -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// setup_private_malloc_zone - Creates a malloc zone for use during actual replay. -// We need to do so in order to keep the bookkeeping -// separate from the replayed data. This zone is not -// counted when figuring out fragmentation. -// -//////////////////////////////////////////////////////////////////////////////// - -static bool -setup_private_malloc_zone() -{ - s_zone = malloc_create_zone(0, 0); - if (!s_zone) { - printf("Couldn't create zone\n"); - return false; - } - - malloc_set_zone_name(s_zone, "IGNORE_THIS_ZONE"); - return true; -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// memory_reader - Read from ourselves, instead of a remote process like vmmap -// does. -// -//////////////////////////////////////////////////////////////////////////////// - -static kern_return_t -memory_reader(task_t remote_task, vm_address_t remote_address, vm_size_t size, - void **local_memory) -{ - if (local_memory) { - *local_memory = (void*)remote_address; - return KERN_SUCCESS; - } - - return KERN_FAILURE; -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// vm_range_recorder - Enumerate all the malloc vm ranges, looking at each page -// to figure out if it is resident or not, and dirty or not. -// Used to calculate fragmentation. 
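setup_private_malloc_zone above keeps the replay tool's own bookkeeping out of the zones being measured. The underlying zone API is plain C; a minimal sketch of the same isolation idea (only the zone name is taken from the function above, everything else is illustrative):

    #include <assert.h>
    #include <malloc/malloc.h>

    int
    main(void)
    {
        /* Private zone for bookkeeping; it is skipped by name when measuring. */
        malloc_zone_t *zone = malloc_create_zone(0, 0);
        malloc_set_zone_name(zone, "IGNORE_THIS_ZONE");

        void *p = malloc_zone_malloc(zone, 64);
        assert(p != NULL && malloc_zone_from_ptr(p) == zone);

        /* Allocations must be released back to the zone they came from. */
        malloc_zone_free(zone, p);
        malloc_destroy_zone(zone);
        return 0;
    }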
-// -//////////////////////////////////////////////////////////////////////////////// - -static void -vm_range_recorder(task_t task, void* context, unsigned type, vm_range_t *ranges, - unsigned count) -{ - for (unsigned currentRange = 0; currentRange < count; currentRange++ ) { - replay_malloc_magazine magazine = { - .baseAddress = ranges[currentRange].address, - .extent = ranges[currentRange].address + ranges[currentRange].size - }; - - for (uint64_t i = magazine.baseAddress; i < magazine.extent; i += vm_kernel_page_size) { - kern_return_t err = 0; - integer_t disposition = 0; - integer_t refCount = 0; - - err = mach_vm_page_query(mach_task_self(), i, &disposition, &refCount); - if (!err) { - if (disposition & VM_PAGE_QUERY_PAGE_PRESENT) { - if (disposition & (VM_PAGE_QUERY_PAGE_COPIED|VM_PAGE_QUERY_PAGE_DIRTY)) { - magazine.pages_dirty++; - } - } else if (disposition & VM_PAGE_QUERY_PAGE_PAGED_OUT) { - magazine.pages_dirty++; - } - } - } - ((replay_malloc_zone_t)context)->magazines.push_back(magazine); - } -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// run_malloc_replay - Replay a compressed malloc trace. The idea here is to replay -// the recorded events while forcing a specific CPU. By doing -// so libmalloc will target a specific magazine. This way we -// can see how the current allocator would pack an old allocation -// stream. -// -//////////////////////////////////////////////////////////////////////////////// - -static bool -run_malloc_replay(const char* fileName, pdwriter_t perfDataWriter, replay_config_t config) -{ - if (!setup_private_malloc_zone()) { - return false; - } - - ktrace_session_t s = ktrace_session_create(); - if (ktrace_set_file(s, fileName)) { - printf("Couldn't open file: %s\n", fileName); - ktrace_session_destroy(s); - return false; - } - configure_ktrace_session(s); - - dispatch_group_t g = dispatch_group_create(); - dispatch_queue_t q = dispatch_queue_create("Read Malloc Trace File", - DISPATCH_QUEUE_SERIAL); - - ktrace_chunks(s, MALLOC_EVENTS_TAG, ^(ktrace_chunk_t c) { - if (ktrace_chunk_version_major(c) != MALLOC_EVENTS_V_MAJOR - || ktrace_chunk_version_minor(c) != MALLOC_EVENTS_V_MINOR) { - printf("Invalid replay file: %s\n", fileName); - exit(1); - } - - size_t size = (size_t)ktrace_chunk_size(c); - off_t offset = 0; - while (size > sizeof(compressed_operation)) { - void *ptr; - size_t mapped_size = size; -#ifdef MAX_REPLAY_FILE_CHUNK_SIZE - mapped_size = MIN(mapped_size, MAX_REPLAY_FILE_CHUNK_SIZE); -#endif // MAX_REPLAY_FILE_CHUNK_SIZE - - // Map as much of the chunk as we can. If we can't map everything, - // keep halving the requested size until we get to something that - // works. If nothing works, bail. 
- do { - ptr = ktrace_chunk_map_data(c, offset, mapped_size); - if (!ptr) { - mapped_size /= 2; - } - } while (!ptr && mapped_size); - - if (!mapped_size) { - perror("Could not map replay file chunk"); - exit(1); - } - - struct compressed_operation* event = (struct compressed_operation*)ptr; - size_t size_left = mapped_size; - do { - size_t read = run_event(event, size_left, config); - if (read == 0) { - break; - } - s_totalLibMallocEvents++; - - size_left -= read; - size -= read; - offset += read; - - event = (struct compressed_operation*)((char *)event + read); - } while (size_left > sizeof(compressed_operation)); - - ktrace_chunk_unmap_data(c, ptr, mapped_size); - } - }); - - ktrace_set_completion_handler(s, ^{ - dispatch_group_leave(g); - }); - dispatch_group_enter(g); - - ktrace_events_all(s, ^(ktrace_event_t event) { }); - if (!ktrace_start(s, q)) { - dispatch_group_wait(g, DISPATCH_TIME_FOREVER); - } else { - dispatch_group_leave(g); - } - dispatch_release(g); - dispatch_release(q); - - s_addressMap.clear(); - ktrace_session_destroy(s); - - return true; -} - -static void -report_results(pdwriter_t perfDataWriter, replay_config_t config) -{ - // - //If passed a writer, output performance data. - // - if (perfDataWriter) { - pdwriter_new_value(perfDataWriter, "TotalMalloc", PDUNIT_CUSTOM(totalmalloc), s_totalMallocEvents); - pdwriter_new_value(perfDataWriter, "TotalCalloc", PDUNIT_CUSTOM(totalcalloc), s_totalCallocEvents); - pdwriter_new_value(perfDataWriter, "TotalRealloc", PDUNIT_CUSTOM(totalrealloc), s_totalReallocEvents); - pdwriter_new_value(perfDataWriter, "TotalValloc", PDUNIT_CUSTOM(totalvalloc), s_totalVallocEvents); - pdwriter_new_value(perfDataWriter, "TotalMalign", PDUNIT_CUSTOM(totalmalign), s_totalMalignEvents); - pdwriter_new_value(perfDataWriter, "TotalFree", PDUNIT_CUSTOM(totalfree), s_totalFreeEvents); - pdwriter_new_value(perfDataWriter, "FailedRealloc", PDUNIT_CUSTOM(failedrealloc), s_totalFailedReallocEvents); - pdwriter_new_value(perfDataWriter, "FailedFree", PDUNIT_CUSTOM(failedfree), s_totalFailedFreeEvents); - } - - printf("TotalMalloc: %16llu\n" - "TotalCalloc: %16llu\n" - "TotalRealloc: %16llu\n" - "TotalValloc: %16llu\n" - "TotalMalign: %16llu\n" - "TotalFree: %16llu\n" - "\n" - "FailedRealloc: %16llu\n" - "FailedFree: %16llu\n", - s_totalMallocEvents, - s_totalCallocEvents, - s_totalReallocEvents, - s_totalVallocEvents, - s_totalMalignEvents, - s_totalFreeEvents, - s_totalFailedReallocEvents, - s_totalFailedFreeEvents - ); - - // - //Now let's go over the data and find how fragmented we are.
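The fragmentation figure printed below reduces to one formula: the share of dirty bytes that no live allocation is using. A worked standalone example (the numbers are invented; the expression matches the frag computation in report_results):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t bytesDirty = 4096ull * 100; /* 100 dirty pages found by the enumerator */
        uint64_t sizeInUse = 4096ull * 75;   /* 75 pages' worth of live allocations */
        double frag = (bytesDirty && (sizeInUse < bytesDirty)) ? 100 - (100.0 * sizeInUse) / bytesDirty : 0;
        assert(frag == 25.0); /* a quarter of the dirty memory is pure overhead */
        return 0;
    }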
- // - vm_address_t* addresses = NULL; - unsigned count = 0; - - printf("\n\n\n"); - printf("Zone: BytesDirty BytesInUse %%Frag\n"); - printf("===========================================================\n"); - - double defaultFrag = 0; - - malloc_get_all_zones(mach_task_self(), memory_reader, &addresses, &count); - for (unsigned i = 0; i < count; i++) { - malloc_zone_t* zone = (malloc_zone_t*)addresses[i]; - replay_malloc_zone zoneInfo = { 0 }; - if (strcmp(zone->zone_name, "IGNORE_THIS_ZONE") != 0) { - malloc_statistics_t stats = {0}; - zone->introspect->enumerator(mach_task_self(), &zoneInfo, MALLOC_PTR_REGION_RANGE_TYPE, (vm_address_t)zone, memory_reader, vm_range_recorder); - zone->introspect->statistics(zone, &stats); - - uint64_t bytesDirty = 0; - - for (const auto& magazine : zoneInfo.magazines) { - bytesDirty += magazine.pages_dirty * vm_kernel_page_size; - //printf("%llx %llx %d\n", magazine.baseAddress, magazine.extent, magazine.pages_dirty); - } - - double frag = (bytesDirty && (stats.size_in_use < bytesDirty)) ? 100 - (100.0 * stats.size_in_use)/bytesDirty : 0; - - printf("%20s %14llu %14lu %6.2f\n", zone->zone_name, bytesDirty, stats.size_in_use, frag); - if (perfDataWriter) { - pdwriter_new_value(perfDataWriter, "BytesDirty", pdunit_bytes, bytesDirty); - pdwriter_record_variable_str(perfDataWriter, "ZoneName", zone->zone_name); - pdwriter_new_value(perfDataWriter, "BytesInUse", pdunit_bytes, stats.size_in_use); - pdwriter_record_variable_str(perfDataWriter, "ZoneName", zone->zone_name); - pdwriter_new_value(perfDataWriter, "Fragmentation", PDUNIT_CUSTOM(FragmentedPercent), frag); - pdwriter_record_variable_str(perfDataWriter, "ZoneName", zone->zone_name); - - if (strcmp(zone->zone_name, _DefaultNanoZone) == 0) { - defaultFrag = frag; - } - } - } - } - - if (perfDataWriter) { - // - //Write out the fragmentation in DefaultMallocZone as a primary metric. - // - pdwriter_new_value(perfDataWriter, _DefaultFragMetricName, PDUNIT_CUSTOM(FragmentedPercent), defaultFrag); - pdwriter_record_variable(perfDataWriter, kPCFailureThresholdPctVar, 10); - } else if (config & CONFIG_REC_STATS) { - printf("\n\n\n"); - printf("Call Cycles (mean)\n"); - printf("=====================\n"); - } - - // - //If we were asked to gather instruction counts, iterate through them and - //either output the mean for the call or the raw counts for each - //call:requested-size pair. - // - if (config & (CONFIG_REC_COUNTERS | CONFIG_REC_STATS)) { - json_t jsonW = NULL; - if (perfDataWriter && (config & CONFIG_REC_COUNTERS)) { - // - //Write out the instruction count data. We record into an extension - //since there's typically a large number of counts. - // - jsonW = pdwriter_start_extension(perfDataWriter, "libmalloc.instruction_counts"); - if (jsonW) { - for (auto const &mCallDistribution : s_counterDistributions) { - // - //If requested, write the i-counts out to the perfdata.
- // - char description[16]; - snprintf(description, sizeof(description), "%d:%u", mCallDistribution.first.first, mCallDistribution.first.second); - json_member_start_object(jsonW, description); - json_member_int(jsonW, "call", mCallDistribution.first.first); - json_member_int(jsonW, "size", mCallDistribution.first.second); - json_member_uint(jsonW, "count", (unsigned int)mCallDistribution.second.size()); - json_member_start_array(jsonW, "values"); - for (uint64_t val : mCallDistribution.second) { - json_value_uint(jsonW, (unsigned int)val); - } - json_end_array(jsonW); // Inner counts - json_end_object(jsonW); - } - } - } - - // - //Output the mean number of instructions retired. - // - if (config & CONFIG_REC_STATS) { - for (int i = 0; i < operation_count; i++) { - if (call_ins_retired[i] > 0 && call_count[i] > 0) { - uint64_t mean = call_ins_retired[i] / call_count[i]; - if (perfDataWriter) { - char full_name[16]; - // operation enum is indexed from 1, adjust index for mcall_to_name. - snprintf(full_name, sizeof(full_name), "%s-mean", mcall_to_name(i + 1)); - pdwriter_new_value(perfDataWriter, full_name, pdunit_instructions, mean); - pdwriter_record_variable(perfDataWriter, kPCFailureThresholdPctVar, 100); - } else { - printf("%9s %6llu\n", mcall_to_name(i + 1), mean); - } - } - } - } - - if (jsonW) { - pdwriter_end_extension(perfDataWriter, jsonW); - } - } -} - -//////////////////////////////////////////////////////////////////////////////// -// -// usage - Output help. -// -//////////////////////////////////////////////////////////////////////////////// - -static void -usage() -{ - printf("libmalloc_replay -r <filename> [-p] [-j filename] [-t testname] [-c | -s]\n"); - printf("libmalloc_replay [-i <filename>] -o <filename> [-p]\n"); - printf("\t-p Pause the replay process before exit\n"); - printf("\t-j <filename>\toutput perfdata V2 formatted file\n"); - printf("\t-t <testname>\tset the test name for the perfdata V2 formatted output file\n"); - printf("\t-c capture and output instruction counts along with the performance data.\n"); - printf("\t-s capture and output instruction count statistics along with the performance data.\n"); -} - - -//////////////////////////////////////////////////////////////////////////////// -// -// main - Yep.
-// -//////////////////////////////////////////////////////////////////////////////// - -int -main(int argc, char** argv) -{ - char * inputMTrace = NULL; - char * inputKtrace = NULL; - char * outputMTrace = NULL; - char * outputPerfData = NULL; - char * outputTestName = NULL; - replay_config_t config = 0; - int c = 0; - - if (argc < 2) { - usage(); - return -1; - } - - while ((c = getopt(argc, (char* const*)argv, "phr:i:o:j:t:cs")) != -1) { - switch (c) { - case 'r': - inputMTrace = strdup(optarg); - config |= CONFIG_RUN_REPLAY; - break; - case 'i': - inputKtrace = strdup(optarg); - if (inputKtrace && outputMTrace) { - config |= CONFIG_CONVERT_FILE; - } - break; - case 'o': - outputMTrace = strdup(optarg); - if (inputKtrace && outputMTrace) { - config |= CONFIG_CONVERT_FILE; - } - break; - case 'p': - config |= CONFIG_PAUSE; - break; - case 'j': - outputPerfData = strdup(optarg); - break; - case 't': - outputTestName = strdup(optarg); - break; - case 'c': - config |= CONFIG_REC_COUNTERS; - break; - case 's': - config |= CONFIG_REC_STATS; - break; - case 'h': - default: - usage(); - return EX_USAGE; - } - } - - if ((config & CONFIG_REC_COUNTERS) && (config & CONFIG_REC_STATS)) { - printf("Invalid usage: -c and -s\n"); - usage(); - return EX_USAGE; - } - - timespec beginTime = {0}; - timespec endTime = {0}; - - pdwriter_t writer = NULL; - if (outputPerfData) { - char dataPath[MAXPATHLEN]; - - // - //Ensure the filename is prepended with libmalloc - // - const char *prepend = "libmalloc"; - auto outputFilePath = std::string(outputPerfData); - const auto namePos = outputFilePath.find_last_of('/') + 1; - if (outputFilePath.find(prepend, namePos) != namePos) { - outputFilePath.insert(namePos, prepend); - } - int ret = snprintf(dataPath, sizeof(dataPath), "%s.%d.%llx." PD_FILE_EXT, - outputFilePath.c_str(), getpid(), mach_absolute_time()); - if (ret < 0) { - return errno; - } - auto perfdataName = std::string("libmalloc.replay."); - perfdataName += outputTestName ? outputTestName : dataPath; - writer = pdwriter_open(dataPath, perfdataName.c_str(), 0, 0); - if (!writer) { - printf("\n****Couldn't open writer for performance data file. 
Error: %s\n", strerror(errno)); - } else { - pdwriter_set_primary_metric(writer, _DefaultFragMetricName); - } - } - - if (config & CONFIG_RUN_REPLAY) { - void *libmalloc = dlopen("/usr/lib/system/libsystem_malloc.dylib", RTLD_NOW); - if (libmalloc) { - s_funcMagSetThreadIndex = (void (*)(unsigned int))dlsym(libmalloc, "mag_set_thread_index"); - } - - if (!s_funcMagSetThreadIndex) { - printf("\n****Couldn't load mag_set_thread_index, replay won't honor core****\n\n"); - } else { - s_funcMagSetThreadIndex(NON_REPLAY_MAGAZINE); - } - - clock_gettime(CLOCK_MONOTONIC_RAW, &beginTime); - - if (!run_malloc_replay(inputMTrace, writer, config)) { - return -1; - } - - clock_gettime(CLOCK_MONOTONIC_RAW, &endTime); - - report_results(writer, config); - } else if (config & CONFIG_CONVERT_FILE) { - clock_gettime(CLOCK_MONOTONIC_RAW, &beginTime); - if (!run_ktrace(inputKtrace, outputMTrace)) { - printf("\n****Couldn't record mtrace file.\n"); - } - clock_gettime(CLOCK_MONOTONIC_RAW, &endTime); - } else if (outputMTrace) { - clock_gettime(CLOCK_MONOTONIC_RAW, &beginTime); - if (!run_ktrace(NULL, outputMTrace)) { - printf("\n****Couldn't record mtrace file.\n"); - } - clock_gettime(CLOCK_MONOTONIC_RAW, &endTime); - } - - if (beginTime.tv_sec) { - printf("\n\nRuntime: %ld ms\n", ((endTime.tv_sec - beginTime.tv_sec) * 1000) + (endTime.tv_nsec - beginTime.tv_nsec)/1000000); - } - - if (writer) { - pdwriter_close(writer); - } - - if (config & CONFIG_PAUSE) { - printf("\n\nProcess paused, hit Ctrl+C to exit\n"); - pause(); - } - - return 0; -} diff --git a/src/libmalloc/tools/malloc_replay.h b/src/libmalloc/tools/malloc_replay.h deleted file mode 100644 index 39ef5cbab..000000000 --- a/src/libmalloc/tools/malloc_replay.h +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef __MALLOC_REPLAY_H -#define __MALLOC_REPLAY_H - -#include "trace.h" -#include - -// -//Our file format -// - -// Definitions for the event chunk.
-#define MALLOC_EVENTS_TAG (uint32_t)0xe001e001 -#define MALLOC_EVENTS_V_MAJOR 1 -#define MALLOC_EVENTS_V_MINOR 1 - -enum operation { - op_malloc = 0x01, - op_free = 0x02, - op_realloc = 0x03, - op_memalign = 0x04, - op_calloc = 0x05, - op_valloc = 0x06, -}; - -static const int operation_count = op_valloc; -static const char *mcall_names[] = {"malloc", "free", "realloc", "memalign", "calloc", "valloc"}; - -static inline const char * -mcall_to_name(int call_num) { - if (call_num > 0 && call_num <= operation_count) { - return mcall_names[call_num - 1]; - } - return NULL; -} - -enum flags { - flag_stacks = 0x00000001, - flag_timestamps = 0x00000002 -}; - -struct compressed_header { - uint16_t version; - uint64_t flags; -} __attribute__((packed)); - -struct compressed_operation { - uint8_t opcode; - uint8_t core; - uint32_t body[]; -}__attribute__((packed)); - -struct compressed_alloc { - uint64_t address; - uint32_t size; -} __attribute__((packed)); - -struct compressed_calloc { - uint64_t address; - uint32_t count; - uint32_t size; -} __attribute__((packed)); - -struct compressed_memalign { - uint64_t address; - uint32_t alignment; - uint32_t size; -} __attribute__((packed)); - -struct compressed_free { - uint64_t address; -} __attribute__((packed)); - -struct compressed_realloc { - uint64_t oldAddress; - uint64_t newAddress; - uint32_t size; -} __attribute__((packed)); - -struct compressed_stack_key { - uint64_t stackKey; -} __attribute__((packed)); - -struct compressed_time { - uint64_t timestamp; -} __attribute__((packed)); - - -// -//Our allocator to allocate from a specific zone. -// -extern malloc_zone_t* s_zone; - -template <class T> -class ReplayAllocator { -public: - // type definitions - typedef T value_type; - typedef T* pointer; - typedef const T* const_pointer; - typedef T& reference; - typedef const T& const_reference; - typedef std::size_t size_type; - typedef std::ptrdiff_t difference_type; - - // rebind allocator to type U - template <class U> - struct rebind { - typedef ReplayAllocator<U> other; - }; - - // return address of values - pointer address (reference value) const - { - return &value; - } - const_pointer address (const_reference value) const - { - return &value; - } - - /* constructors and destructor - * - nothing to do because the allocator has no state - */ - ReplayAllocator() throw() { } - ReplayAllocator(const ReplayAllocator&) throw() { } - template <class U> - ReplayAllocator (const ReplayAllocator<U>&) throw() { } - ~ReplayAllocator() throw() { } - - // return maximum number of elements that can be allocated - size_type max_size () const throw() - { - return std::numeric_limits<std::size_t>::max() / sizeof(T); - } - - // allocate but don't initialize num elements of type T - pointer allocate (size_type num, const void* = 0) - { - return (pointer)malloc_zone_malloc(s_zone, num * sizeof(T)); - } - - // initialize elements of allocated storage p with value value - void construct (pointer p, const T& value) - { - // initialize memory with placement new - new((void*)p)T(value); - } - - // destroy elements of initialized storage p - void destroy (pointer p) - { - // destroy objects by calling their destructor - p->~T(); - } - - // deallocate storage p of deleted elements - void deallocate (pointer p, size_type num) - { - malloc_zone_free(s_zone, p); - } -}; - -template <class T1, class T2> -bool operator== (const ReplayAllocator<T1>&, - const ReplayAllocator<T2>&) throw() -{ - return true; -} -template <class T1, class T2> -bool operator!= (const ReplayAllocator<T1>&, - const ReplayAllocator<T2>&) throw() -{ - return false; -} - -typedef struct replay_malloc_magazine {
- uint64_t baseAddress; - uint64_t extent; - uint32_t pages_dirty; -} *replay_malloc_magazine_t; - -typedef struct replay_malloc_zone { - const char* name; - std::vector<replay_malloc_magazine, ReplayAllocator<replay_malloc_magazine> > magazines; -} *replay_malloc_zone_t; - - -#endif // __MALLOC_REPLAY_H diff --git a/src/libmalloc/tools/malloc_replay_plotter.py b/src/libmalloc/tools/malloc_replay_plotter.py deleted file mode 100644 index e25c4829a..000000000 --- a/src/libmalloc/tools/malloc_replay_plotter.py +++ /dev/null @@ -1,366 +0,0 @@ -#!/usr/bin/env python - -from __future__ import absolute_import -from __future__ import unicode_literals -from __future__ import division -from __future__ import print_function - -import sys -import os -import re -import argparse -import logging -import json -from pprint import pprint -import numpy as np -import matplotlib.pyplot as plt - - -class ReportConfiguration(object): - - def __init__(self, report_type, call, nano_malloc_cutoff, xfilter, num_bins, merge_calloc, fileV1, fileV2): - self.report_type = report_type - self.call = call - self.nano_malloc_cutoff = nano_malloc_cutoff - self.xfilter = xfilter - self.num_bins = num_bins - self.merge_calloc = merge_calloc - self.fileV1 = fileV1 - self.fileV2 = fileV2 - - def plotter_class(self): - if self.report_type == "scatter": - return ScatterPlotter - if self.report_type == "instructions": - return InstructionsPlotter - if self.report_type == "request_sizes": - return RequestSizePlotter - if self.report_type == "nano_request_bins": - return RequestSizePlotter - if self.report_type == "nano_request_bins_ysize": - return RequestSizePlotter - - def call_identifier(self): - return self.call_identifier_for_name(self.call) - - @classmethod - def call_identifier_for_name(cls, name): - mapping = {'malloc': 1, 'realloc': 3, 'memalign': 4, 'calloc': 5, 'valloc': 6} - return mapping[name] - - @classmethod - def configuration_for_arguments(cls, args): - return cls(args.report_type, args.call, args.nano_malloc_cutoff, args.xfilter, args.num_bins, args.merge_calloc, args.fileV1, args.fileV2) - - -class ReportData(object): - - def __init__(self, fileV1, fileV2): - self.fileV1 = fileV1 - self.fileV2 = fileV2 - - self.all_data = [] - self.frag = [] - self.paths = [fileV1, fileV2] - - self.parse_input_files() - - def parse_input_files(self): - with open(self.fileV1) as f: - self.all_data.append(json.load(f)) - if self.fileV2: - with open(self.fileV2) as f: - self.all_data.append(json.load(f)) - self.calculate_fragmentation() - - def enumerate(self): - for i, data in enumerate(self.all_data): - yield i, data, self.frag[i], self.paths[i] - - def fileV1_data(self): - return self.all_data[0] - - def num_plots(self): - return 2 if self.fileV1 and self.fileV2 else 1 - - def calculate_fragmentation(self): - for data in self.all_data: - total_frag = 0 - data = data['data'] - for obj in data: - for i in obj: - if 'variables' in i: - if i['metric'] == 'Fragmentation': - total_frag += i['value'] - self.frag.append(total_frag) - - -class Plotter(object): - - def __init__(self, report_configuration): - self.configuration = report_configuration - - def plot(self, report_data): - pass - - # Returns a list of sizes requested and the frequency at which this request - # was made.
- def size_freq_for_data(self, data, call_identifier): - size_filter = self.configuration.nano_malloc_cutoff - if not size_filter: - size_filter = sys.maxint - - size_freq = [] - for ext, counts in data['extensions']['libmalloc.instruction_counts'].items(): - if counts['call'] == call_identifier and int(counts['size']) <= size_filter: - size_freq.append([counts['size'], counts['count']]) - return size_freq - - # Returns a list of lists of ([size, [instruction counts]]). Where size is the - # requested size and instruction counts are the number of CPU instructions it took (as - # recorded by libmalloc_replay). If coalesce is set, this instead returns a - # coalesced list of instruction counts ([instruction counts]), flattened across all - # request sizes. - def times_for_data(self, data, call_identifier, coalesce): - size_filter = self.configuration.nano_malloc_cutoff - if not size_filter: - size_filter = sys.maxint - times = [] - for ext, counts in data['extensions']['libmalloc.instruction_counts'].items(): - if counts['call'] == call_identifier and int(counts['size']) <= size_filter: - if coalesce: - times += counts['values'] - else: - times.append([counts['size'], counts['values']]) - return times - - def show(self): - plt.show() - - def write_to_path(self, path): - plt.savefig(path) - - -class ScatterPlotter(Plotter): - - def plot(self, report_data): - plt.figure(figsize=(20,10)) - labels = ["V1", "V2"] - colours = ['r', 'b'] - for i, data, _, _ in report_data.enumerate(): - logging.debug("Building data") - sizecounts = self.times_for_data(data, self.configuration.call_identifier(), False) - sizes = [] - counts = [] - for pair in sizecounts: - rsize = pair[0] - for icount in pair[1]: - sizes.append(rsize) - counts.append(icount) - colmark = colours[i] + 'x' - logging.debug("Plotting scatter") - scatter, = plt.plot(sizes, counts, colmark) - scatter.set_label(labels[i]) - plt.xlabel("Requested Size (Bytes)") - plt.ylabel("Instruction Count") - plt.legend() - - -class InstructionsPlotter(Plotter): - - def plot(self, report_data): - num_plots = report_data.num_plots() - fig = plt.figure(figsize=(20, num_plots * 5)) - subplot_config = 221 if num_plots == 2 else 121 - - for i, data, fragmentation, path in report_data.enumerate(): - all_times = self.times_for_data(data, self.configuration.call_identifier(), True) - - # We may want to just filter for a certain range (0, xfilter) - if self.configuration.xfilter: - filtered = [t for t in all_times if t < self.configuration.xfilter] - else: - filtered = all_times - - logging.debug("Plotting: Histogram") - # Histogram - h_ax = plt.subplot(subplot_config) - subplot_config += 1 - self.hist_data(filtered, False, 1) - if self.configuration.xfilter > 0: - h_ax.set_xlim([0, self.configuration.xfilter]) - - plt.suptitle('{}: {}'.format(path, self.configuration.call)) - - logging.debug("Plotting: CDF") - # CDF - ax = plt.subplot(subplot_config) - subplot_config += 1 - self.hist_data(all_times, True, 0) - - # Table - logging.debug("Producing table") - per50 = np.percentile(all_times, 50) - per75 = np.percentile(all_times, 75) - per95 = np.percentile(all_times, 95) - - tblstr = 'Fragmentation: {}%\n50th: {}\n75th: {}\n95th: {}'.format(fragmentation, per50, per75, per95) - ax.text(0, 0.1, tblstr, bbox=dict(facecolor='white'), horizontalalignment='right', verticalalignment='top') - - def hist_data(self, data, cumulative, width): - norm = 1 if cumulative else 0 - plt.hist(data, bins=self.configuration.num_bins, log=False, cumulative=cumulative,
linewidth=width, normed=norm) - plt.xlabel("Instruction Counts") - if cumulative: - plt.title("Cumulative") - - -class RequestSizePlotter(Plotter): - - def sort_split_and_fill_size_freqs(self, size_freq): - # Sort by the size. Then split into two lists. - size_freq.sort(key=lambda x: x[0]) - sizes = [i[0] for i in size_freq] - counts = [i[1] for i in size_freq] - - # Fill out the arrays where we didn't see events. This helps when we - # bin the data later. - sparse_sizes = [] - sparse_counts = [] - i = 0 - j = 0 - while i < max(sizes): - if sizes[j] == (i + 1): - sparse_sizes.append(sizes[j]) - sparse_counts.append(counts[j]) - j = j + 1 - else: - sparse_sizes.append(i+1) - sparse_counts.append(0) - i = i + 1 - return sparse_sizes, sparse_counts - - def merge_size_counts(self, sizes, counts, sizes_c, counts_c): - # Merge the calloc data with malloc. N.b. The lists can be different lengths; merge into sizes. - if len(sizes_c) > len(sizes): - tmpS = sizes - tmpC = counts - sizes = sizes_c - counts = counts_c - sizes_c = tmpS - counts_c = tmpC - for i in range(len(sizes)): - if i >= len(sizes_c): - break - assert(sizes[i] == sizes_c[i]) - counts[i] = counts[i] + counts_c[i] - return sizes, counts - - def plot(self, report_data): - plt.figure(figsize=(50, 10)) - plt_config = 211 - calls = ['malloc'] - if not self.configuration.merge_calloc: - plt_config = 311 - calls.append('calloc') - calls.append('realloc') - - for call_name in calls: - logging.debug('Plotting: %s' % call_name) - call_identifier = self.configuration.call_identifier_for_name(call_name) - - data = report_data.fileV1_data() - size_freq = self.size_freq_for_data(data, call_identifier) - sizes, counts = self.sort_split_and_fill_size_freqs(size_freq) - - # calloc merging - if self.configuration.merge_calloc and call_name == 'malloc': - size_freq_calloc = self.size_freq_for_data(data, 5) - sizes_c, counts_c = self.sort_split_and_fill_size_freqs(size_freq_calloc) - sizes, counts = self.merge_size_counts(sizes, counts, sizes_c, counts_c) - - # Bin the data - num_bins = 16 - binned_counts = [0] * 16 - curr_bin = 0 - bin_num = 0 - for i in range(max(sizes))[1:]: - if i % num_bins == 0 and i != 0: - logging.debug('Bin end: %d' % i) - binned_counts[bin_num] = curr_bin - # Extra logging. Enable this if you want to output the - # counts in each bin to the console. 
- #logging.debug(' Count: %d' % curr_bin) - bin_num = bin_num + 1 - curr_bin = 0 - if self.configuration.report_type == "nano_request_bins_ysize": - curr_bin = curr_bin + (counts[i-1] * sizes[i-1]) - else: - curr_bin = curr_bin + counts[i-1] - - # Draw the plot - ax = plt.subplot(plt_config) - plt_config += 1 - if self.configuration.report_type == "nano_request_bins" or self.configuration.report_type == "nano_request_bins_ysize": - ax.bar(range(num_bins), binned_counts) - ax.set_xticks(range(num_bins)) - x_labels = range(1, 256, 16) - x_labels.append("0") - ax.set_xticklabels(x_labels) - ax.set_xlabel("Request size (bytes)", fontsize=12) - if self.configuration.report_type == "nano_request_bins_ysize": - ax.set_ylabel("Total Requested (Bytes)") - else: - ax.set_ylabel("Frequency") - ax.set_title(call_name) - else: - plt.bar(sizes, counts) - - plt.suptitle(self.configuration.fileV1) - plt.subplots_adjust(hspace=0.5) - - -class Tool(object): - - def __init__(self, args): - self.args = args - - def run(self): - logging.debug('Loading JSON') - configuration = ReportConfiguration.configuration_for_arguments(self.args) - plotter_class = configuration.plotter_class() - plotter = plotter_class(configuration) - report_data = ReportData(self.args.fileV1, self.args.fileV2) - plotter.plot(report_data) - - if self.args.show_plot: - plotter.show() - else: - plotter.write_to_path(self.args.output) - - @classmethod - def main(cls): - parser = argparse.ArgumentParser(description='Analyze libmalloc_replay perfdata output. This takes as input a .pdj file containing request sizes and instruction counts and outputs various plots.') - parser.add_argument('fileV1', help='Path to nano V1 data JSON file') - parser.add_argument('fileV2', nargs='?', help='Optional path to nano V2 data JSON file') - parser.add_argument('--report', dest='report_type', choices=['instructions', 'scatter', 'request_sizes', 'nano_request_bins', 'nano_request_bins_ysize'], default='instructions', help='The report type to produce (default: %(default)s)') - parser.add_argument('--call', dest='call', default='malloc', choices=['malloc', 'calloc', 'realloc', 'memalign', 'valloc'], help="The call to analyze (default: %(default)s)") - parser.add_argument('-f', '--xfilter', type=int, default=0, help="Filter the histogram to a range from 0 to <%(dest)s>") - parser.add_argument('-b', '--num_bins', type=int, default=10000, help="The number of bins to use for histogrammed data (default: %(default)s)") - parser.add_argument('-n', '--nano_malloc_cutoff', type=int, default=256, help="The cutoff size to filter for (default: %(default)s bytes)") - parser.add_argument('--merge_calloc', action='store_true', default=False, help='Merge calloc calls with malloc.
For use with the nano_request_bins, nano_request_bins_ysize and request_sizes reports.') - parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose debug logging') - output_group = parser.add_mutually_exclusive_group(required=True) - output_group.add_argument('-s', '--show_plot', action='store_true') - output_group.add_argument('-o', '--output', default='fig.pdf', help="The output file path, including type extension (default: %(default)s)") - - args = parser.parse_args() - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - - cls(args).run() - - -if __name__ == "__main__": - Tool.main() - diff --git a/src/libmalloc/tools/read-radix-tree b/src/libmalloc/tools/read-radix-tree deleted file mode 100644 index caa3dd4b9..000000000 --- a/src/libmalloc/tools/read-radix-tree +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/perl - -# read the radix tree out of a process and write it to stdout - -my $proc = shift; - -my @vmmap_out = `vmmap -vw -interleaved -noCoalesce $proc`; -my $pid; - -for (@vmmap_out) { - if (/^Process:.*\[(\d+)\]\s*$/) { - $pid = $1; - } - if (/^Performance tool data \s*([0-9a-fA-F]+)-([0-9a-fA-F]+)/) { - my $addr = $1; - my $end = $2; - if (`memread $pid 0x$addr 7` eq "radixv2") { - exec sprintf("memread $pid 0x$addr 0x%x", hex($end) - hex($addr)); - } - } -} - -printf STDERR "not found\n"; -exit 1; diff --git a/src/libmalloc/xcodeconfig/interposable.list b/src/libmalloc/xcodeconfig/interposable.list deleted file mode 100644 index 0e12582cc..000000000 --- a/src/libmalloc/xcodeconfig/interposable.list +++ /dev/null @@ -1 +0,0 @@ -_realloc diff --git a/src/libmalloc/xcodeconfig/libmalloc.xcconfig b/src/libmalloc/xcodeconfig/libmalloc.xcconfig deleted file mode 100644 index 5063fd347..000000000 --- a/src/libmalloc/xcodeconfig/libmalloc.xcconfig +++ /dev/null @@ -1,92 +0,0 @@ -#include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" - -SDKROOT = macosx.internal -SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator -BUILD_VARIANTS = normal debug - -SDK_INSTALL_VARIANT = $(SDK_INSTALL_VARIANT_$(DRIVERKIT)) -SDK_INSTALL_VARIANT_1 = driverkit -SDK_INSTALL_VARIANT_ = default -SDK_INSTALL_ROOT = $(SDK_INSTALL_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_ROOT_driverkit = $(DRIVERKITROOT) -SDK_INSTALL_HEADERS_ROOT = $(SDK_INSTALL_HEADERS_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_HEADERS_ROOT_driverkit = $(SDK_INSTALL_ROOT)/$(SDK_RUNTIME_HEADERS_PREFIX) -SDK_RUNTIME_HEADERS_PREFIX = Runtime - -PRODUCT_NAME = libsystem_malloc -INSTALL_PATH = $(SDK_INSTALL_ROOT)/usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/malloc -PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include -USE_HEADERMAP = NO - -SYSTEM_FRAMEWORK_HEADERS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders -HEADER_SEARCH_PATHS = $(DERIVED_FILES_DIR)/dtrace $(SRCROOT)/include $(SRCROOT)/private $(SRCROOT)/resolver $(inherited) -SYSTEM_HEADER_SEARCH_PATHS = $(SYSTEM_FRAMEWORK_HEADERS) $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include -SYSTEM_FRAMEWORK_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks - -GCC_PREPROCESSOR_DEFINITIONS = _FORTIFY_SOURCE=0 NDEBUG $(OSATOMIC_PREPROCESSOR_DEFINITIONS) $(PLATFORM_PREPROCESSOR_DEFINITIONS) -OSATOMIC_PREPROCESSOR_DEFINITIONS = OSATOMIC_USE_INLINED=1 OS_UNFAIR_LOCK_INLINE=1 -OSATOMIC_PREPROCESSOR_DEFINITIONS_NOINLINE = 
OSATOMIC_DEPRECATED=0 OSATOMIC_USE_INLINED=0 OS_UNFAIR_LOCK_INLINE=0 - -GCC_NO_COMMON_BLOCKS = YES -ENABLE_STRICT_OBJC_MSGSEND = YES - -// TODO: Add -fno-stack-protector when uplink to Libc is removed -OTHER_CFLAGS = $(PLATFORM_CFLAGS) -OTHER_CFLAGS_normal = -momit-leaf-frame-pointer -OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDEBUG=1 -UNDEBUG - -GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES -//GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES -GCC_WARN_UNUSED_FUNCTION = YES -GCC_WARN_UNUSED_LABEL = YES -//GCC_WARN_UNUSED_PARAMETER = YES -GCC_WARN_UNUSED_VALUE = YES -GCC_WARN_UNUSED_VARIABLE = YES -GCC_WARN_64_TO_32_BIT_CONVERSION = YES -GCC_WARN_ABOUT_RETURN_TYPE = YES -GCC_WARN_UNDECLARED_SELECTOR = YES -GCC_WARN_UNINITIALIZED_AUTOS = YES -CLANG_WARN_BOOL_CONVERSION = YES -CLANG_WARN_CONSTANT_CONVERSION = YES -CLANG_WARN_EMPTY_BODY = YES -CLANG_WARN_ENUM_CONVERSION = YES -CLANG_WARN_INFINITE_RECURSION = YES -CLANG_WARN_INT_CONVERSION = YES -CLANG_WARN_SUSPICIOUS_MOVE = YES -CLANG_WARN_UNREACHABLE_CODE = YES -CLANG_WARN__DUPLICATE_METHOD_MATCH = YES - -// clang doesn't understand the simple_printf %y specifier -WARNING_CFLAGS = -Wno-format-invalid-specifier -Wno-format-extra-args - -LLVM_LTO = LLVM_LTO_$(CURRENT_VARIANT) -LLVM_LTO_normal = YES -LLVM_LTO_debug = NO -DEAD_CODE_STRIPPING = NO - -IS_ZIPPERED = YES - -SIM_SUFFIX[sdk=*simulator*] = _sim -LINK_WITH_STANDARD_LIBRARIES = NO -OTHER_LDFLAGS = -all_load -L$(SDK_INSTALL_ROOT)/usr/lib/system -umbrella System $(CR_LDFLAGS) $(LIBCOMPILER_RT_LDFLAGS) $(LIBDYLD_LDFLAGS) $(LIBSYSCALL_LDFLAGS) $(LIBPLATFORM_LDFLAGS) $(PLATFORM_LDFLAGS) $(UPLINK_LDFLAGS) $(INTERPOSE_LDFLAGS) $(DIRTY_LDFLAGS) -LIBCOMPILER_RT_LDFLAGS = -lcompiler_rt -LIBPLATFORM_LDFLAGS = -lsystem$(SIM_SUFFIX)_platform -LIBSYSCALL_LDFLAGS = -lsystem$(SIM_SUFFIX)_kernel -LIBDYLD_LDFLAGS = -ldyld - -// TODO: Eliminate the crosslink between libmalloc and Libc (13046853) -UPLINK_LDFLAGS = -Wl,-upward-lsystem_c - -INTERPOSE_LDFLAGS = -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/interposable.list - -ORDER_FILE = $(SDKROOT)/$(APPLE_INTERNAL_DIR)/OrderFiles/$(PRODUCT_NAME).order -ORDER_FILE[sdk=*simulator*] = -ORDER_FILE[sdk=driverkit*] = - -DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) - - -SUPPORTS_TEXT_BASED_API = YES -TAPI_VERIFY_MODE = Pedantic -OTHER_TAPI_FLAGS = -umbrella System -extra-private-header $(SRCROOT)/private/make_tapi_happy.h -extra-private-header $(SRCROOT)/src/frozen_malloc.h -extra-private-header $(SRCROOT)/private/stack_logging.h -extra-private-header $(SRCROOT)/private/malloc_implementation.h diff --git a/src/libmalloc/xcodeconfig/libmalloc_eos.xcconfig b/src/libmalloc/xcodeconfig/libmalloc_eos.xcconfig deleted file mode 100644 index 273535abb..000000000 --- a/src/libmalloc/xcodeconfig/libmalloc_eos.xcconfig +++ /dev/null @@ -1,13 +0,0 @@ -#include "libmalloc.xcconfig" - -BUILD_VARIANTS = normal -EXECUTABLE_PREFIX = lib -GENERATE_MASTER_OBJECT_FILE = YES -INSTALL_PATH = /usr/local/lib/eOS -PRODUCT_NAME = malloc_eOS -SKIP_INSTALL = YES -SKIP_INSTALL[sdk=iphoneos*] = NO -STRIP_INSTALLED_PRODUCT = NO -VERSIONING_SYSTEM = apple-generic -MACH_O_TYPE = staticlib -OTHER_LDFLAGS = diff --git a/src/libmalloc/xcodeconfig/libmalloc_resolved.xcconfig b/src/libmalloc/xcodeconfig/libmalloc_resolved.xcconfig deleted file mode 100644 index 7a3823609..000000000 --- a/src/libmalloc/xcodeconfig/libmalloc_resolved.xcconfig +++ /dev/null @@ -1,10 +0,0 @@ -#include "libmalloc.xcconfig" - -SUPPORTED_PLATFORMS = iphoneos appletvos watchos -PRODUCT_NAME = 
malloc_$(RESOLVED_VARIANT) -OTHER_LDFLAGS = -SKIP_INSTALL = YES -VERSIONING_SYSTEM = -EXCLUDED_SOURCE_FILE_NAMES = * - - diff --git a/src/libmalloc/xcodeconfig/libmalloc_resolver.xcconfig b/src/libmalloc/xcodeconfig/libmalloc_resolver.xcconfig deleted file mode 100644 index fe46f99d5..000000000 --- a/src/libmalloc/xcodeconfig/libmalloc_resolver.xcconfig +++ /dev/null @@ -1,3 +0,0 @@ -#include "libmalloc.xcconfig" - - diff --git a/src/libmalloc/xcodeconfig/libmalloc_static.xcconfig b/src/libmalloc/xcodeconfig/libmalloc_static.xcconfig deleted file mode 100644 index 6b37fab5f..000000000 --- a/src/libmalloc/xcodeconfig/libmalloc_static.xcconfig +++ /dev/null @@ -1,11 +0,0 @@ -#include "libmalloc.xcconfig" - -BUILD_VARIANTS = normal debug -EXECUTABLE_PREFIX = lib -GENERATE_MASTER_OBJECT_FILE = YES -INSTALL_PATH = /usr/local/lib/system -PRODUCT_NAME = malloc -STRIP_INSTALLED_PRODUCT = NO -VERSIONING_SYSTEM = apple-generic -MACH_O_TYPE = staticlib -OTHER_LDFLAGS = diff --git a/src/libmalloc/xcodescripts/install-codes.sh b/src/libmalloc/xcodescripts/install-codes.sh deleted file mode 100644 index 0c3eebf87..000000000 --- a/src/libmalloc/xcodescripts/install-codes.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -e - -if [ "${DRIVERKIT}" = 1 ]; then exit 0; fi - -# install kdebug trace files based on the input file -INPUT=${SCRIPT_INPUT_FILE_0} -OUTPUT=${SCRIPT_OUTPUT_FILE_0} - -# pre-process the source and pass it through perl -xcrun cc -E -I${SDKROOT}/System/Library/Frameworks/System.framework/PrivateHeaders -D_MALLOC_BUILDING_CODES_ "${INPUT}" | perl > "${OUTPUT}" diff --git a/src/libmalloc/xcodescripts/manpages.sh b/src/libmalloc/xcodescripts/manpages.sh deleted file mode 100644 index 213f0b9af..000000000 --- a/src/libmalloc/xcodescripts/manpages.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -e - -if [ "$ACTION" = installhdrs ]; then exit 0; fi -if [[ "$PLATFORM_NAME" != "macosx" ]]; then exit 0; fi - -UNIFDEF_FLAGS="" - -MANPAGES_LIST="${SRCROOT}/man/manpages.lst" -FILES=$(find -E ${SRCROOT} -regex '.*/[^.]+\.[0-9]' -type f) - -cat ${MANPAGES_LIST} | grep -v -E '(^#|^\s*$)' | while read first solid rest; do - SOURCE=$(grep -E "/${first}$" <<< "${FILES}") [...]
diff --git a/src/libmalloc/xcodescripts/reindent.sh b/src/libmalloc/xcodescripts/reindent.sh deleted file mode [...] --- a/src/libmalloc/xcodescripts/reindent.sh +++ /dev/null @@ -1,27 +0,0 @@ [...] 1>&2 - exit 1 -fi - -echo "Using ${CLANGFORMAT} to reindent, using concurrency of ${CPUS}" - -find -x . \! \( \( -name BUILD -o -name EXTERNAL_HEADERS -o -name libMicro -o -name zlib -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -type f \( -name \*.c -o -name \*.cpp \) -print0 | \ - xargs -0 -P "${CPUS}" -n 10 "${CLANGFORMAT}" -style=file -i -ret=$? - -if [ $ret -ne 0 ]; then - echo "reindent failed: $ret" 1>&2 - exit 1 -fi - -exit 0 - diff --git a/src/libmalloc/xcodescripts/sanitise_headers.sh b/src/libmalloc/xcodescripts/sanitise_headers.sh deleted file mode 100755 index 41f466939..000000000 --- a/src/libmalloc/xcodescripts/sanitise_headers.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -e -# -# Copyright (c) 2010-2011 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - From de252b5b7abea3a9df587049253e6f2f2e21f4d3 Mon Sep 17 00:00:00 2001 From: Thomas A Date: Sun, 3 Apr 2022 13:41:53 -0700 Subject: [PATCH 2/4] Add `libmalloc` submodule --- .gitmodules | 3 +++ src/external/libmalloc | 1 + 2 files changed, 4 insertions(+) create mode 160000 src/external/libmalloc diff --git a/.gitmodules b/.gitmodules index 92afa280c..14bc83340 100644 --- a/.gitmodules +++ b/.gitmodules @@ -372,3 +372,6 @@ [submodule "src/external/pyobjc"] path = src/external/pyobjc url = ../darling-pyobjc.git +[submodule "src/external/libmalloc"] + path = src/external/libmalloc + url = https://github.com/darlinghq/darling-libmalloc.git diff --git a/src/external/libmalloc b/src/external/libmalloc new file mode 160000 index 000000000..b3cb239dd --- /dev/null +++ b/src/external/libmalloc @@ -0,0 +1 @@ +Subproject commit b3cb239ddb6f0d707881bafdd01fd69f80956c24 From 0e42ca3e7a146ccd96a1b96b38c996476b789954 Mon Sep 17 00:00:00 2001 From: Thomas A Date: Sun, 3 Apr 2022 17:05:51 -0700 Subject: [PATCH 3/4] Fix Building `libmalloc` --- .../Developer/SDKs/MacOSX.sdk/usr/include/malloc/_malloc.h | 2 +- .../Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc.h | 2 +- .../SDKs/MacOSX.sdk/usr/include/malloc/malloc_private.h | 1 - .../SDKs/MacOSX.sdk/usr/include/malloc_implementation.h | 2 +- .../Developer/SDKs/MacOSX.sdk/usr/include/malloc_private.h | 1 + .../Developer/SDKs/MacOSX.sdk/usr/include/stack_logging.h | 2 +- Developer/symlinks.sh | 2 +- cmake/use_ld64.cmake | 2 +- src/CMakeLists.txt | 2 +- src/libsystem/CMakeLists.txt | 2 +- 10 files changed, 9 insertions(+), 9 deletions(-) delete mode 120000 Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc_private.h create mode 120000 Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_private.h diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/_malloc.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/_malloc.h index 7f08d407d..4d224787d 120000 --- a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/_malloc.h +++ b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/_malloc.h @@ -1 +1 @@ -../../../../../../../../../src/libmalloc/include/malloc/_malloc.h \ No newline at end of file +../../../../../../../../../src/external/libmalloc/include/malloc/_malloc.h \ No newline at end of file diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc.h index e3f9bcee0..13d6e93cf 120000 --- a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc.h +++ b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc.h @@ -1 +1 @@ -../../../../../../../../../src/libmalloc/include/malloc/malloc.h \ No newline at end of file +../../../../../../../../../src/external/libmalloc/include/malloc/malloc.h \ No newline at end of file diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc_private.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc_private.h deleted file mode 120000 index 661f3aa60..000000000 --- 
a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc/malloc_private.h +++ /dev/null @@ -1 +0,0 @@ -../../../../../../../../../src/libmalloc/private/malloc_private.h \ No newline at end of file diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_implementation.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_implementation.h index f3d072a7c..b66a6b801 120000 --- a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_implementation.h +++ b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_implementation.h @@ -1 +1 @@ -../../../../../../../../src/libmalloc/private/malloc_implementation.h \ No newline at end of file +../../../../../../../../src/external/libmalloc/private/malloc_implementation.h \ No newline at end of file diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_private.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_private.h new file mode 120000 index 000000000..1b941605f --- /dev/null +++ b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/malloc_private.h @@ -0,0 +1 @@ +../../../../../../../../src/external/libmalloc/private/malloc_private.h \ No newline at end of file diff --git a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/stack_logging.h b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/stack_logging.h index 5703528b5..75ee6fad9 120000 --- a/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/stack_logging.h +++ b/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/stack_logging.h @@ -1 +1 @@ -../../../../../../../../src/libmalloc/private/stack_logging.h \ No newline at end of file +../../../../../../../../src/external/libmalloc/private/stack_logging.h \ No newline at end of file diff --git a/Developer/symlinks.sh b/Developer/symlinks.sh index 06fd2f32c..459c89dcb 100755 --- a/Developer/symlinks.sh +++ b/Developer/symlinks.sh @@ -18,7 +18,7 @@ ln -sf "$BUILD/src/libremovefile/libremovefile.dylib" "$LIBS/system" ln -sf "$BUILD/src/copyfile/libcopyfile.dylib" "$LIBS/system" ln -sf "$BUILD/src/libsystem_coreservices/libsystem_coreservices.dylib" "$LIBS/system" ln -sf "$BUILD/src/external/coretls/libsystem_coretls.dylib" "$LIBS/system" -ln -sf "$BUILD/src/libmalloc/libsystem_malloc.dylib" "$LIBS/system" +ln -sf "$BUILD/src/external/libmalloc/libsystem_malloc.dylib" "$LIBS/system" ln -sf "$BUILD/src/libc/libsystem_c.dylib" "$LIBS/system" ln -sf "$BUILD/src/kernel/libsystem_kernel.dylib" "$LIBS/system" ln -sf "$BUILD/src/keymgr/libkeymgr.dylib" "$LIBS/system" diff --git a/cmake/use_ld64.cmake b/cmake/use_ld64.cmake index e9f41b333..32fd86b11 100644 --- a/cmake/use_ld64.cmake +++ b/cmake/use_ld64.cmake @@ -40,7 +40,7 @@ FUNCTION(use_ld64 target) -Wl,-dylib_file,/usr/lib/libresolv.9.dylib:${CMAKE_BINARY_DIR}/src/libresolv/libresolv.9.dylib \ -Wl,-dylib_file,/usr/lib/system/libxpc.dylib:${CMAKE_BINARY_DIR}/src/external/libxpc/libxpc_firstpass.dylib \ -Wl,-dylib_file,/usr/lib/libc++.1.dylib:${CMAKE_BINARY_DIR}/src/external/libcxx/libc++.1.dylib \ --Wl,-dylib_file,/usr/lib/system/libsystem_malloc.dylib:${CMAKE_BINARY_DIR}/src/libmalloc/libsystem_malloc_firstpass.dylib \ +-Wl,-dylib_file,/usr/lib/system/libsystem_malloc.dylib:${CMAKE_BINARY_DIR}/src/external/libmalloc/libsystem_malloc_firstpass.dylib \ 
-Wl,-dylib_file,/usr/lib/libobjc.A.dylib:${CMAKE_BINARY_DIR}/src/external/objc4/runtime/libobjc.A.dylib \ -Wl,-dylib_file,/usr/lib/libicucore.A.dylib:${CMAKE_BINARY_DIR}/src/external/icu/icuSources/libicucore.A.dylib \ -Wl,-dylib_file,/usr/lib/libncurses.5.4.dylib:${CMAKE_BINARY_DIR}/src/ncurses/ncurses/libncurses.5.4.dylib \ diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 5d89ae3f5..599cc8408 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -132,7 +132,7 @@ add_subdirectory(libgcc) add_subdirectory(copyfile) add_subdirectory(libinfo) add_subdirectory(quarantine) -add_subdirectory(libmalloc) +add_subdirectory(external/libmalloc) add_subdirectory(libunwind) add_subdirectory(networkextension) add_subdirectory(libsystem) diff --git a/src/libsystem/CMakeLists.txt b/src/libsystem/CMakeLists.txt index 5e3fd8bbc..31ea7b81e 100644 --- a/src/libsystem/CMakeLists.txt +++ b/src/libsystem/CMakeLists.txt @@ -54,7 +54,7 @@ libsystem_reexport( system_copyfile src/copyfile/libcopyfile.dylib system_coreservices src/libsystem_coreservices/libsystem_coreservices.dylib system_coretls src/external/coretls/libsystem_coretls.dylib - system_malloc src/libmalloc/libsystem_malloc.dylib + system_malloc src/external/libmalloc/libsystem_malloc.dylib system_c src/libc/libsystem_c.dylib system_kernel src/kernel/libsystem_kernel.dylib system_trace src/external/libtrace/libsystem_trace.dylib From 8d343d3679273b095ef2365ab22567b63cb71b35 Mon Sep 17 00:00:00 2001 From: Thomas A Date: Sun, 3 Apr 2022 21:26:45 -0700 Subject: [PATCH 4/4] Fix `libmalloc` in `libc` CMakeLists.txt --- src/libc/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libc/CMakeLists.txt b/src/libc/CMakeLists.txt index 6e390e753..fcfd060ec 100644 --- a/src/libc/CMakeLists.txt +++ b/src/libc/CMakeLists.txt @@ -30,8 +30,8 @@ include_directories( "${CMAKE_CURRENT_SOURCE_DIR}/fbsdcompat" "${CMAKE_CURRENT_SOURCE_DIR}/darwin" "${CMAKE_CURRENT_SOURCE_DIR}/derived" - "${CMAKE_CURRENT_SOURCE_DIR}/../libmalloc/src" - "${CMAKE_CURRENT_SOURCE_DIR}/../libmalloc/private" + "${CMAKE_CURRENT_SOURCE_DIR}/../external/libmalloc/src" + "${CMAKE_CURRENT_SOURCE_DIR}/../external/libmalloc/private" "${CMAKE_CURRENT_SOURCE_DIR}/locale" "${CMAKE_CURRENT_SOURCE_DIR}/gen" "${CMAKE_CURRENT_SOURCE_DIR}/locale/FreeBSD"