diff --git a/ios-m1-simulator/libs/libtbb_static.a b/ios-m1-simulator/libs/libtbb_static.a index afdf8640..7af2843f 100644 Binary files a/ios-m1-simulator/libs/libtbb_static.a and b/ios-m1-simulator/libs/libtbb_static.a differ diff --git a/ios-m1-simulator/libs/libtbbmalloc_proxy_static.a b/ios-m1-simulator/libs/libtbbmalloc_proxy_static.a index ff7c6e76..ea4f8745 100644 Binary files a/ios-m1-simulator/libs/libtbbmalloc_proxy_static.a and b/ios-m1-simulator/libs/libtbbmalloc_proxy_static.a differ diff --git a/ios-m1-simulator/libs/libtbbmalloc_static.a b/ios-m1-simulator/libs/libtbbmalloc_static.a index 87fa3936..05ca2b9e 100644 Binary files a/ios-m1-simulator/libs/libtbbmalloc_static.a and b/ios-m1-simulator/libs/libtbbmalloc_static.a differ diff --git a/linux/CMakeLists.txt b/linux/CMakeLists.txt index 8ca12157..03141737 100644 --- a/linux/CMakeLists.txt +++ b/linux/CMakeLists.txt @@ -118,9 +118,9 @@ set_target_properties(tbb PROPERTIES set(optional_libs_name ${optional_libs_name} tbb) endif() -add_library(sdl2 SHARED IMPORTED GLOBAL) +add_library(sdl2 STATIC IMPORTED GLOBAL) set_target_properties(sdl2 PROPERTIES - IMPORTED_LOCATION ${linux_lib_dir}/sdl2/libSDL2.so + IMPORTED_LOCATION ${linux_lib_dir}/sdl2/libSDL2.a INTERFACE_INCLUDE_DIRECTORIES ${platform_spec_path}/include/sdl2 ) diff --git a/mac/CMakeLists.txt b/mac/CMakeLists.txt index d578ccac..29b34da0 100644 --- a/mac/CMakeLists.txt +++ b/mac/CMakeLists.txt @@ -80,14 +80,16 @@ set_target_properties(GLESv2 PROPERTIES set(se_libs_name) -if(USE_SE_V8) - list(APPEND se_libs_name - v8_monolith - uv - ) - if(USE_V8_DEBUGGER) - list(APPEND se_libs_name inspector) - endif() +if(CC_EDITOR) + message(STATUS "CC_EDITOR USE V8 FROM NODEJS") +elseif(USE_SE_V8) + list(APPEND se_libs_name + v8_monolith + uv + ) + if(USE_V8_DEBUGGER) + list(APPEND se_libs_name inspector) + endif() endif() # if(USE_SOCKETS) diff --git a/sources/boost-source/boost.cmake b/sources/boost-source/boost.cmake index 9ac0be24..df0cd561 100644 --- a/sources/boost-source/boost.cmake +++ b/sources/boost-source/boost.cmake @@ -11,3 +11,4 @@ foreach(lib ${BOOST_LIB_NAMES}) set_target_properties(boost_${lib} PROPERTIES FOLDER Utils) list(APPEND CC_EXTERNAL_LIBS boost_${lib}) endforeach() + diff --git a/sources/jemalloc/CMakeLists.txt b/sources/jemalloc/CMakeLists.txt deleted file mode 100755 index 08b08b58..00000000 --- a/sources/jemalloc/CMakeLists.txt +++ /dev/null @@ -1,57 +0,0 @@ -message(STATUS "Add dependence target: jemalloc ...") - -# Define target name -set(TARGET_NAME jemalloc) - -if(MSVC) - set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-win ${COCOS_EXTERNAL_PATH}/jemalloc/include-win/msvc_compat) -else() - set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-linux) -endif() - -include_directories( ${INCLUDE_FILES} ) - -set (SOURCE_FILES - src/je_arena.c - src/je_atomic.c - src/je_base.c - src/je_bitmap.c - src/je_chunk.c - src/je_chunk_dss.c - src/je_chunk_mmap.c - src/je_ckh.c - src/je_ctl.c - src/je_extent.c - src/je_hash.c - src/je_huge.c - src/je_jemalloc.c - src/je_mb.c - src/je_mutex.c - src/je_nstime.c - src/je_pages.c - src/je_prng.c - src/je_prof.c - src/je_quarantine.c - src/je_rtree.c - src/je_spin.c - src/je_stats.c - src/je_tcache.c - src/je_ticker.c - src/je_tsd.c - src/je_util.c - src/je_witness.c -) - -add_library(${TARGET_NAME} STATIC ${SOURCE_FILES}) - -if(MSVC) - set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "/wd4996") -endif() - -if(NOT COCOS_PLATFORM_IOS) - set_target_properties(${TARGET_NAME} PROPERTIES 
OUTPUT_NAME_DEBUG jemalloc_d) -endif() - -set_target_properties(${TARGET_NAME} PROPERTIES FOLDER External) - -message(STATUS "${TARGET_NAME} Configuration completed.") diff --git a/sources/jemalloc/ChangeLog b/sources/jemalloc/ChangeLog deleted file mode 100755 index a9406853..00000000 --- a/sources/jemalloc/ChangeLog +++ /dev/null @@ -1,1043 +0,0 @@ -Following are change highlights associated with official releases. Important -bug fixes are all mentioned, but some internal enhancements are omitted here for -brevity. Much more detail can be found in the git revision history: - - https://github.com/jemalloc/jemalloc - -* 4.5.0 (February 28, 2017) - - This is the first release to benefit from much broader continuous integration - testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure - in place for prior releases, it would have caught all of the most serious - regressions fixed by this release. - - New features: - - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for - transparent huge page integration. (@jasone) - - Update zone allocator integration to work with macOS 10.12. (@glandium) - - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and - EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not - during configuration. (@jasone, @ronawho) - - Bug fixes: - - Fix DSS (sbrk(2)-based) allocation. This regression was first released in - 4.3.0. (@jasone) - - Handle race in per size class utilization computation. This functionality - was first released in 4.0.0. (@interwq) - - Fix lock order reversal during gdump. (@jasone) - - Fix/refactor tcache synchronization. This regression was first released in - 4.0.0. (@jasone) - - Fix various JSON-formatted malloc_stats_print() bugs. This functionality - was first released in 4.3.0. (@jasone) - - Fix huge-aligned allocation. This regression was first released in 4.4.0. - (@jasone) - - When transparent huge page integration is enabled, detect what state pages - start in according to the kernel's current operating mode, and only convert - arena chunks to non-huge during purging if that is not their initial state. - This functionality was first released in 4.4.0. (@jasone) - - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. - This regression was first released in 4.0.0. (@jasone, @428desmo) - - Properly detect sparc64 when building for Linux. (@glaubitz) - -* 4.4.0 (December 3, 2016) - - New features: - - Add configure support for *-*-linux-android. (@cferris1000, @jasone) - - Add the --disable-syscall configure option, for use on systems that place - security-motivated limitations on syscall(2). (@jasone) - - Add support for Debian GNU/kFreeBSD. (@thesam) - - Optimizations: - - Add extent serial numbers and use them where appropriate as a sort key that - is higher priority than address, so that the allocation policy prefers older - extents. This tends to improve locality (decrease fragmentation) when - memory grows downward. (@jasone) - - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized - on Linux 4.5 and newer. (@jasone) - - Mark partially purged arena chunks as non-huge-page. This improves - interaction with Linux's transparent huge page functionality. (@jasone) - - Bug fixes: - - Fix size class computations for edge conditions involving extremely large - allocations. This regression was first released in 4.0.0. (@jasone, - @ingvarha) - - Remove overly restrictive assertions related to the cactive statistic.
This - regression was first released in 4.1.0. (@jasone) - - Implement a more reliable detection scheme for os_unfair_lock on macOS. - (@jszakmeister) - -* 4.3.1 (November 7, 2016) - - Bug fixes: - - Fix a severe virtual memory leak. This regression was first released in - 4.3.0. (@interwq, @jasone) - - Refactor atomic and prng APIs to restore support for 32-bit platforms that - use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) - -* 4.3.0 (November 4, 2016) - - This is the first release that passes the test suite for multiple Windows - configurations, thanks in large part to @glandium setting up continuous - integration via AppVeyor (and Travis CI for Linux and OS X). - - New features: - - Add "J" (JSON) support to malloc_stats_print(). (@jasone) - - Add Cray compiler support. (@ronawho) - - Optimizations: - - Add/use adaptive spinning for bootstrapping and radix tree node - initialization. (@jasone) - - Bug fixes: - - Fix large allocation to search starting in the optimal size class heap, - which can substantially reduce virtual memory churn and fragmentation. This - regression was first released in 4.0.0. (@mjp41, @jasone) - - Fix stats.arenas..nthreads accounting. (@interwq) - - Fix and simplify decay-based purging. (@jasone) - - Make DSS (sbrk(2)-related) operations lockless, which resolves potential - deadlocks during thread exit. (@jasone) - - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, - @jasone) - - Fix over-sized allocation of arena_t (plus associated stats) data - structures. (@jasone, @interwq) - - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) - - Fix a Valgrind integration bug. (@ronawho) - - Disallow 0x5a junk filling when running in Valgrind. (@jasone) - - Fix a file descriptor leak on Linux. This regression was first released in - 4.2.0. (@vsarunas, @jasone) - - Fix static linking of jemalloc with glibc. (@djwatson) - - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This - works around other libraries' system call wrappers performing reentrant - allocation. (@kspinka, @Whissi, @jasone) - - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, - @jasone) - - Fix cached memory management to avoid needless commit/decommit operations - during purging, which resolves permanent virtual memory map fragmentation - issues on Windows. (@mjp41, @jasone) - - Fix TSD fetches to avoid (recursive) allocation. This is relevant to - non-TLS and Windows configurations. (@jasone) - - Fix malloc_conf overriding to work on Windows. (@jasone) - - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) - -* 4.2.1 (June 8, 2016) - - Bug fixes: - - Fix bootstrapping issues for configurations that require allocation during - tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) - - Fix gettimeofday() version of nstime_update(). (@ronawho) - - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) - - Fix potential VM map fragmentation regression. (@jasone) - - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) - - Fix heap profiling context leaks in reallocation edge cases. (@jasone) - -* 4.2.0 (May 12, 2016) - - New features: - - Add the arena..reset mallctl, which makes it possible to discard all of - an arena's allocations in a single operation. (@jasone) - - Add the stats.retained and stats.arenas..retained statistics. (@jasone) - - Add the --with-version configure option. (@jasone) - - Support --with-lg-page values larger than actual page size. 
(@jasone) - - Optimizations: - - Use pairing heaps rather than red-black trees for various hot data - structures. (@djwatson, @jasone) - - Streamline fast paths of rtree operations. (@jasone) - - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) - - Decommit unused virtual memory if the OS does not overcommit. (@jasone) - - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order - to avoid unfortunate interactions during fork(2). (@jasone) - - Bug fixes: - - Fix chunk accounting related to triggering gdump profiles. (@jasone) - - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) - - Scale leak report summary according to sampling probability. (@jasone) - -* 4.1.1 (May 3, 2016) - - This bugfix release resolves a variety of mostly minor issues, though the - bitmap fix is critical for 64-bit Windows. - - Bug fixes: - - Fix the linear scan version of bitmap_sfu() to shift by the proper amount - even when sizeof(long) is not the same as sizeof(void *), as on 64-bit - Windows. (@jasone) - - Fix hashing functions to avoid unaligned memory accesses (and resulting - crashes). This is relevant at least to some ARM-based platforms. - (@rkmisra) - - Fix fork()-related lock rank ordering reversals. These reversals were - unlikely to cause deadlocks in practice except when heap profiling was - enabled and active. (@jasone) - - Fix various chunk leaks in OOM code paths. (@jasone) - - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) - - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) - - Fix a variety of test failures that were due to test fragility rather than - core bugs. (@jasone) - -* 4.1.0 (February 28, 2016) - - This release is primarily about optimizations, but it also incorporates a lot - of portability-motivated refactoring and enhancements. Many people worked on - this release, to an extent that even with the omission here of minor changes - (see git revision history), and of the people who reported and diagnosed - issues, so much of the work was contributed that starting with this release, - changes are annotated with author credits to help reflect the collaborative - effort involved. - - New features: - - Implement decay-based unused dirty page purging, a major optimization with - mallctl API impact. This is an alternative to the existing ratio-based - unused dirty page purging, and is intended to eventually become the sole - purging mechanism. New mallctls: - + opt.purge - + opt.decay_time - + arena..decay - + arena..decay_time - + arenas.decay_time - + stats.arenas..decay_time - (@jasone, @cevans87) - - Add --with-malloc-conf, which makes it possible to embed a default - options string during configuration. This was motivated by the desire to - specify --with-malloc-conf=purge:decay , since the default must remain - purge:ratio until the 5.0.0 release. (@jasone) - - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) - - Make *allocx() size class overflow behavior defined. The maximum - size class is now less than PTRDIFF_MAX to protect applications against - numerical overflow, and all allocation functions are guaranteed to indicate - errors rather than potentially crashing if the request size exceeds the - maximum size class. (@jasone) - - jeprof: - + Add raw heap profile support. (@jasone) - + Add --retain and --exclude for backtrace symbol filtering. 
(@jasone) - - Optimizations: - - Optimize the fast path to combine various bootstrapping and configuration - checks and execute more streamlined code in the common case. (@interwq) - - Use linear scan for small bitmaps (used for small object tracking). In - addition to speeding up bitmap operations on 64-bit systems, this reduces - allocator metadata overhead by approximately 0.2%. (@djwatson) - - Separate arena_avail trees, which substantially speeds up run tree - operations. (@djwatson) - - Use memoization (boot-time-computed table) for run quantization. Separate - arena_avail trees reduced the importance of this optimization. (@jasone) - - Attempt mmap-based in-place huge reallocation. This can dramatically speed - up incremental huge reallocation. (@jasone) - - Incompatible changes: - - Make opt.narenas unsigned rather than size_t. (@jasone) - - Bug fixes: - - Fix stats.cactive accounting regression. (@rustyx, @jasone) - - Handle unaligned keys in hash(). This caused problems for some ARM systems. - (@jasone, @cferris1000) - - Refactor arenas array. In addition to fixing a fork-related deadlock, this - makes arena lookups faster and simpler. (@jasone) - - Move retained memory allocation out of the default chunk allocation - function, to a location that gets executed even if the application installs - a custom chunk allocation function. This resolves a virtual memory leak. - (@buchgr) - - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) - - Fix run quantization. In practice this bug had no impact unless - applications requested memory with alignment exceeding one page. - (@jasone, @djwatson) - - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) - - jeprof: - + Don't discard curl options if timeout is not defined. (@djwatson) - + Detect failed profile fetches. (@djwatson) - - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for - --disable-stats case. (@jasone) - -* 4.0.4 (October 24, 2015) - - This bugfix release fixes another xallocx() regression. No other regressions - have come to light in over a month, so this is likely a good starting point - for people who prefer to wait for "dot one" releases with all the major issues - shaken out. - - Bug fixes: - - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large - allocations that have been randomly assigned an offset of 0 when - --enable-cache-oblivious configure option is enabled. - -* 4.0.3 (September 24, 2015) - - This bugfix release continues the trend of xallocx() and heap profiling fixes. - - Bug fixes: - - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large - allocations when --enable-cache-oblivious configure option is enabled. - - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations - when resizing from/to a size class that is not a multiple of the chunk size. - - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap - profile dumping started. - - Work around a potentially bad thread-specific data initialization - interaction with NPTL (glibc's pthreads implementation). - -* 4.0.2 (September 21, 2015) - - This bugfix release addresses a few bugs specific to heap profiling. - - Bug fixes: - - Fix ixallocx_prof_sample() to never modify nor create sampled small - allocations. xallocx() is in general incapable of moving small allocations, - so this fix removes buggy code without loss of generality. - - Fix irallocx_prof_sample() to always allocate large regions, even when - alignment is non-zero.
- - Fix prof_alloc_rollback() to read tdata from thread-specific data rather - than dereferencing a potentially invalid tctx. - -* 4.0.1 (September 15, 2015) - - This is a bugfix release that is somewhat high risk due to the amount of - refactoring required to address deep xallocx() problems. As a side effect of - these fixes, xallocx() now tries harder to partially fulfill requests for - optional extra space. Note that a couple of minor heap profiling - optimizations are included, but these are better thought of as performance - fixes that were integral to discovering most of the other bugs. - - Optimizations: - - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the - fast path when heap profiling is enabled. Additionally, split a special - case out into arena_prof_tctx_reset(), which also avoids chunk metadata - reads. - - Optimize irallocx_prof() to optimistically update the sampler state. The - prior implementation appears to have been a holdover from when - rallocx()/xallocx() functionality was combined as rallocm(). - - Bug fixes: - - Fix TLS configuration such that it is enabled by default for platforms on - which it works correctly. - - Fix arenas_cache_cleanup() and arena_get_hard() to handle - allocation/deallocation within the application's thread-specific data - cleanup functions even after arenas_cache is torn down. - - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. - - Fix chunk purge hook calls for in-place huge shrinking reallocation to - specify the old chunk size rather than the new chunk size. This bug caused - no correctness issues for the default chunk purge function, but was - visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl. - - Fix heap profiling bugs: - + Fix heap profiling to distinguish among otherwise identical sample sites - with interposed resets (triggered via the "prof.reset" mallctl). This bug - could cause data structure corruption that would most likely result in a - segfault. - + Fix irealloc_prof() to prof_alloc_rollback() on OOM. - + Make one call to prof_active_get_unlocked() per allocation event, and use - the result throughout the relevant functions that handle an allocation - event. Also add a missing check in prof_realloc(). These fixes protect - allocation events against concurrent prof_active changes. - + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() - in the correct order. - + Fix prof_realloc() to call prof_free_sampled_object() after calling - prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were - the same, the tctx could have been prematurely destroyed. - - Fix portability bugs: - + Don't bitshift by negative amounts when encoding/decoding run sizes in - chunk header maps. This affected systems with page sizes greater than 8 - KiB. - + Rename index_t to szind_t to avoid an existing type on Solaris. - + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to - match glibc and avoid compilation errors when including both - jemalloc/jemalloc.h and malloc.h in C++ code. - + Don't assume that /bin/sh is appropriate when running size_classes.sh - during configuration. - + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. - + Link tests to librt if it contains clock_gettime(2). - -* 4.0.0 (August 17, 2015) - - This version contains many speed and space optimizations, both minor and - major. The major themes are generalization, unification, and simplification.
- Although many of these optimizations cause no visible behavior change, their - cumulative effect is substantial. - - New features: - - Normalize size class spacing to be consistent across the complete size - range. By default there are four size classes per size doubling, but this - is now configurable via the --with-lg-size-class-group option. Also add the - --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and - --with-lg-tiny-min options, which can be used to tweak page and size class - settings. Impacts: - + Worst case performance for incrementally growing/shrinking reallocation - is improved because there are far fewer size classes, and therefore - copying happens less often. - + Internal fragmentation is limited to 20% for all but the smallest size - classes (those less than four times the quantum). (1B + 4 KiB) - and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. - + Chunk fragmentation tends to be lower because there are fewer distinct run - sizes to pack. - - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and - "tcache.destroy" mallctls control tcache lifetime and flushing, and the - MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API - control which tcache is used for each operation. - - Implement per thread heap profiling, as well as the ability to - enable/disable heap profiling on a per thread basis. Add the "prof.reset", - "prof.lg_sample", "thread.prof.name", "thread.prof.active", - "opt.prof_thread_active_init", "prof.thread_active_init", and - "thread.prof.active" mallctls. - - Add support for per arena application-specified chunk allocators, configured - via the "arena.<i>.chunk_hooks" mallctl. - - Refactor huge allocation to be managed by arenas, so that arenas now - function as general purpose independent allocators. This is important in - the context of user-specified chunk allocators, aside from the scalability - benefits. Related new statistics: - + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc", - "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests" - mallctls provide high level per arena huge allocation statistics. - + The "arenas.nhchunks", "arenas.hchunk.<i>.size", - "stats.arenas.<i>.hchunks.<j>.nmalloc", - "stats.arenas.<i>.hchunks.<j>.ndalloc", - "stats.arenas.<i>.hchunks.<j>.nrequests", and - "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class - statistics. - - Add the 'util' column to malloc_stats_print() output, which reports the - proportion of available regions that are currently in use for each small - size class. - - Add "alloc" and "free" modes for junk filling (see the "opt.junk" - mallctl), so that it is possible to separately enable junk filling for - allocation versus deallocation. - - Add the jemalloc-config script, which provides information about how - jemalloc was configured, and how to integrate it into application builds. - - Add metadata statistics, which are accessible via the "stats.metadata", - "stats.arenas.<i>.metadata.mapped", and - "stats.arenas.<i>.metadata.allocated" mallctls. - - Add the "stats.resident" mallctl, which reports the upper limit of - physically resident memory mapped by the allocator. - - Add per arena control over unused dirty page purging, via the - "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and - "stats.arenas.<i>.lg_dirty_mult" mallctls. - - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump - feature on/off during program execution. - - Add sdallocx(), which implements sized deallocation.
The primary - optimization over dallocx() is the removal of a metadata read, which often - suffers an L1 cache miss. - - Add missing header includes in jemalloc/jemalloc.h, so that applications - only have to #include <jemalloc/jemalloc.h>. - - Add support for additional platforms: - + Bitrig - + Cygwin - + DragonFlyBSD - + iOS - + OpenBSD - + OpenRISC/or1k - - Optimizations: - - Maintain dirty runs in per arena LRUs rather than in per arena trees of - dirty-run-containing chunks. In practice this change significantly reduces - dirty page purging volume. - - Integrate whole chunks into the unused dirty page purging machinery. This - reduces the cost of repeated huge allocation/deallocation, because it - effectively introduces a cache of chunks. - - Split the arena chunk map into two separate arrays, in order to increase - cache locality for the frequently accessed bits. - - Move small run metadata out of runs, into arena chunk headers. This reduces - run fragmentation, smaller runs reduce external fragmentation for small size - classes, and packed (less uniformly aligned) metadata layout improves CPU - cache set distribution. - - Randomly distribute large allocation base pointer alignment relative to page - boundaries in order to more uniformly utilize CPU cache sets. This can be - disabled via the --disable-cache-oblivious configure option, and queried via - the "config.cache_oblivious" mallctl. - - Micro-optimize the fast paths for the public API functions. - - Refactor thread-specific data to reside in a single structure. This assures - that only a single TLS read is necessary per call into the public API. - - Implement in-place huge allocation growing and shrinking. - - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make - additional optimizations that reduce maximum lookup depth to one or two - levels. This resolves what was a concurrency bottleneck for per arena huge - allocation, because a global data structure is critical for determining - which arenas own which huge allocations. - - Incompatible changes: - - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious - warnings by default. - - Assure that the constness of malloc_usable_size()'s return type matches that - of the system implementation. - - Change the heap profile dump format to support per thread heap profiling, - rename pprof to jeprof, and enhance it with the --thread=<n> option. As a - result, the bundled jeprof must now be used rather than the upstream - (gperftools) pprof. - - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can - internally deadlock on some platforms. - - Change the "arenas.nlruns" mallctl type from size_t to unsigned. - - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with - "stats.arenas.<i>.bins.<j>.curregs". - - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. - - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the - MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. - - Removed features: - - Remove the *allocm() API, which is superseded by the *allocx() API. - - Remove the --enable-dss options, and make dss non-optional on all platforms - which support sbrk(2). - - Remove the "arenas.purge" mallctl, which was obsoleted by the - "arena.<i>.purge" mallctl in 3.1.0. - - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically - detects whether it is running inside Valgrind. - - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and - "stats.huge.ndalloc" mallctls. - - Remove the --enable-mremap option.
- - Remove the "stats.chunks.current", "stats.chunks.total", and - "stats.chunks.high" mallctls. - - Bug fixes: - - Fix the cactive statistic to decrease (rather than increase) when active - memory decreases. This regression was first released in 3.5.0. - - Fix OOM handling in memalign() and valloc(). A variant of this bug existed - in all releases since 2.0.0, which introduced these functions. - - Fix an OOM-related regression in arena_tcache_fill_small(), which could - cause cache corruption on OOM. This regression was present in all releases - from 2.2.0 through 3.6.0. - - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), - calloc(), and realloc() when profiling is enabled. - - Fix the "arena..dss" mallctl to return an error if "primary" or - "secondary" precedence is specified, but sbrk(2) is not supported. - - Fix fallback lg_floor() implementations to handle extremely large inputs. - - Ensure the default purgeable zone is after the default zone on OS X. - - Fix latent bugs in atomic_*(). - - Fix the "arena..dss" mallctl to handle read-only calls. - - Fix tls_model configuration to enable the initial-exec model when possible. - - Mark malloc_conf as a weak symbol so that the application can override it. - - Correctly detect glibc's adaptive pthread mutexes. - - Fix the --without-export configure option. - -* 3.6.0 (March 31, 2014) - - This version contains a critical bug fix for a regression present in 3.5.0 and - 3.5.1. - - Bug fixes: - - Fix a regression in arena_chunk_alloc() that caused crashes during - small/large allocation if chunk allocation failed. In the absence of this - bug, chunk allocation failure would result in allocation failure, e.g. NULL - return from malloc(). This regression was introduced in 3.5.0. - - Fix backtracing for gcc intrinsics-based backtracing by specifying - -fno-omit-frame-pointer to gcc. Note that the application (and all the - libraries it links to) must also be compiled with this option for - backtracing to be reliable. - - Use dss allocation precedence for huge allocations as well as small/large - allocations. - - Fix test assertion failure message formatting. This bug did not manifest on - x86_64 systems because of implementation subtleties in va_list. - - Fix inconsequential test failures for hash and SFMT code. - - New features: - - Support heap profiling on FreeBSD. This feature depends on the proc - filesystem being mounted during heap profile dumping. - -* 3.5.1 (February 25, 2014) - - This version primarily addresses minor bugs in test code. - - Bug fixes: - - Configure Solaris/Illumos to use MADV_FREE. - - Fix junk filling for mremap(2)-based huge reallocation. This is only - relevant if configuring with the --enable-mremap option specified. - - Avoid compilation failure if 'restrict' C99 keyword is not supported by the - compiler. - - Add a configure test for SSE2 rather than assuming it is usable on i686 - systems. This fixes test compilation errors, especially on 32-bit Linux - systems. - - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit - test. - - Fix/remove flawed alignment-related overflow tests. - - Prevent compiler optimizations that could change backtraces in the - prof_accum unit test. - -* 3.5.0 (January 22, 2014) - - This version focuses on refactoring and automated testing, though it also - includes some non-trivial heap profiling optimizations not mentioned below. - - New features: - - Add the *allocx() API, which is a successor to the experimental *allocm() - API. 
The *allocx() functions are slightly simpler to use because they have - fewer parameters, they directly return the results of primary interest, and - mallocx()/rallocx() avoid the strict aliasing pitfall that - allocm()/rallocm() share with posix_memalign(). Note that *allocm() is - slated for removal in the next non-bugfix release. - - Add support for LinuxThreads. - - Bug fixes: - - Unless heap profiling is enabled, disable floating point code and don't link - with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 - systems, makes it possible to completely disable floating point register - use. Some versions of glibc neglect to save/restore caller-saved floating - point registers during dynamic lazy symbol loading, and the symbol loading - code uses whatever malloc the application happens to have linked/loaded - with, the result being potential floating point register corruption. - - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling - backtrace creation in imemalign(). This bug impacted posix_memalign() and - aligned_alloc(). - - Fix a file descriptor leak in a prof_dump_maps() error path. - - Fix prof_dump() to close the dump file descriptor for all relevant error - paths. - - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for - allocation, not just deallocation. - - Fix a data race for large allocation stats counters. - - Fix a potential infinite loop during thread exit. This bug occurred on - Solaris, and could affect other platforms with similar pthreads TSD - implementations. - - Don't junk-fill reallocations unless usable size changes. This fixes a - violation of the *allocx()/*allocm() semantics. - - Fix growing large reallocation to junk fill new space. - - Fix huge deallocation to junk fill when munmap is disabled. - - Change the default private namespace prefix from empty to je_, and change - --with-private-namespace-prefix so that it prepends an additional prefix - rather than replacing je_. This reduces the likelihood of applications - which statically link jemalloc experiencing symbol name collisions. - - Add missing private namespace mangling (relevant when - --with-private-namespace is specified). - - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as - static even for debug builds. - - Add a missing mutex unlock in a malloc_init_hard() error path. In practice - this error path is never executed. - - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These - bugs had no impact except for malformed inputs. - - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by - existing calls, so they had no impact. - -* 3.4.1 (October 20, 2013) - - Bug fixes: - - Fix a race in the "arenas.extend" mallctl that could cause memory corruption - of internal data structures and subsequent crashes. - - Fix Valgrind integration flaws that caused Valgrind warnings about reads of - uninitialized memory in: - + arena chunk headers - + internal zero-initialized data structures (relevant to tcache and prof - code) - - Preserve errno during the first allocation. A readlink(2) call during - initialization fails unless /etc/malloc.conf exists, so errno was typically - set during the first allocation prior to this fix. - - Fix compilation warnings reported by gcc 4.8.1. - -* 3.4.0 (June 2, 2013) - - This version is essentially a small bugfix release, but the addition of - aarch64 support requires that the minor version be incremented.
- - Bug fixes: - - Fix race-triggered deadlocks in chunk_record(). These deadlocks were - typically triggered by multiple threads concurrently deallocating huge - objects. - - New features: - - Add support for the aarch64 architecture. - -* 3.3.1 (March 6, 2013) - - This version fixes bugs that are typically encountered only when utilizing - custom run-time options. - - Bug fixes: - - Fix a locking order bug that could cause deadlock during fork if heap - profiling were enabled. - - Fix a chunk recycling bug that could cause the allocator to lose track of - whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause - corruption if allocating via sbrk(2) (unlikely unless running with the - "dss:primary" option specified). This was completely harmless on Linux - unless using mlockall(2) (and unlikely even then, unless the - --disable-munmap configure option or the "dss:primary" option was - specified). This regression was introduced in 3.1.0 by the - mlockall(2)/madvise(2) interaction fix. - - Fix TLS-related memory corruption that could occur during thread exit if the - thread never allocated memory. Only the quarantine and prof facilities were - susceptible. - - Fix two quarantine bugs: - + Internal reallocation of the quarantined object array leaked the old - array. - + Reallocation failure for internal reallocation of the quarantined object - array (very unlikely) resulted in memory corruption. - - Fix Valgrind integration to annotate all internally allocated memory in a - way that keeps Valgrind happy about internal data structure access. - - Fix building for s390 systems. - -* 3.3.0 (January 23, 2013) - - This version includes a few minor performance improvements in addition to the - listed new features and bug fixes. - - New features: - - Add clipping support to lg_chunk option processing. - - Add the --enable-ivsalloc option. - - Add the --without-export option. - - Add the --disable-zone-allocator option. - - Bug fixes: - - Fix "arenas.extend" mallctl to output the number of arenas. - - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory - is undefined. - - Fix build break on FreeBSD related to alloca.h. - -* 3.2.0 (November 9, 2012) - - In addition to a couple of bug fixes, this version modifies page run - allocation and dirty page purging algorithms in order to better control - page-level virtual memory fragmentation. - - Incompatible changes: - - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). - - Bug fixes: - - Fix dss/mmap allocation precedence code to use recyclable mmap memory only - after primary dss allocation fails. - - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced - in 3.1.0 by the addition of the "arena..purge" mallctl. - -* 3.1.0 (October 16, 2012) - - New features: - - Auto-detect whether running inside Valgrind, thus removing the need to - manually specify MALLOC_CONF=valgrind:true. - - Add the "arenas.extend" mallctl, which allows applications to create - manually managed arenas. - - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, - which provide control over dss/mmap precedence. - - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". - - Define LG_QUANTUM for hppa. - - Incompatible changes: - - Disable tcache by default if running inside Valgrind, in order to avoid - making unallocated objects appear reachable to Valgrind. - - Drop const from malloc_usable_size() argument on Linux. 
- - Bug fixes: - - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - - Remove const from __*_hook variable declarations, so that glibc can modify - them during process forking. - - Fix mlockall(2)/madvise(2) interaction. - - Fix fork(2)-related deadlocks. - - Fix error return value for "thread.tcache.enabled" mallctl. - -* 3.0.0 (May 11, 2012) - - Although this version adds some major new features, the primary focus is on - internal code cleanup that facilitates maintainability and portability, most - of which is not reflected in the ChangeLog. This is the first release to - incorporate substantial contributions from numerous other developers, and the - result is a more broadly useful allocator (see the git revision history for - contribution details). Note that the license has been unified, thanks to - Facebook granting a license under the same terms as the other copyright - holders (see COPYING). - - New features: - - Implement Valgrind support, redzones, and quarantine. - - Add support for additional platforms: - + FreeBSD - + Mac OS X Lion - + MinGW - + Windows (no support yet for replacing the system malloc) - - Add support for additional architectures: - + MIPS - + SH4 - + Tilera - - Add support for cross compiling. - - Add nallocm(), which rounds a request size up to the nearest size class - without actually allocating. - - Implement aligned_alloc() (blame C11). - - Add the "thread.tcache.enabled" mallctl. - - Add the "opt.prof_final" mallctl. - - Update pprof (from gperftools 2.0). - - Add the --with-mangling option. - - Add the --disable-experimental option. - - Add the --disable-munmap option, and make it the default on Linux. - - Add the --enable-mremap option, which disables use of mremap(2) by default. - - Incompatible changes: - - Enable stats by default. - - Enable fill by default. - - Disable lazy locking by default. - - Rename the "tcache.flush" mallctl to "thread.tcache.flush". - - Rename the "arenas.pagesize" mallctl to "arenas.page". - - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - - Change the "opt.prof_accum" default from true to false. - - Removed features: - - Remove the swap feature, including the "config.swap", "swap.avail", - "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - - Remove highruns statistics, including the - "stats.arenas..bins..highruns" and - "stats.arenas..lruns..highruns" mallctls. - - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", - "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and - "arenas.[tqcs]bins" mallctls. - - Remove the "arenas.chunksize" mallctl. - - Remove the "opt.lg_prof_tcmax" option. - - Remove the "opt.lg_prof_bt_max" option. - - Remove the "opt.lg_tcache_gc_sweep" option. - - Remove the --disable-tiny option, including the "config.tiny" mallctl. - - Remove the --enable-dynamic-page-shift configure option. - - Remove the --enable-sysv configure option. - - Bug fixes: - - Fix a statistics-related bug in the "thread.arena" mallctl that could cause - invalid statistics and crashes. - - Work around TLS deallocation via free() on Linux. This bug could cause - write-after-free memory corruption. - - Fix a potential deadlock that could occur during interval- and - growth-triggered heap profile dumps. - - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. - - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could - cause memory corruption and crashes with --enable-dss specified. 
- - Fix fork-related bugs that could cause deadlock in children between fork - and exec. - - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - - Fix realloc(p, 0) to act like free(p). - - Do not enforce minimum alignment in memalign(). - - Check for NULL pointer in malloc_usable_size(). - - Fix an off-by-one heap profile statistics bug that could be observed in - interval- and growth-triggered heap profiles. - - Fix the "epoch" mallctl to update cached stats even if the passed in epoch - is 0. - - Fix bin->runcur management to fix a layout policy bug. This bug did not - affect correctness. - - Fix a bug in choose_arena_hard() that potentially caused more arenas to be - initialized than necessary. - - Add missing "opt.lg_tcache_max" mallctl implementation. - - Use glibc allocator hooks to make mixed allocator usage less likely. - - Fix build issues for --disable-tcache. - - Don't mangle pthread_create() when --with-private-namespace is specified. - -* 2.2.5 (November 14, 2011) - - Bug fixes: - - Fix huge_ralloc() race when using mremap(2). This is a serious bug that - could cause memory corruption and/or crashes. - - Fix huge_ralloc() to maintain chunk statistics. - - Fix malloc_stats_print(..., "a") output. - -* 2.2.4 (November 5, 2011) - - Bug fixes: - - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as - well as for --disable-tls builds in earlier releases. - - Do not assume a 4 KiB page size in test/rallocm.c. - -* 2.2.3 (August 31, 2011) - - This version fixes numerous bugs related to heap profiling. - - Bug fixes: - - Fix a prof-related race condition. This bug could cause memory corruption, - but only occurred in non-default configurations (prof_accum:false). - - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is - excluded from backtraces). - - Fix a prof-related bug in realloc() (only triggered by OOM errors). - - Fix prof-related bugs in allocm() and rallocm(). - - Fix prof_tdata_cleanup() for --disable-tls builds. - - Fix a relative include path, to fix objdir builds. - -* 2.2.2 (July 30, 2011) - - Bug fixes: - - Fix a build error for --disable-tcache. - - Fix assertions in arena_purge() (for real this time). - - Add the --with-private-namespace option. This is a workaround for symbol - conflicts that can inadvertently arise when using static libraries. - -* 2.2.1 (March 30, 2011) - - Bug fixes: - - Implement atomic operations for x86/x64. This fixes compilation failures - for versions of gcc that are still in wide use. - - Fix an assertion in arena_purge(). - -* 2.2.0 (March 22, 2011) - - This version incorporates several improvements to algorithms and data - structures that tend to reduce fragmentation and increase speed. - - New features: - - Add the "stats.cactive" mallctl. - - Update pprof (from google-perftools 1.7). - - Improve backtracing-related configuration logic, and add the - --disable-prof-libgcc option. - - Bug fixes: - - Change default symbol visibility from "internal", to "hidden", which - decreases the overhead of library-internal function calls. - - Fix symbol visibility so that it is also set on OS X. - - Fix a build dependency regression caused by the introduction of the .pic.o - suffix for PIC object files. - - Add missing checks for mutex initialization failures. - - Don't use libgcc-based backtracing except on x64, where it is known to work. - - Fix deadlocks on OS X that were due to memory allocation in - pthread_mutex_lock(). 
- - Heap profiling-specific fixes: - + Fix memory corruption due to integer overflow in small region index - computation, when using a small enough sample interval that profiling - context pointers are stored in small run headers. - + Fix a bootstrap ordering bug that only occurred with TLS disabled. - + Fix a rallocm() rsize bug. - + Fix error detection bugs for aligned memory allocation. - -* 2.1.3 (March 14, 2011) - - Bug fixes: - - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix - for OS X in 2.1.2). - - Fix a "thread.arena" mallctl bug. - - Fix a thread cache stats merging bug. - -* 2.1.2 (March 2, 2011) - - Bug fixes: - - Fix "thread.{de,}allocatedp" mallctl for OS X. - - Add missing jemalloc.a to build system. - -* 2.1.1 (January 31, 2011) - - Bug fixes: - - Fix aligned huge reallocation (affected allocm()). - - Fix the ALLOCM_LG_ALIGN macro definition. - - Fix a heap dumping deadlock. - - Fix a "thread.arena" mallctl bug. - -* 2.1.0 (December 3, 2010) - - This version incorporates some optimizations that can't quite be considered - bug fixes. - - New features: - - Use Linux's mremap(2) for huge object reallocation when possible. - - Avoid locking in mallctl*() when possible. - - Add the "thread.[de]allocatedp" mallctl's. - - Convert the manual page source from roff to DocBook, and generate both roff - and HTML manuals. - - Bug fixes: - - Fix a crash due to incorrect bootstrap ordering. This only impacted - --enable-debug --enable-dss configurations. - - Fix a minor statistics bug for mallctl("swap.avail", ...). - -* 2.0.1 (October 29, 2010) - - Bug fixes: - - Fix a race condition in heap profiling that could cause undefined behavior - if "opt.prof_accum" were disabled. - - Add missing mutex unlocks for some OOM error paths in the heap profiling - code. - - Fix a compilation error for non-C99 builds. - -* 2.0.0 (October 24, 2010) - - This version focuses on the experimental *allocm() API, and on improved - run-time configuration/introspection. Nonetheless, numerous performance - improvements are also included. - - New features: - - Implement the experimental {,r,s,d}allocm() API, which provides a superset - of the functionality available via malloc(), calloc(), posix_memalign(), - realloc(), malloc_usable_size(), and free(). These functions can be used to - allocate/reallocate aligned zeroed memory, ask for optional extra memory - during reallocation, prevent object movement during reallocation, etc. - - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is - more human-readable, and more flexible. For example: - JEMALLOC_OPTIONS=AJP - is now: - MALLOC_CONF=abort:true,fill:true,stats_print:true - - Port to Apple OS X. Sponsored by Mozilla. - - Make it possible for the application to control thread-->arena mappings via - the "thread.arena" mallctl. - - Add compile-time support for all TLS-related functionality via pthreads TSD. - This is mainly of interest for OS X, which does not support TLS, but has a - TSD implementation with similar performance. - - Override memalign() and valloc() if they are provided by the system. - - Add the "arenas.purge" mallctl, which can be used to synchronously purge all - dirty unused pages. - - Make cumulative heap profiling data optional, so that it is possible to - limit the amount of memory consumed by heap profiling data structures. - - Add per thread allocation counters that can be accessed via the - "thread.allocated" and "thread.deallocated" mallctls. 
- - Incompatible changes: - - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - - Increase default backtrace depth from 4 to 128 for heap profiling. - - Disable interval-based profile dumps by default. - - Bug fixes: - - Remove bad assertions in fork handler functions. These assertions could - cause aborts for some combinations of configure settings. - - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - - Fix leak context reporting. This bug tended to cause the number of contexts - to be underreported (though the reported number of objects and bytes were - correct). - - Fix a realloc() bug for large in-place growing reallocation. This bug could - cause memory corruption, but it was hard to trigger. - - Fix an allocation bug for small allocations that could be triggered if - multiple threads raced to create a new run of backing pages. - - Enhance the heap profiler to trigger samples based on usable size, rather - than request size. - - Fix a heap profiling bug due to sometimes losing track of requested object - size for sampled objects. - -* 1.0.3 (August 12, 2010) - - Bug fixes: - - Fix the libunwind-based implementation of stack backtracing (used for heap - profiling). This bug could cause zero-length backtraces to be reported. - - Add a missing mutex unlock in library initialization code. If multiple - threads raced to initialize malloc, some of them could end up permanently - blocked. - -* 1.0.2 (May 11, 2010) - - Bug fixes: - - Fix junk filling of large objects, which could cause memory corruption. - - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual - memory limits could cause swap file configuration to fail. Contributed by - Jordan DeLong. - -* 1.0.1 (April 14, 2010) - - Bug fixes: - - Fix compilation when --enable-fill is specified. - - Fix threads-related profiling bugs that affected accuracy and caused memory - to be leaked during thread exit. - - Fix dirty page purging race conditions that could cause crashes. - - Fix crash in tcache flushing code during thread destruction. - -* 1.0.0 (April 11, 2010) - - This release focuses on speed and run-time introspection. Numerous - algorithmic improvements make this release substantially faster than its - predecessors. - - New features: - - Implement autoconf-based configuration system. - - Add mallctl*(), for the purposes of introspection and run-time - configuration. - - Make it possible for the application to manually flush a thread's cache, via - the "tcache.flush" mallctl. - - Base maximum dirty page count on proportion of active memory. - - Compute various additional run-time statistics, including per size class - statistics for large objects. - - Expose malloc_stats_print(), which can be called repeatedly by the - application. - - Simplify the malloc_message() signature to only take one string argument, - and incorporate an opaque data pointer argument for use by the application - in combination with malloc_stats_print(). - - Add support for allocation backed by one or more swap files, and allow the - application to disable over-commit if swap files are in use. - - Implement allocation profiling and leak checking. - - Removed features: - - Remove the dynamic arena rebalancing code, since thread-specific caching - reduces its utility. - - Bug fixes: - - Modify chunk allocation to work when address space layout randomization - (ASLR) is in use. - - Fix thread cleanup bugs related to TLS destruction. - - Handle 0-size allocation requests in posix_memalign(). 
- - Fix a chunk leak. The leaked chunks were never touched, so this impacted - virtual memory usage, but not physical memory usage. - -* linux_2008082[78]a (August 27/28, 2008) - - These snapshot releases are the simple result of incorporating Linux-specific - support into the FreeBSD malloc sources. - --------------------------------------------------------------------------------- -vim:filetype=text:textwidth=80 diff --git a/sources/jemalloc/include-linux/jemalloc/internal/arena.h b/sources/jemalloc/include-linux/jemalloc/internal/arena.h deleted file mode 100755 index 119e3a59..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/arena.h +++ /dev/null @@ -1,1528 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) - -/* Maximum number of regions in one run. */ -#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) -#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) - -/* - * Minimum redzone size. Redzones may be larger than this if necessary to - * preserve region alignment. - */ -#define REDZONE_MINSIZE 16 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> lg_dirty_mult) >= ndirty - * - * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as - * many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 3 - -typedef enum { - purge_mode_ratio = 0, - purge_mode_decay = 1, - - purge_mode_limit = 2 -} purge_mode_t; -#define PURGE_DEFAULT purge_mode_ratio -/* Default decay time in seconds. */ -#define DECAY_TIME_DEFAULT 10 -/* Number of event ticks between time checks. */ -#define DECAY_NTICKS_PER_UPDATE 1000 - -typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; -typedef struct arena_avail_links_s arena_avail_links_t; -typedef struct arena_run_s arena_run_t; -typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; -typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; -typedef struct arena_chunk_s arena_chunk_t; -typedef struct arena_bin_info_s arena_bin_info_t; -typedef struct arena_decay_s arena_decay_t; -typedef struct arena_bin_s arena_bin_t; -typedef struct arena_s arena_t; -typedef struct arena_tdata_s arena_tdata_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#ifdef JEMALLOC_ARENA_STRUCTS_A -struct arena_run_s { - /* Index of bin this run is associated with. */ - szind_t binind; - - /* Number of free regions in run. */ - unsigned nfree; - - /* Per region allocated/deallocated bitmap. */ - bitmap_t bitmap[BITMAP_GROUPS_MAX]; -}; - -/* Each element of the chunk map corresponds to one page within the chunk. */ -struct arena_chunk_map_bits_s { - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ???nnnnn nnndumla - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. - * Small: Run page offset. - * Large: Run page count for first page, unset for trailing pages. - * n : binind for small size class, BININD_INVALID for large size class. - * d : dirty? - * u : unzeroed? - * m : decommitted? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. 
- * - * p : run page offset - * s : run size - * n : binind for size class; large objects set these to BININD_INVALID - * x : don't care - * - : 0 - * + : 1 - * [DUMLA] : bit set - * [dumla] : bit unset - * - * Unallocated (clean): - * ssssssss ssssssss sss+++++ +++dum-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx - * ssssssss ssssssss sss+++++ +++dUm-a - * - * Unallocated (dirty): - * ssssssss ssssssss sss+++++ +++D-m-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * ssssssss ssssssss sss+++++ +++D-m-a - * - * Small: - * pppppppp pppppppp pppnnnnn nnnd---A - * pppppppp pppppppp pppnnnnn nnn----A - * pppppppp pppppppp pppnnnnn nnnd---A - * - * Large: - * ssssssss ssssssss sss+++++ +++D--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - * - * Large (sampled, size <= LARGE_MINCLASS): - * ssssssss ssssssss sssnnnnn nnnD--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - * - * Large (not sampled, size == LARGE_MINCLASS): - * ssssssss ssssssss sss+++++ +++D--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - */ - size_t bits; -#define CHUNK_MAP_ALLOCATED ((size_t)0x01U) -#define CHUNK_MAP_LARGE ((size_t)0x02U) -#define CHUNK_MAP_STATE_MASK ((size_t)0x3U) - -#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) -#define CHUNK_MAP_UNZEROED ((size_t)0x08U) -#define CHUNK_MAP_DIRTY ((size_t)0x10U) -#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) - -#define CHUNK_MAP_BININD_SHIFT 5 -#define BININD_INVALID ((size_t)0xffU) -#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) -#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK - -#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) -#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) -#define CHUNK_MAP_SIZE_MASK \ - (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) -}; - -struct arena_runs_dirty_link_s { - qr(arena_runs_dirty_link_t) rd_link; -}; - -/* - * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just - * like arena_chunk_map_bits_t. Two separate arrays are stored within each - * chunk header in order to improve cache locality. - */ -struct arena_chunk_map_misc_s { - /* - * Linkage for run heaps. There are two disjoint uses: - * - * 1) arena_t's runs_avail heaps. - * 2) arena_run_t conceptually uses this linkage for in-use non-full - * runs, rather than directly embedding linkage. - */ - phn(arena_chunk_map_misc_t) ph_link; - - union { - /* Linkage for list of dirty runs. */ - arena_runs_dirty_link_t rd; - - /* Profile counters, used for large object runs. */ - union { - void *prof_tctx_pun; - prof_tctx_t *prof_tctx; - }; - - /* Small region run metadata. */ - arena_run_t run; - }; -}; -typedef ph(arena_chunk_map_misc_t) arena_run_heap_t; -#endif /* JEMALLOC_ARENA_STRUCTS_A */ - -#ifdef JEMALLOC_ARENA_STRUCTS_B -/* Arena chunk header. */ -struct arena_chunk_s { - /* - * A pointer to the arena that owns the chunk is stored within the node. - * This field as a whole is used by chunks_rtree to support both - * ivsalloc() and core-based debugging. - */ - extent_node_t node; - - /* - * True if memory could be backed by transparent huge pages. This is - * only directly relevant to Linux, since it is the only supported - * platform on which jemalloc interacts with explicit transparent huge - * page controls. - */ - bool hugepage; - - /* - * Map of pages within chunk that keeps track of free/large/small. 
The - * first map_bias entries are omitted, since the chunk header does not - * need to be tracked in the map. This omission saves a header page - * for common chunk sizes (e.g. 4 MiB). - */ - arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */ -}; - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each run has the following layout: - * - * /--------------------\ - * | pad? | - * |--------------------| - * | redzone | - * reg0_offset | region 0 | - * | redzone | - * |--------------------| \ - * | redzone | | - * | region 1 | > reg_interval - * | redzone | / - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | redzone | - * | region nregs-1 | - * | redzone | - * |--------------------| - * | alignment pad? | - * \--------------------/ - * - * reg_interval has at least the same minimum alignment as reg_size; this - * preserves the alignment constraint that sa2u() depends on. Alignment pad is - * either 0 or redzone_size; it is present only if needed to align reg0_offset. - */ -struct arena_bin_info_s { - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Redzone size. */ - size_t redzone_size; - - /* Interval between regions (reg_size + (redzone_size << 1)). */ - size_t reg_interval; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* - * Metadata used to manipulate bitmaps for runs associated with this - * bin. - */ - bitmap_info_t bitmap_info; - - /* Offset of first region in a run for this bin's size class. */ - uint32_t reg0_offset; -}; - -struct arena_decay_s { - /* - * Approximate time in seconds from the creation of a set of unused - * dirty pages until an equivalent set of unused dirty pages is purged - * and/or reused. - */ - ssize_t time; - /* time / SMOOTHSTEP_NSTEPS. */ - nstime_t interval; - /* - * Time at which the current decay interval logically started. We do - * not actually advance to a new epoch until sometime after it starts - * because of scheduling and computation delays, and it is even possible - * to completely skip epochs. In all cases, during epoch advancement we - * merge all relevant activity into the most recently recorded epoch. - */ - nstime_t epoch; - /* Deadline randomness generator. */ - uint64_t jitter_state; - /* - * Deadline for current epoch. This is the sum of interval and per - * epoch jitter which is a uniform random variable in [0..interval). - * Epochs always advance by precise multiples of interval, but we - * randomize the deadline to reduce the likelihood of arenas purging in - * lockstep. - */ - nstime_t deadline; - /* - * Number of dirty pages at beginning of current epoch. During epoch - * advancement we use the delta between arena->decay.ndirty and - * arena->ndirty to determine how many dirty pages, if any, were - * generated. - */ - size_t ndirty; - /* - * Trailing log of how many unused dirty pages were generated during - * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last - * element is the most recent epoch. Corresponding epoch times are - * relative to epoch. - */ - size_t backlog[SMOOTHSTEP_NSTEPS]; -}; - -struct arena_bin_s { - /* - * All operations on runcur, runs, and stats require that lock be - * locked. 
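 * (Returning to arena_decay_s above, a simplified sketch of how the
 * backlog advances by nadvance epochs; the real arena.c logic differs
 * in detail, this is only the shape:
 *
 *   memmove(backlog, backlog + nadvance,
 *       (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
 *   memset(backlog + SMOOTHSTEP_NSTEPS - nadvance, 0,
 *       nadvance * sizeof(size_t));
 *   backlog[SMOOTHSTEP_NSTEPS - 1] = ndirty_delta;
 *
 * where ndirty_delta is a stand-in name for the dirty pages generated
 * during the most recent epoch; purging then targets a smoothstep-
 * weighted sum over the backlog.)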
Run allocation/deallocation are protected by the arena lock,
- * which may be acquired while holding one or more bin locks, but not
- * vice versa.
- */
- malloc_mutex_t lock;
-
- /*
- * Current run being used to service allocations of this bin's size
- * class.
- */
- arena_run_t *runcur;
-
- /*
- * Heap of non-full runs. This heap is used when looking for an
- * existing run when runcur is no longer usable. We choose the
- * non-full run that is lowest in memory; this policy tends to keep
- * objects packed well, and it can also help reduce the number of
- * almost-empty chunks.
- */
- arena_run_heap_t runs;
-
- /* Bin statistics. */
- malloc_bin_stats_t stats;
-};
-
-struct arena_s {
- /* This arena's index within the arenas array. */
- unsigned ind;
-
- /*
- * Number of threads currently assigned to this arena, synchronized via
- * atomic operations. Each thread has two distinct assignments, one for
- * application-serving allocation, and the other for internal metadata
- * allocation. Internal metadata must not be allocated from arenas
- * created via the arenas.extend mallctl, because the arena.<i>.reset
- * mallctl indiscriminately discards all allocations for the affected
- * arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
- */
- unsigned nthreads[2];
-
- /*
- * There are three classes of arena operations from a locking
- * perspective:
- * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
- * 2) Bin-related operations are protected by bin locks.
- * 3) Chunk- and run-related operations are protected by this mutex.
- */
- malloc_mutex_t lock;
-
- arena_stats_t stats;
- /*
- * List of tcaches for extant threads associated with this arena.
- * Stats from these are merged incrementally, and at exit if
- * opt_stats_print is enabled.
- */
- ql_head(tcache_t) tcache_ql;
-
- uint64_t prof_accumbytes;
-
- /*
- * PRNG state for cache index randomization of large allocation base
- * pointers.
- */
- size_t offset_state;
-
- dss_prec_t dss_prec;
-
- /* Extant arena chunks. */
- ql_head(extent_node_t) achunks;
-
- /* Extent serial number generator state. */
- size_t extent_sn_next;
-
- /*
- * In order to avoid rapid chunk allocation/deallocation when an arena
- * oscillates right on the cusp of needing a new chunk, cache the most
- * recently freed chunk. The spare is left in the arena's chunk trees
- * until it is deleted.
- *
- * There is one spare chunk per arena, rather than one spare total, in
- * order to avoid interactions between multiple threads that could make
- * a single spare inadequate.
- */
- arena_chunk_t *spare;
-
- /* Minimum ratio (log base 2) of nactive:ndirty. */
- ssize_t lg_dirty_mult;
-
- /* True if a thread is currently executing arena_purge_to_limit(). */
- bool purging;
-
- /* Number of pages in active runs and huge regions. */
- size_t nactive;
-
- /*
- * Current count of pages within unused runs that are potentially
- * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
- * By tracking this, we can institute a limit on how much dirty unused
- * memory is mapped for each arena.
- */
- size_t ndirty;
-
- /*
- * Unused dirty memory this arena manages. Dirty memory is conceptually
- * tracked as an arbitrarily interleaved LRU of dirty runs and cached
- * chunks, but the list linkage is actually semi-duplicated in order to
- * avoid extra arena_chunk_map_misc_t space overhead.
- * - * LRU-----------------------------------------------------------MRU - * - * /-- arena ---\ - * | | - * | | - * |------------| /- chunk -\ - * ...->|chunks_cache|<--------------------------->| /----\ |<--... - * |------------| | |node| | - * | | | | | | - * | | /- run -\ /- run -\ | | | | - * | | | | | | | | | | - * | | | | | | | | | | - * |------------| |-------| |-------| | |----| | - * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... - * |------------| |-------| |-------| | |----| | - * | | | | | | | | | | - * | | | | | | | \----/ | - * | | \-------/ \-------/ | | - * | | | | - * | | | | - * \------------/ \---------/ - */ - arena_runs_dirty_link_t runs_dirty; - extent_node_t chunks_cache; - - /* Decay-based purging state. */ - arena_decay_t decay; - - /* Extant huge allocations. */ - ql_head(extent_node_t) huge; - /* Synchronizes all huge allocation/update/deallocation. */ - malloc_mutex_t huge_mtx; - - /* - * Trees of chunks that were previously allocated (trees differ only in - * node ordering). These are used when allocating chunks, in an attempt - * to re-use address space. Depending on function, different tree - * orderings are needed, which is why there are two trees with the same - * contents. - */ - extent_tree_t chunks_szsnad_cached; - extent_tree_t chunks_ad_cached; - extent_tree_t chunks_szsnad_retained; - extent_tree_t chunks_ad_retained; - - malloc_mutex_t chunks_mtx; - /* Cache of nodes that were allocated via base_alloc(). */ - ql_head(extent_node_t) node_cache; - malloc_mutex_t node_cache_mtx; - - /* User-configurable chunk hook functions. */ - chunk_hooks_t chunk_hooks; - - /* bins is used to store trees of free regions. */ - arena_bin_t bins[NBINS]; - - /* - * Size-segregated address-ordered heaps of this arena's available runs, - * used for first-best-fit run allocation. Runs are quantized, i.e. - * they reside in the last heap which corresponds to a size class less - * than or equal to the run size. - */ - arena_run_heap_t runs_avail[NPSIZES]; -}; - -/* Used in conjunction with tsd for fast arena-related context lookup. */ -struct arena_tdata_s { - ticker_t decay_ticker; -}; -#endif /* JEMALLOC_ARENA_STRUCTS_B */ - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -static const size_t large_pad = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - PAGE -#else - 0 -#endif - ; - -extern bool opt_thp; -extern purge_mode_t opt_purge; -extern const char *purge_mode_names[]; -extern ssize_t opt_lg_dirty_mult; -extern ssize_t opt_decay_time; - -extern arena_bin_info_t arena_bin_info[NBINS]; - -extern size_t map_bias; /* Number of arena chunk header pages. */ -extern size_t map_misc_offset; -extern size_t arena_maxrun; /* Max run size for arenas. */ -extern size_t large_maxclass; /* Max large size class. */ -extern unsigned nlclasses; /* Number of large size classes. */ -extern unsigned nhclasses; /* Number of huge size classes. 
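 * (Looking back at the lg_dirty_mult, nactive, and ndirty fields above,
 * the ratio-mode purge trigger follows the relation given with
 * LG_DIRTY_MULT_DEFAULT; a sketch, assuming those field names:
 *
 *   if (arena->lg_dirty_mult >= 0 &&
 *       arena->ndirty > (arena->nactive >> arena->lg_dirty_mult))
 *       purge down to the threshold;
 *
 * e.g. with lg_dirty_mult == 3 and nactive == 8192, up to 1024 dirty
 * pages are tolerated before purging starts; the real code also floors
 * the threshold at chunk_npages.)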
*/ - -#ifdef JEMALLOC_JET -typedef size_t (run_quantize_t)(size_t); -extern run_quantize_t *run_quantize_floor; -extern run_quantize_t *run_quantize_ceil; -#endif -void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, - bool cache); -void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, - bool cache); -extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); -void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); -void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, size_t *sn, bool *zero); -void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t usize, size_t sn); -void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize); -void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize, size_t sn); -bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize, bool *zero); -ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); -bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, - ssize_t lg_dirty_mult); -ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); -bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); -void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); -void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); -void arena_reset(tsd_t *tsd, arena_t *arena); -void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, - tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); -void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, - bool zero); -#ifdef JEMALLOC_JET -typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, - uint8_t); -extern arena_redzone_corruption_t *arena_redzone_corruption; -typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); -extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; -#else -void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); -#endif -void arena_quarantine_junk_small(void *ptr, size_t usize); -void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, - bool zero); -void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, - szind_t ind, bool zero); -void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache); -void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); -void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t pageind); -#ifdef JEMALLOC_JET -typedef void (arena_dalloc_junk_large_t)(void *, size_t); -extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; -#else -void arena_dalloc_junk_large(void *ptr, size_t usize); -#endif -void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr); -void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr); -#ifdef JEMALLOC_JET -typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); -extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; -#endif -bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t size, 
size_t extra, bool zero); -void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, - size_t size, size_t alignment, bool zero, tcache_t *tcache); -dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); -bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); -ssize_t arena_lg_dirty_mult_default_get(void); -bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); -ssize_t arena_decay_time_default_get(void); -bool arena_decay_time_default_set(ssize_t decay_time); -void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, - unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, - ssize_t *decay_time, size_t *nactive, size_t *ndirty); -void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, - malloc_huge_stats_t *hstats); -unsigned arena_nthreads_get(arena_t *arena, bool internal); -void arena_nthreads_inc(arena_t *arena, bool internal); -void arena_nthreads_dec(arena_t *arena, bool internal); -size_t arena_extent_sn_next(arena_t *arena); -arena_t *arena_new(tsdn_t *tsdn, unsigned ind); -void arena_boot(void); -void arena_prefork0(tsdn_t *tsdn, arena_t *arena); -void arena_prefork1(tsdn_t *tsdn, arena_t *arena); -void arena_prefork2(tsdn_t *tsdn, arena_t *arena); -void arena_prefork3(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk, - size_t pageind); -const arena_chunk_map_bits_t *arena_bitselm_get_const( - const arena_chunk_t *chunk, size_t pageind); -arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk, - size_t pageind); -const arena_chunk_map_misc_t *arena_miscelm_get_const( - const arena_chunk_t *chunk, size_t pageind); -size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm); -void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm); -arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); -arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); -size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind); -const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbitsp_read(const size_t *mapbitsp); -size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_size_decode(size_t mapbits); -size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk, - size_t pageind); -szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind); -void 
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); -size_t arena_mapbits_size_encode(size_t size); -void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size); -void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, - size_t flags); -void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - szind_t binind); -void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, szind_t binind, size_t flags); -void arena_metadata_allocated_add(arena_t *arena, size_t size); -void arena_metadata_allocated_sub(arena_t *arena, size_t size); -size_t arena_metadata_allocated_get(arena_t *arena); -bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); -szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); -size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, - const void *ptr); -prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *old_tctx); -void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); -void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); -void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, - bool zero, tcache_t *tcache, bool slow_path); -arena_t *arena_aalloc(const void *ptr); -size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); -void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); -void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) -# ifdef JEMALLOC_ARENA_INLINE_A -JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * -arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return (&chunk->map_bits[pageind-map_bias]); -} - -JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t * -arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + - (uintptr_t)map_misc_offset) + pageind-map_bias); -} - -JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t * -arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + - map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return 
(pageind); -} - -JEMALLOC_ALWAYS_INLINE void * -arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - - return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) -{ - arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t - *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); - - assert(arena_miscelm_to_pageind(miscelm) >= map_bias); - assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); - - return (miscelm); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_run_to_miscelm(arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t - *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); - - assert(arena_miscelm_to_pageind(miscelm) >= map_bias); - assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); - - return (miscelm); -} - -JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - return (&arena_bitselm_get_mutable(chunk, pageind)->bits); -} - -JEMALLOC_ALWAYS_INLINE const size_t * -arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(const size_t *mapbitsp) -{ - - return (*mapbitsp); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind))); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_size_decode(size_t mapbits) -{ - size_t size; - -#if CHUNK_MAP_SIZE_SHIFT > 0 - size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; -#elif CHUNK_MAP_SIZE_SHIFT == 0 - size = mapbits & CHUNK_MAP_SIZE_MASK; -#else - size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; -#endif - - return (size); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - CHUNK_MAP_ALLOCATED); - return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); -} - -JEMALLOC_ALWAYS_INLINE szind_t -arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - szind_t binind; - - mapbits = arena_mapbits_get(chunk, pageind); - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - assert(binind < NBINS || binind == BININD_INVALID); - return (binind); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || 
(mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_DIRTY); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_UNZEROED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_DECOMMITTED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_LARGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) -{ - - *mapbitsp = mapbits; -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_size_encode(size_t size) -{ - size_t mapbits; - -#if CHUNK_MAP_SIZE_SHIFT > 0 - mapbits = size << CHUNK_MAP_SIZE_SHIFT; -#elif CHUNK_MAP_SIZE_SHIFT == 0 - mapbits = size; -#else - mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; -#endif - - assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); - return (mapbits); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); - assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - CHUNK_MAP_BININD_INVALID | flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert((size & PAGE_MASK) == 0); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - (mapbits & ~CHUNK_MAP_SIZE_MASK)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((flags & CHUNK_MAP_UNZEROED) == flags); - arena_mapbitsp_write(mapbitsp, flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); - assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - szind_t binind) -{ - size_t 
*mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert(binind <= BININD_INVALID); - assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + - large_pad); - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | - (binind << CHUNK_MAP_BININD_SHIFT)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - szind_t binind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert(binind < BININD_INVALID); - assert(pageind - runind >= map_bias); - assert((flags & CHUNK_MAP_UNZEROED) == flags); - arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | - (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_INLINE void -arena_metadata_allocated_add(arena_t *arena, size_t size) -{ - - atomic_add_z(&arena->stats.metadata_allocated, size); -} - -JEMALLOC_INLINE void -arena_metadata_allocated_sub(arena_t *arena, size_t size) -{ - - atomic_sub_z(&arena->stats.metadata_allocated, size); -} - -JEMALLOC_INLINE size_t -arena_metadata_allocated_get(arena_t *arena) -{ - - return (atomic_read_z(&arena->stats.metadata_allocated)); -} - -JEMALLOC_INLINE bool -arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - assert(prof_interval != 0); - - arena->prof_accumbytes += accumbytes; - if (arena->prof_accumbytes >= prof_interval) { - arena->prof_accumbytes -= prof_interval; - return (true); - } - return (false); -} - -JEMALLOC_INLINE bool -arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (likely(prof_interval == 0)) - return (false); - return (arena_prof_accum_impl(arena, accumbytes)); -} - -JEMALLOC_INLINE bool -arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (likely(prof_interval == 0)) - return (false); - - { - bool ret; - - malloc_mutex_lock(tsdn, &arena->lock); - ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(tsdn, &arena->lock); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) -{ - szind_t binind; - - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - - if (config_debug) { - arena_chunk_t *chunk; - arena_t *arena; - size_t pageind; - size_t actual_mapbits; - size_t rpages_ind; - const arena_run_t *run; - arena_bin_t *bin; - szind_t run_binind, actual_binind; - arena_bin_info_t *bin_info; - const arena_chunk_map_misc_t *miscelm; - const void *rpages; - - assert(binind != BININD_INVALID); - assert(binind < NBINS); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = extent_node_arena_get(&chunk->node); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - actual_mapbits = arena_mapbits_get(chunk, pageind); - assert(mapbits == actual_mapbits); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, - pageind); - miscelm = arena_miscelm_get_const(chunk, rpages_ind); - run = &miscelm->run; - run_binind = run->binind; - bin = &arena->bins[run_binind]; - actual_binind = (szind_t)(bin - arena->bins); - assert(run_binind == actual_binind); - bin_info = &arena_bin_info[actual_binind]; - rpages = arena_miscelm_to_rpages(miscelm); - assert(((uintptr_t)ptr - ((uintptr_t)rpages + - (uintptr_t)bin_info->reg0_offset)) % 
bin_info->reg_interval - == 0); - } - - return (binind); -} -# endif /* JEMALLOC_ARENA_INLINE_A */ - -# ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE szind_t -arena_bin_index(arena_t *arena, arena_bin_t *bin) -{ - szind_t binind = (szind_t)(bin - arena->bins); - assert(binind < NBINS); - return (binind); -} - -JEMALLOC_INLINE size_t -arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) -{ - size_t diff, interval, shift, regind; - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - void *rpages = arena_miscelm_to_rpages(miscelm); - - /* - * Freeing a pointer lower than region zero can cause assertion - * failure. - */ - assert((uintptr_t)ptr >= (uintptr_t)rpages + - (uintptr_t)bin_info->reg0_offset); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages - - bin_info->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - interval = bin_info->reg_interval; - shift = ffs_zu(interval) - 1; - diff >>= shift; - interval >>= shift; - - if (interval == 1) { - /* The divisor was a power of 2. */ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. - */ -#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS) -#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1) - static const size_t interval_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t)) - + 2))) { - regind = (diff * interval_invs[interval - 3]) >> - SIZE_INV_SHIFT; - } else - regind = diff / interval; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * interval); - assert(regind < bin_info->nregs); - - return (regind); -} - -JEMALLOC_INLINE prof_tctx_t * -arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) -{ - prof_tctx_t *ret; - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) - ret = (prof_tctx_t *)(uintptr_t)1U; - else { - arena_chunk_map_misc_t *elm = - arena_miscelm_get_mutable(chunk, pageind); - ret = atomic_read_p(&elm->prof_tctx_pun); - } - } else - ret = huge_prof_tctx_get(tsdn, ptr); - - return (ret); -} - -JEMALLOC_INLINE void -arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx) -{ - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind = ((uintptr_t)ptr 
- (uintptr_t)chunk) >> LG_PAGE; - - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - - if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > - (uintptr_t)1U)) { - arena_chunk_map_misc_t *elm; - - assert(arena_mapbits_large_get(chunk, pageind) != 0); - - elm = arena_miscelm_get_mutable(chunk, pageind); - atomic_write_p(&elm->prof_tctx_pun, tctx); - } else { - /* - * tctx must always be initialized for large runs. - * Assert that the surrounding conditional logic is - * equivalent to checking whether ptr refers to a large - * run. - */ - assert(arena_mapbits_large_get(chunk, pageind) == 0); - } - } else - huge_prof_tctx_set(tsdn, ptr, tctx); -} - -JEMALLOC_INLINE void -arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *old_tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && - (uintptr_t)old_tctx > (uintptr_t)1U))) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind; - arena_chunk_map_misc_t *elm; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> - LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != - 0); - assert(arena_mapbits_large_get(chunk, pageind) != 0); - - elm = arena_miscelm_get_mutable(chunk, pageind); - atomic_write_p(&elm->prof_tctx_pun, - (prof_tctx_t *)(uintptr_t)1U); - } else - huge_prof_tctx_reset(tsdn, ptr); - } -} - -JEMALLOC_ALWAYS_INLINE void -arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) -{ - tsd_t *tsd; - ticker_t *decay_ticker; - - if (unlikely(tsdn_null(tsdn))) - return; - tsd = tsdn_tsd(tsdn); - decay_ticker = decay_ticker_get(tsd, arena->ind); - if (unlikely(decay_ticker == NULL)) - return; - if (unlikely(ticker_ticks(decay_ticker, nticks))) - arena_purge(tsdn, arena, false); -} - -JEMALLOC_ALWAYS_INLINE void -arena_decay_tick(tsdn_t *tsdn, arena_t *arena) -{ - - arena_decay_ticks(tsdn, arena, 1); -} - -JEMALLOC_ALWAYS_INLINE void * -arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, - tcache_t *tcache, bool slow_path) -{ - - assert(!tsdn_null(tsdn) || tcache == NULL); - assert(size != 0); - - if (likely(tcache != NULL)) { - if (likely(size <= SMALL_MAXCLASS)) { - return (tcache_alloc_small(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path)); - } - if (likely(size <= tcache_maxclass)) { - return (tcache_alloc_large(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path)); - } - /* (size > tcache_maxclass) case falls through. */ - assert(size > tcache_maxclass); - } - - return (arena_malloc_hard(tsdn, arena, size, ind, zero)); -} - -JEMALLOC_ALWAYS_INLINE arena_t * -arena_aalloc(const void *ptr) -{ - arena_chunk_t *chunk; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) - return (extent_node_arena_get(&chunk->node)); - else - return (huge_aalloc(ptr)); -} - -/* Return the size of the allocation pointed to by ptr. 
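 * (Stepping back to arena_run_regind() above, its reciprocal trick in
 * a 16-bit analogue for readability: to compute diff / 3 without a
 * divide, precompute inv = (1 << 16) / 3 + 1 == 21846, then
 *
 *   regind = (diff * inv) >> 16;   e.g. (6 * 21846) >> 16 == 2
 *
 * The real code uses the larger SIZE_INV_SHIFT so the +1 rounding
 * stays harmless across the full range of run indices.)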
*/ -JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind; - szind_t binind; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - binind = arena_mapbits_binind_get(chunk, pageind); - if (unlikely(binind == BININD_INVALID || (config_prof && !demote - && arena_mapbits_large_get(chunk, pageind) != 0))) { - /* - * Large allocation. In the common case (demote), and - * as this is an inline function, most callers will only - * end up looking at binind to determine that ptr is a - * small allocation. - */ - assert(config_cache_oblivious || ((uintptr_t)ptr & - PAGE_MASK) == 0); - ret = arena_mapbits_large_size_get(chunk, pageind) - - large_pad; - assert(ret != 0); - assert(pageind + ((ret+large_pad)>>LG_PAGE) <= - chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, - pageind+((ret+large_pad)>>LG_PAGE)-1)); - } else { - /* - * Small allocation (possibly promoted to a large - * object). - */ - assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, - arena_mapbits_get(chunk, pageind)) == binind); - ret = index2size(binind); - } - } else - ret = huge_salloc(tsdn, ptr); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - assert(!tsdn_null(tsdn) || tcache == NULL); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { - /* Small allocation. */ - if (likely(tcache != NULL)) { - szind_t binind = arena_ptr_small_binind_get(ptr, - mapbits); - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, - binind, slow_path); - } else { - arena_dalloc_small(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr, pageind); - } - } else { - size_t size = arena_mapbits_large_size_get(chunk, - pageind); - - assert(config_cache_oblivious || ((uintptr_t)ptr & - PAGE_MASK) == 0); - - if (likely(tcache != NULL) && size - large_pad <= - tcache_maxclass) { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, - size - large_pad, slow_path); - } else { - arena_dalloc_large(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr); - } - } - } else - huge_dalloc(tsdn, ptr); -} - -JEMALLOC_ALWAYS_INLINE void -arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path) -{ - arena_chunk_t *chunk; - - assert(!tsdn_null(tsdn) || tcache == NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - if (config_prof && opt_prof) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> - LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != - 0); - if (arena_mapbits_large_get(chunk, pageind) != 0) { - /* - * Make sure to use promoted size, not request - * size. - */ - size = arena_mapbits_large_size_get(chunk, - pageind) - large_pad; - } - } - assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); - - if (likely(size <= SMALL_MAXCLASS)) { - /* Small allocation. 
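 * For the small case the bin index comes from size2index() on the
 * caller-supplied size, so the common sized-free path avoids the
 * chunk-map read that arena_dalloc() needs (outside of debug asserts
 * and heap profiling); a hypothetical caller sketch, using only the
 * declarations above:
 *
 *   void *p = arena_malloc(tsdn, NULL, 24, size2index(24), false,
 *       tcache, true);
 *   ...
 *   arena_sdalloc(tsdn, p, 24, tcache, true);   pass the same size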
*/
- if (likely(tcache != NULL)) {
- szind_t binind = size2index(size);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
- } else {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
- }
- } else {
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
-
- if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
- }
- }
- } else
- huge_dalloc(tsdn, ptr);
-}
-# endif /* JEMALLOC_ARENA_INLINE_B */
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/assert.h b/sources/jemalloc/include-linux/jemalloc/internal/assert.h
deleted file mode 100755
index 6f8f7eb9..00000000
--- a/sources/jemalloc/include-linux/jemalloc/internal/assert.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define assert(e) do { \
- if (unlikely(config_debug && !(e))) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef not_reached
-#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) \
- not_implemented(); \
-} while (0)
-#endif
-
-
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/atomic.h b/sources/jemalloc/include-linux/jemalloc/internal/atomic.h
deleted file mode 100755
index 3f15ea14..00000000
--- a/sources/jemalloc/include-linux/jemalloc/internal/atomic.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
-#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_z(p) atomic_add_z(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-/*
- * All arithmetic functions return the arithmetic result of the atomic
- * operation. Some atomic operation APIs return the value prior to mutation, in
- * which case the following functions must redundantly compute the result so
- * that it can be returned. These functions are normally inlined, so the extra
- * operations can be optimized away if the return values aren't used by the
- * callers.
- *
- * <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
- * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
- * {
- * if (*p != c)
- * return (true);
- * *p = s;
- * return (false);
- * }
- * void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
- */
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
-bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
-void atomic_write_uint64(uint64_t *p, uint64_t x);
-uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
-bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
-void atomic_write_uint32(uint32_t *p, uint32_t x);
-void *atomic_add_p(void **p, void *x);
-void *atomic_sub_p(void **p, void *x);
-bool atomic_cas_p(void **p, void *c, void *s);
-void atomic_write_p(void **p, const void *x);
-size_t atomic_add_z(size_t *p, size_t x);
-size_t atomic_sub_z(size_t *p, size_t x);
-bool atomic_cas_z(size_t *p, size_t c, size_t s);
-void atomic_write_z(size_t *p, size_t x);
-unsigned atomic_add_u(unsigned *p, unsigned x);
-unsigned atomic_sub_u(unsigned *p, unsigned x);
-bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
-void atomic_write_u(unsigned *p, unsigned x);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
-/******************************************************************************/
-/* 64-bit operations. */
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# if (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
- uint64_t t = x;
-
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
- uint64_t t;
-
- x = (uint64_t)(-(int64_t)x);
- t = x;
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
- uint8_t success;
-
- asm volatile (
- "lock; cmpxchgq %4, %0;"
- "sete %1;"
- : "=m" (*p), "=a" (success) /* Outputs. */
- : "m" (*p), "a" (c), "r" (s) /* Inputs. */
- : "memory" /* Clobbers. */
- );
-
- return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
- asm volatile (
- "xchgq %1, %0;" /* Lock is implied by xchgq. */
- : "=m" (*p), "+r" (x) /* Outputs. */
- : "m" (*p) /* Inputs. */
- : "memory" /* Clobbers.
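 * The xchgq needs no explicit lock prefix because x86 xchg with a
 * memory operand is implicitly locked. Likewise, in the xaddq-based
 * add/sub above, t comes back holding the old value of *p, so returning
 * t + x yields the post-arithmetic value; e.g. for *p == 5, x == 2:
 * xaddq leaves *p == 7 and t == 5, and the function returns 7.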
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - atomic_store(a, x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. - */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - atomic_store_rel_long(p, x); -} -# elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - uint64_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. 
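 * The do/while below synthesizes the exchange with a classic CAS retry
 * loop: snapshot the current value, then try to install x against that
 * snapshot. Since atomic_cas_uint64() returns false on success, the
 * loop exits exactly when the swap landed; in outline:
 *
 *   do { o = read(p); } while (cas(p, o, x) reports failure);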
*/ - do { - o = atomic_read_uint64(p); - } while (atomic_cas_uint64(p, o, x)); -} -# elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - uint64_t o; - - o = InterlockedCompareExchange64(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - InterlockedExchange64(p, x); -} -# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - __sync_lock_test_and_set(p, x); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif -#endif - -/******************************************************************************/ -/* 32-bit operations. */ -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t = x; - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t; - - x = (uint32_t)(-(int32_t)x); - t = x; - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint8_t success; - - asm volatile ( - "lock; cmpxchgl %4, %0;" - "sete %1;" - : "=m" (*p), "=a" (success) /* Outputs. */ - : "m" (*p), "a" (c), "r" (s) /* Inputs. */ - : "memory" - ); - - return (!(bool)success); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "xchgl %1, %0;" /* Lock is implied by xchgl. */ - : "=m" (*p), "+r" (x) /* Outputs. */ - : "m" (*p) /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - atomic_store(a, x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!atomic_cmpset_32(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - atomic_store_rel_32(p, x); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - uint32_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. */ - do { - o = atomic_read_uint32(p); - } while (atomic_cas_uint32(p, o, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint32_t o; - - o = InterlockedCompareExchange(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - InterlockedExchange(p, x); -} -#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - __sync_lock_test_and_set(p, x); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* Pointer operations. 
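 * These wrappers just dispatch on pointer width: LG_SIZEOF_PTR == 3 on
 * LP64 targets (8-byte pointers, uint64_t operations) and 2 on 32-bit
 * targets. So a hypothetical
 *
 *   atomic_write_p((void **)&head, node);
 *
 * compiles down to atomic_write_uint64() on a 64-bit build.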
*/ -JEMALLOC_INLINE void * -atomic_add_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE void * -atomic_sub_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_p(void **p, void *c, void *s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_p(void **p, const void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* size_t operations. */ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_z(size_t *p, size_t c, size_t s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* unsigned operations. 
*/ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_u(unsigned *p, unsigned c, unsigned s) -{ - -#if (LG_SIZEOF_INT == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_INT == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_INT == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/base.h b/sources/jemalloc/include-linux/jemalloc/internal/base.h deleted file mode 100755 index d6b81e16..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/base.h +++ /dev/null @@ -1,25 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *base_alloc(tsdn_t *tsdn, size_t size); -void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, - size_t *mapped); -bool base_boot(void); -void base_prefork(tsdn_t *tsdn); -void base_postfork_parent(tsdn_t *tsdn); -void base_postfork_child(tsdn_t *tsdn); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h b/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h deleted file mode 100755 index 36f38b59..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h +++ /dev/null @@ -1,274 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS -#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) - -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; -typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - -/* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - -/* - * Do some analysis on how big the bitmap is before we use a tree. 
For a brute - * force linear search, if we would have to call ffs_lu() more than 2^3 times, - * use a tree instead. - */ -#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 -# define USE_TREE -#endif - -/* Number of groups required to store a given number of bits. */ -#define BITMAP_BITS2GROUPS(nbits) \ - ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) - -/* - * Number of groups required at a particular level for a given number of bits. - */ -#define BITMAP_GROUPS_L0(nbits) \ - BITMAP_BITS2GROUPS(nbits) -#define BITMAP_GROUPS_L1(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) -#define BITMAP_GROUPS_L2(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) -#define BITMAP_GROUPS_L3(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ - BITMAP_BITS2GROUPS((nbits))))) - -/* - * Assuming the number of levels, number of groups required for a given number - * of bits. - */ -#define BITMAP_GROUPS_1_LEVEL(nbits) \ - BITMAP_GROUPS_L0(nbits) -#define BITMAP_GROUPS_2_LEVEL(nbits) \ - (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) -#define BITMAP_GROUPS_3_LEVEL(nbits) \ - (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) -#define BITMAP_GROUPS_4_LEVEL(nbits) \ - (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) - -/* - * Maximum number of groups required to support LG_BITMAP_MAXBITS. - */ -#ifdef USE_TREE - -#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) -#else -# error "Unsupported bitmap size" -#endif - -/* Maximum number of levels possible. */ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) - -#else /* USE_TREE */ - -#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) - -#endif /* USE_TREE */ - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; -}; - -struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - -#ifdef USE_TREE - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -#else /* USE_TREE */ - /* Number of groups necessary for nbits. 
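	 * E.g. with 8-byte longs (LG_BITMAP_GROUP_NBITS == 6, i.e. 64 bits
	 * per group), a 512-bit bitmap needs
	 * BITMAP_BITS2GROUPS(512) == (512 + 63) >> 6 == 8 groups.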
*/ - size_t ngroups; -#endif /* USE_TREE */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); -size_t bitmap_size(const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ -#ifdef USE_TREE - size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); -#else - size_t i; - - for (i = 0; i < binfo->ngroups; i++) { - if (bitmap[i] != 0) - return (false); - } - return (true); -#endif -} - -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; - return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); -} - -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); - assert(!bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); -#ifdef USE_TREE - /* Propagate group state transitions up the tree. */ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (g != 0) - break; - } - } -#endif -} - -/* sfu: set first unset. 
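 * Finds the lowest logically-unset bit, sets it, and returns its index.
 * Note that the storage is inverted (a stored 1 means the logical bit is
 * still unset), which is why ffs_lu() can locate the first free bit
 * directly.  E.g. if logical bits 0..2 are already set, the low group
 * word is ...11111000, so
 *
 *   bit = bitmap_sfu(bitmap, binfo);    now bit == 3, and bit 3 is set
 *
 * With USE_TREE the walk starts at the root group and descends with one
 * ffs_lu() per level.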
*/ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t bit; - bitmap_t g; - unsigned i; - - assert(!bitmap_full(bitmap, binfo)); - -#ifdef USE_TREE - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; - bit = ffs_lu(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); - } -#else - i = 0; - g = bitmap[0]; - while ((bit = ffs_lu(g)) == 0) { - i++; - g = bitmap[i]; - } - bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); -#endif - bitmap_set(bitmap, binfo, bit); - return (bit); -} - -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - UNUSED bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); - assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(!bitmap_get(bitmap, binfo, bit)); -#ifdef USE_TREE - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); - assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (!propagate) - break; - } - } -#endif /* USE_TREE */ -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk.h b/sources/jemalloc/include-linux/jemalloc/internal/chunk.h deleted file mode 100755 index 55df9acc..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/chunk.h +++ /dev/null @@ -1,97 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 21 - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -#define CHUNK_HOOKS_INITIALIZER { \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL \ -} - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern size_t opt_lg_chunk; -extern const char *opt_dss; - -extern rtree_t chunks_rtree; - -extern size_t chunksize; -extern size_t chunksize_mask; /* (chunksize - 1). 
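 * With the default LG_CHUNK_DEFAULT of 21 (2 MiB chunks), chunksize_mask
 * is 0x1fffff, so for example:
 *
 *   CHUNK_ADDR2BASE(0x7f0000345678)   == 0x7f0000200000
 *   CHUNK_ADDR2OFFSET(0x7f0000345678) == 0x145678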
*/ -extern size_t chunk_npages; - -extern const chunk_hooks_t chunk_hooks_default; - -chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); -chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, - const chunk_hooks_t *chunk_hooks); - -bool chunk_register(const void *chunk, const extent_node_t *node, - bool *gdump); -void chunk_deregister(const void *chunk, const extent_node_t *node); -void *chunk_alloc_base(size_t size); -void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - size_t *sn, bool *zero, bool *commit, bool dalloc_node); -void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - size_t *sn, bool *zero, bool *commit); -void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, - bool committed); -void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, - bool zeroed, bool committed); -bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, - size_t length); -bool chunk_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -extent_node_t *chunk_lookup(const void *chunk, bool dependent); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) -JEMALLOC_INLINE extent_node_t * -chunk_lookup(const void *ptr, bool dependent) -{ - - return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - -#include "jemalloc/internal/chunk_dss.h" -#include "jemalloc/internal/chunk_mmap.h" diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h b/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h deleted file mode 100755 index da8511ba..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h +++ /dev/null @@ -1,37 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, - dss_prec_secondary = 2, - - dss_prec_limit = 3 -} dss_prec_t; -#define DSS_PREC_DEFAULT dss_prec_secondary -#define DSS_DEFAULT "secondary" - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -extern const char *dss_prec_names[]; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -dss_prec_t chunk_dss_prec_get(void); -bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit); -bool chunk_in_dss(void *chunk); -bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); -void chunk_dss_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h 
b/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h deleted file mode 100755 index 6f2d0ac2..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h +++ /dev/null @@ -1,21 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit); -bool chunk_dalloc_mmap(void *chunk, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ckh.h b/sources/jemalloc/include-linux/jemalloc/internal/ckh.h deleted file mode 100755 index f75ad90b..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/ckh.h +++ /dev/null @@ -1,86 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; - -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); - -/* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. - */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; - -struct ckh_s { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ - uint64_t prng_state; - - /* Total number of items. */ - size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; - - /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. 
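	 * Each bucket holds 2^LG_CKH_BUCKET_CELLS cells; with 64-byte cache
	 * lines and 8-byte pointers that exponent is 6 - 3 - 1 == 2, i.e. 4
	 * cells of 16 bytes (key + data) each, so one bucket exactly fills
	 * one L1 cache line, as intended by the comment at the top of this
	 * header.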
*/ - ckhc_t *tab; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); -void ckh_delete(tsd_t *tsd, ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data); -bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ctl.h b/sources/jemalloc/include-linux/jemalloc/internal/ctl.h deleted file mode 100755 index af0f6d7c..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/ctl.h +++ /dev/null @@ -1,118 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; - const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(tsd_t *, const size_t *, size_t, void *, - size_t *, void *, size_t); -}; - -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, - size_t); -}; - -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - ssize_t lg_dirty_mult; - ssize_t decay_time; - size_t pactive; - size_t pdirty; - - /* The remainder are only populated if config_stats is true. */ - - arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ - malloc_huge_stats_t *hstats; /* nhclasses elements. */ -}; - -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t metadata; - size_t resident; - size_t mapped; - size_t retained; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
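	 * The extra element (index narenas) appears to hold stats merged
	 * across all arenas, so a consumer iterates indices 0..narenas-1 for
	 * per-arena stats and reads the summary from arenas[narenas].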
*/ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); -int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, - size_t *miblenp); - -int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(tsdn_t *tsdn); -void ctl_postfork_parent(tsdn_t *tsdn); -void ctl_postfork_child(tsdn_t *tsdn); - -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - ": Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf(": Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - ": Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/extent.h b/sources/jemalloc/include-linux/jemalloc/internal/extent.h deleted file mode 100755 index fc77f9f5..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/extent.h +++ /dev/null @@ -1,275 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct extent_node_s extent_node_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Tree of extents. Use accessor functions for en_* fields. */ -struct extent_node_s { - /* Arena from which this extent came, if any. */ - arena_t *en_arena; - - /* Pointer to the extent that this tree node is responsible for. */ - void *en_addr; - - /* Total region size. */ - size_t en_size; - - /* - * Serial number (potentially non-unique). - * - * In principle serial numbers can wrap around on 32-bit systems if - * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall - * back on address comparison for equal serial numbers, stable (if - * imperfect) ordering is maintained. - * - * Serial numbers may not be unique even in the absence of wrap-around, - * e.g. when splitting an extent and assigning the same serial number to - * both resulting adjacent extents. - */ - size_t en_sn; - - /* - * The zeroed flag is used by chunk recycling code to track whether - * memory is zero-filled. - */ - bool en_zeroed; - - /* - * True if physical memory is committed to the extent, whether - * explicitly or implicitly as on a system that overcommits and - * satisfies physical memory needs on demand via soft page faults. - */ - bool en_committed; - - /* - * The achunk flag is used to validate that huge allocation lookups - * don't return arena chunks. - */ - bool en_achunk; - - /* Profile counters, used for huge objects. 
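	 * Only meaningful when config_prof is true; extent_node_init() below
	 * starts it out as NULL in that case, until the profiler attaches a
	 * context.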
*/ - prof_tctx_t *en_prof_tctx; - - /* Linkage for arena's runs_dirty and chunks_cache rings. */ - arena_runs_dirty_link_t rd; - qr(extent_node_t) cc_link; - - union { - /* Linkage for the size/sn/address-ordered tree. */ - rb_node(extent_node_t) szsnad_link; - - /* Linkage for arena's achunks, huge, and node_cache lists. */ - ql_elm(extent_node_t) ql_link; - }; - - /* Linkage for the address-ordered tree. */ - rb_node(extent_node_t) ad_link; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_JET -size_t extent_size_quantize_floor(size_t size); -#endif -size_t extent_size_quantize_ceil(size_t size); - -rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) - -rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_t *extent_node_arena_get(const extent_node_t *node); -void *extent_node_addr_get(const extent_node_t *node); -size_t extent_node_size_get(const extent_node_t *node); -size_t extent_node_sn_get(const extent_node_t *node); -bool extent_node_zeroed_get(const extent_node_t *node); -bool extent_node_committed_get(const extent_node_t *node); -bool extent_node_achunk_get(const extent_node_t *node); -prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); -void extent_node_arena_set(extent_node_t *node, arena_t *arena); -void extent_node_addr_set(extent_node_t *node, void *addr); -void extent_node_size_set(extent_node_t *node, size_t size); -void extent_node_sn_set(extent_node_t *node, size_t sn); -void extent_node_zeroed_set(extent_node_t *node, bool zeroed); -void extent_node_committed_set(extent_node_t *node, bool committed); -void extent_node_achunk_set(extent_node_t *node, bool achunk); -void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); -void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, - size_t size, size_t sn, bool zeroed, bool committed); -void extent_node_dirty_linkage_init(extent_node_t *node); -void extent_node_dirty_insert(extent_node_t *node, - arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); -void extent_node_dirty_remove(extent_node_t *node); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) -JEMALLOC_INLINE arena_t * -extent_node_arena_get(const extent_node_t *node) -{ - - return (node->en_arena); -} - -JEMALLOC_INLINE void * -extent_node_addr_get(const extent_node_t *node) -{ - - return (node->en_addr); -} - -JEMALLOC_INLINE size_t -extent_node_size_get(const extent_node_t *node) -{ - - return (node->en_size); -} - -JEMALLOC_INLINE size_t -extent_node_sn_get(const extent_node_t *node) -{ - - return (node->en_sn); -} - -JEMALLOC_INLINE bool -extent_node_zeroed_get(const extent_node_t *node) -{ - - return (node->en_zeroed); -} - -JEMALLOC_INLINE bool -extent_node_committed_get(const extent_node_t *node) -{ - - assert(!node->en_achunk); - return (node->en_committed); -} - -JEMALLOC_INLINE bool -extent_node_achunk_get(const extent_node_t *node) -{ - - return (node->en_achunk); -} - -JEMALLOC_INLINE prof_tctx_t * -extent_node_prof_tctx_get(const extent_node_t *node) -{ - - return (node->en_prof_tctx); -} - -JEMALLOC_INLINE void -extent_node_arena_set(extent_node_t *node, arena_t *arena) -{ - - node->en_arena = arena; -} - 
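/*
 * The en_* fields are only touched through these trivial wrappers so that
 * the extent_node_s layout can change without disturbing callers.  A
 * typical setup sequence (hypothetical arena/addr/size/sn values):
 *
 *   extent_node_t node;
 *   extent_node_init(&node, arena, addr, size, sn, true, true);
 *   assert(extent_node_zeroed_get(&node));
 *   assert(extent_node_committed_get(&node));
 */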
-JEMALLOC_INLINE void -extent_node_addr_set(extent_node_t *node, void *addr) -{ - - node->en_addr = addr; -} - -JEMALLOC_INLINE void -extent_node_size_set(extent_node_t *node, size_t size) -{ - - node->en_size = size; -} - -JEMALLOC_INLINE void -extent_node_sn_set(extent_node_t *node, size_t sn) -{ - - node->en_sn = sn; -} - -JEMALLOC_INLINE void -extent_node_zeroed_set(extent_node_t *node, bool zeroed) -{ - - node->en_zeroed = zeroed; -} - -JEMALLOC_INLINE void -extent_node_committed_set(extent_node_t *node, bool committed) -{ - - node->en_committed = committed; -} - -JEMALLOC_INLINE void -extent_node_achunk_set(extent_node_t *node, bool achunk) -{ - - node->en_achunk = achunk; -} - -JEMALLOC_INLINE void -extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) -{ - - node->en_prof_tctx = tctx; -} - -JEMALLOC_INLINE void -extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, - size_t sn, bool zeroed, bool committed) -{ - - extent_node_arena_set(node, arena); - extent_node_addr_set(node, addr); - extent_node_size_set(node, size); - extent_node_sn_set(node, sn); - extent_node_zeroed_set(node, zeroed); - extent_node_committed_set(node, committed); - extent_node_achunk_set(node, false); - if (config_prof) - extent_node_prof_tctx_set(node, NULL); -} - -JEMALLOC_INLINE void -extent_node_dirty_linkage_init(extent_node_t *node) -{ - - qr_new(&node->rd, rd_link); - qr_new(node, cc_link); -} - -JEMALLOC_INLINE void -extent_node_dirty_insert(extent_node_t *node, - arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) -{ - - qr_meld(runs_dirty, &node->rd, rd_link); - qr_meld(chunks_dirty, node, cc_link); -} - -JEMALLOC_INLINE void -extent_node_dirty_remove(extent_node_t *node) -{ - - qr_remove(&node->rd, rd_link); - qr_remove(node, cc_link); -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/hash.h b/sources/jemalloc/include-linux/jemalloc/internal/hash.h deleted file mode 100755 index 1ff2d9a0..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/hash.h +++ /dev/null @@ -1,357 +0,0 @@ -/* - * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See https://github.com/aappleby/smhasher for - * details. - */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) -/******************************************************************************/ -/* Internal implementation. 
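 * These are the MurmurHash3 building blocks: bit rotations, (possibly
 * unaligned) block loads, and the final avalanche mixers.  The rotations
 * are plain rotate-left helpers, e.g.:
 *
 *   hash_rotl_32(0x80000001, 1) == 0x00000003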
*/ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return ((x << r) | (x >> (32 - r))); -} - -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - - return ((x << r) | (x >> (64 - r))); -} - -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ - - /* Handle unaligned read. */ - if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { - uint32_t ret; - - memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); - return (ret); - } - - return (p[i]); -} - -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ - - /* Handle unaligned read. */ - if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { - uint64_t ret; - - memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); - return (ret); - } - - return (p[i]); -} - -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ - - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return (h); -} - -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - - k ^= k >> 33; - k *= KQU(0xff51afd7ed558ccd); - k ^= k >> 33; - k *= KQU(0xc4ceb9fe1a85ec53); - k ^= k >> 33; - - return (k); -} - -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; - - uint32_t h1 = seed; - - const uint32_t c1 = 0xcc9e2d51; - const uint32_t c2 = 0x1b873593; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i); - - k1 *= c1; - k1 = hash_rotl_32(k1, 15); - k1 *= c2; - - h1 ^= k1; - h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); - - uint32_t k1 = 0; - - switch (len & 3) { - case 3: k1 ^= tail[2] << 16; - case 2: k1 ^= tail[1] << 8; - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; - - h1 = hash_fmix_32(h1); - - return (h1); -} - -UNUSED JEMALLOC_INLINE void -hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint32_t h1 = seed; - uint32_t h2 = seed; - uint32_t h3 = seed; - uint32_t h4 = seed; - - const uint32_t c1 = 0x239b961b; - const uint32_t c2 = 0xab0e9789; - const uint32_t c3 = 0x38b34ae5; - const uint32_t c4 = 0xa1e38b93; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); - - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; - - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; - - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; - - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; - - switch (len & 15) { - case 15: k4 ^= tail[14] 
<< 16; - case 14: k4 ^= tail[13] << 8; - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - case 12: k3 ^= tail[11] << 24; - case 11: k3 ^= tail[10] << 16; - case 10: k3 ^= tail[ 9] << 8; - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - case 8: k2 ^= tail[ 7] << 24; - case 7: k2 ^= tail[ 6] << 16; - case 6: k2 ^= tail[ 5] << 8; - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - case 4: k1 ^= tail[ 3] << 24; - case 3: k1 ^= tail[ 2] << 16; - case 2: k1 ^= tail[ 1] << 8; - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - h1 = hash_fmix_32(h1); - h2 = hash_fmix_32(h2); - h3 = hash_fmix_32(h3); - h4 = hash_fmix_32(h4); - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; -} - -UNUSED JEMALLOC_INLINE void -hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint64_t h1 = seed; - uint64_t h2 = seed; - - const uint64_t c1 = KQU(0x87c37b91114253d5); - const uint64_t c2 = KQU(0x4cf5ad432745937f); - - /* body */ - { - const uint64_t *blocks = (const uint64_t *) (data); - int i; - - for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); - - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; - - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; - - switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; - - h1 += h2; - h2 += h1; - - h1 = hash_fmix_64(h1); - h2 = hash_fmix_64(h2); - - h1 += h2; - h2 += h1; - - r_out[0] = h1; - r_out[1] = h2; -} - -/******************************************************************************/ -/* API. */ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ - - assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
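	 * The MurmurHash3 cores above take a signed int length, so keys of
	 * 2 GiB or more cannot be hashed; callers are expected to pass short
	 * keys.  Typical use, with a hypothetical seed:
	 *
	 *   size_t r_hash[2];
	 *   hash("some key", 8, 0x5a5a5a5aU, r_hash);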
*/ - -#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); -#else - { - uint64_t hashes[2]; - hash_x86_128(key, (int)len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; - } -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/huge.h b/sources/jemalloc/include-linux/jemalloc/internal/huge.h deleted file mode 100755 index 22184d9b..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/huge.h +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); -void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero); -bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t usize_min, size_t usize_max, bool zero); -void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, - size_t usize, size_t alignment, bool zero, tcache_t *tcache); -#ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(void *, size_t); -extern huge_dalloc_junk_t *huge_dalloc_junk; -#endif -void huge_dalloc(tsdn_t *tsdn, void *ptr); -arena_t *huge_aalloc(const void *ptr); -size_t huge_salloc(tsdn_t *tsdn, const void *ptr); -prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); -void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h deleted file mode 100755 index f8df7996..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h +++ /dev/null @@ -1,1294 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H - -#include "jemalloc_internal_defs.h" -#include "jemalloc/internal/jemalloc_internal_decls.h" - -#ifdef JEMALLOC_UTRACE -#include -#endif - -#define JEMALLOC_NO_DEMANGLE -#ifdef JEMALLOC_JET -# define JEMALLOC_N(n) jet_##n -# include "jemalloc/internal/public_namespace.h" -# define JEMALLOC_NO_RENAME -# include "../jemalloc.h" -# undef JEMALLOC_NO_RENAME -#else -# define JEMALLOC_N(n) je_##n -# include "../jemalloc.h" -#endif -#include "jemalloc/internal/private_namespace.h" - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool have_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; -static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; -static const bool config_prof = -#ifdef 
JEMALLOC_PROF - true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool maps_coalesce = -#ifdef JEMALLOC_MAPS_COALESCE - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_thp = -#ifdef JEMALLOC_THP - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef JEMALLOC_IVSALLOC - true -#else - false -#endif - ; -static const bool config_cache_oblivious = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - true -#else - false -#endif - ; - -#ifdef JEMALLOC_C11ATOMICS -#include -#endif - -#ifdef JEMALLOC_ATOMIC9 -#include -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include -#endif - -#ifdef JEMALLOC_ZONE -#include -#include -#include -#endif - -#include "jemalloc/internal/ph.h" -#ifndef __PGI -#define RB_COMPACT -#endif -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. - */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#include "jemalloc/internal/jemalloc_internal_macros.h" - -/* Page size index type. */ -typedef unsigned pszind_t; - -/* Size class index type. */ -typedef unsigned szind_t; - -/* - * Flags bits: - * - * a: arena - * t: tcache - * 0: unused - * z: zero - * n: alignment - * - * aaaaaaaa aaaatttt tttttttt 0znnnnnn - */ -#define MALLOCX_ARENA_MASK ((int)~0xfffff) -#define MALLOCX_ARENA_MAX 0xffe -#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) -#define MALLOCX_TCACHE_MAX 0xffd -#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) -/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ -#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ - (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) -#define MALLOCX_ALIGN_GET(flags) \ - (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) -#define MALLOCX_ZERO_GET(flags) \ - ((bool)(flags & MALLOCX_ZERO)) - -#define MALLOCX_TCACHE_GET(flags) \ - (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) -#define MALLOCX_ARENA_GET(flags) \ - (((unsigned)(((unsigned)flags) >> 20)) - 1) - -/* Smallest size class to support. 
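 * With the usual LG_TINY_MIN of 3 this makes TINY_MIN 8 bytes, the
 * smallest allocation the size classes will hand out.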
*/ -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __aarch64__ -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __or1k__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __riscv__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifdef __le32__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "Unknown minimum alignment for architecture; specify via " - "--with-lg-quantum" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. LG_PAGE is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define PAGE ((size_t)(1U << LG_PAGE)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the page base address for the page containing address a. */ -#define PAGE_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~PAGE_MASK)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & ((~(alignment)) + 1)) - -/* Declare a variable-length array. 
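 * On C99 and newer compilers this expands to a true variable-length
 * array; otherwise it falls back to alloca().  In either case the storage
 * lives on the stack and must not outlive the enclosing function.
 * Hypothetical usage:
 *
 *   VARIABLE_ARRAY(bool, seen, nbins);
 *   memset(seen, 0, nbins * sizeof(bool));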
*/ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include -# else -# include -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * (count)) -#else -# define VARIABLE_ARRAY(type, name, count) type name[(count)] -#endif - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#define JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/extent.h" -#define JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern const char *opt_junk; -extern bool opt_junk_alloc; -extern bool opt_junk_free; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_xmalloc; -extern bool opt_zero; -extern unsigned opt_narenas; - -extern bool in_valgrind; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Number of arenas used for automatic multiplexing of threads and arenas. */ -extern unsigned narenas_auto; - -/* - * Arenas that are used to service external requests. 
Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - */ -extern arena_t **arenas; - -/* - * pind2sz_tab encodes the same information as could be computed by - * pind2sz_compute(). - */ -extern size_t const pind2sz_tab[NPSIZES]; -/* - * index2size_tab encodes the same information as could be computed (at - * unacceptable cost in some code paths) by index2size_compute(). - */ -extern size_t const index2size_tab[NSIZES]; -/* - * size2index_tab is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via size2index(). - */ -extern uint8_t const size2index_tab[]; - -arena_t *a0get(void); -void *a0malloc(size_t size); -void a0dalloc(void *ptr); -void *bootstrap_malloc(size_t size); -void *bootstrap_calloc(size_t num, size_t size); -void bootstrap_free(void *ptr); -unsigned narenas_total_get(void); -arena_t *arena_init(tsdn_t *tsdn, unsigned ind); -arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); -arena_t *arena_choose_hard(tsd_t *tsd, bool internal); -void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); -void thread_allocated_cleanup(tsd_t *tsd); -void thread_deallocated_cleanup(tsd_t *tsd); -void iarena_cleanup(tsd_t *tsd); -void arena_cleanup(tsd_t *tsd); -void arenas_tdata_cleanup(tsd_t *tsd); -void narenas_tdata_cleanup(tsd_t *tsd); -void arenas_tdata_bypass_cleanup(tsd_t *tsd); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include 
"jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -pszind_t psz2ind(size_t psz); -size_t pind2sz_compute(pszind_t pind); -size_t pind2sz_lookup(pszind_t pind); -size_t pind2sz(pszind_t pind); -size_t psz2u(size_t psz); -szind_t size2index_compute(size_t size); -szind_t size2index_lookup(size_t size); -szind_t size2index(size_t size); -size_t index2size_compute(szind_t index); -size_t index2size_lookup(szind_t index); -size_t index2size(szind_t index); -size_t s2u_compute(size_t size); -size_t s2u_lookup(size_t size); -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); -arena_t *arena_choose(tsd_t *tsd, arena_t *arena); -arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); -arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, - bool refresh_if_missing); -arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); -ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_INLINE pszind_t -psz2ind(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (NPSIZES); - { - pszind_t x = lg_floor((psz<<1)-1); - pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - - (LG_SIZE_CLASS_GROUP + LG_PAGE); - pszind_t grp = shift << LG_SIZE_CLASS_GROUP; - - pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - pszind_t ind = grp + mod; - return (ind); - } -} - -JEMALLOC_INLINE size_t -pind2sz_compute(pszind_t pind) -{ - - { - size_t grp = pind >> LG_SIZE_CLASS_GROUP; - size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_PAGE + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_PAGE-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t sz = grp_size + mod_size; - return (sz); - } -} - -JEMALLOC_INLINE size_t -pind2sz_lookup(pszind_t pind) -{ - size_t ret = (size_t)pind2sz_tab[pind]; - assert(ret == pind2sz_compute(pind)); - return (ret); -} - -JEMALLOC_INLINE size_t -pind2sz(pszind_t pind) -{ - - assert(pind < NPSIZES); - return (pind2sz_lookup(pind)); -} - -JEMALLOC_INLINE size_t -psz2u(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (0); - { - size_t x = lg_floor((psz<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (psz + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_INLINE szind_t -size2index_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (NSIZES); -#if (NTBINS != 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); - } -#endif - { - szind_t x = lg_floor((size<<1)-1); - szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : - x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); - szind_t grp = shift << LG_SIZE_CLASS_GROUP; - - szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - szind_t index = NTBINS + grp + mod; - return (index); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index_lookup(size_t size) -{ - - assert(size <= LOOKUP_MAXCLASS); - { - szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); - assert(ret == size2index_compute(size)); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (size2index_lookup(size)); - return (size2index_compute(size)); -} - -JEMALLOC_INLINE size_t -index2size_compute(szind_t index) -{ - -#if (NTBINS > 0) - if (index < NTBINS) - return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); -#endif - { - size_t reduced_index = index - NTBINS; - size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; - size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_QUANTUM + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_QUANTUM-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t usize = grp_size + mod_size; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size_lookup(szind_t index) -{ - size_t ret = (size_t)index2size_tab[index]; - assert(ret == index2size_compute(index)); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size(szind_t index) -{ - - assert(index < NSIZES); - return (index2size_lookup(index)); -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (0); -#if (NTBINS > 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : - (ZU(1) << lg_ceil)); - } -#endif - { - size_t x = lg_floor((size<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (size + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_lookup(size_t size) -{ - size_t ret = index2size_lookup(size2index_lookup(size)); - - assert(ret == s2u_compute(size)); - return (ret); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size. - */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (s2u_lookup(size)); - return (s2u_compute(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* Try for a small size class. */ - if (size <= SMALL_MAXCLASS && alignment < PAGE) { - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each - * small size class, every object is aligned at the smallest - * power of two that is non-zero in the base two representation - * of the size. 
For example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = s2u(ALIGNMENT_CEILING(size, alignment)); - if (usize < LARGE_MINCLASS) - return (usize); - } - - /* Try for a large size class. */ - if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { - /* - * We can't achieve subpage alignment, so round up alignment - * to the minimum that can actually be supported. - */ - alignment = PAGE_CEILING(alignment); - - /* Make sure result is a large size class. */ - usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - */ - if (usize + large_pad + alignment - PAGE <= arena_maxrun) - return (usize); - } - - /* Huge size class. Beware of overflow. */ - - if (unlikely(alignment > HUGE_MAXCLASS)) - return (0); - - /* - * We can't achieve subchunk alignment, so round up alignment to the - * minimum that can actually be supported. - */ - alignment = CHUNK_CEILING(alignment); - - /* Make sure result is a huge size class. */ - if (size <= chunksize) - usize = chunksize; - else { - usize = s2u(size); - if (usize < size) { - /* size_t overflow. */ - return (0); - } - } - - /* - * Calculate the multi-chunk mapping that huge_palloc() would need in - * order to guarantee the alignment. - */ - if (usize + alignment - PAGE < usize) { - /* size_t overflow. */ - return (0); - } - return (usize); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); - if (unlikely(ret == NULL)) - ret = arena_choose_hard(tsd, internal); - - return (ret); -} - -JEMALLOC_INLINE arena_t * -arena_choose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, false)); -} - -JEMALLOC_INLINE arena_t * -arena_ichoose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, true)); -} - -JEMALLOC_INLINE arena_tdata_t * -arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) -{ - arena_tdata_t *tdata; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - - if (unlikely(arenas_tdata == NULL)) { - /* arenas_tdata hasn't been initialized yet. */ - return (arena_tdata_get_hard(tsd, ind)); - } - if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { - /* - * ind is invalid, cache is old (too small), or tdata to be - * initialized. - */ - return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : - NULL); - } - - tdata = &arenas_tdata[ind]; - if (likely(tdata != NULL) || !refresh_if_missing) - return (tdata); - return (arena_tdata_get_hard(tsd, ind)); -} - -JEMALLOC_INLINE arena_t * -arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) -{ - arena_t *ret; - - assert(ind <= MALLOCX_ARENA_MAX); - - ret = arenas[ind]; - if (unlikely(ret == NULL)) { - ret = atomic_read_p((void *)&arenas[ind]); - if (init_if_missing && unlikely(ret == NULL)) - ret = arena_init(tsdn, ind); - } - return (ret); -} - -JEMALLOC_INLINE ticker_t * -decay_ticker_get(tsd_t *tsd, unsigned ind) -{ - arena_tdata_t *tdata; - - tdata = arena_tdata_get(tsd, ind, true); - if (unlikely(tdata == NULL)) - return (NULL); - return (&tdata->decay_ticker); -} -#endif - -#include "jemalloc/internal/bitmap.h" -/* - * Include portions of arena.h interleaved with tcache.h in order to resolve - * circular dependencies. - */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -arena_t *iaalloc(const void *ptr); -size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); -void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); -void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, - bool slow_path); -void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena); -void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena); -void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); -size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(tsdn_t *tsdn, const void *ptr); -void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path); -void idalloc(tsd_t *tsd, void *ptr); -void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, - arena_t *arena); -void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); -void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero); -bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE arena_t * -iaalloc(const void *ptr) -{ - - assert(ptr != NULL); - - return (arena_aalloc(ptr)); -} - -/* - * Typical usage: - * tsdn_t *tsdn = [...] - * void *ptr = [...] - * size_t sz = isalloc(tsdn, ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. 
*/ - assert(config_prof || !demote); - - return (arena_salloc(tsdn, ptr, demote)); -} - -JEMALLOC_ALWAYS_INLINE void * -iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, - bool is_metadata, arena_t *arena, bool slow_path) -{ - void *ret; - - assert(size != 0); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), - isalloc(tsdn, ret, config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) -{ - - return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), - false, NULL, slow_path)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, - config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - - return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) -{ - - return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, - tcache_get(tsd, true), false, NULL)); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - extent_node_t *node; - - /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - node = chunk_lookup(ptr, false); - if (node == NULL) - return (0); - /* Only arena chunks should be looked up via interior pointers. 
*/ - assert(extent_node_addr_get(node) == ptr || - extent_node_achunk_get(node)); - - return (isalloc(tsdn, ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - szind_t binind = size2index(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(tsdn_t *tsdn, const void *ptr) -{ - size_t usize = isalloc(tsdn, ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path) -{ - - assert(ptr != NULL); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); - if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, - config_prof)); - } - - arena_dalloc(tsdn, ptr, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(tsd_t *tsd, void *ptr) -{ - - idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path) -{ - - arena_sdalloc(tsdn, ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) -{ - void *p; - size_t usize, copysize; - - usize = sa2u(size + extra, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); - if (p == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, - arena); - if (p == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(p, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache, true); - return (p); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero, tcache_t *tcache, arena_t *arena) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* - * Existing object alignment is inadequate; allocate new space - * and copy. 
- */ -return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, - zero, tcache, arena)); - } - - return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, - tcache)); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero) -{ - - return (iralloct(tsd, ptr, oldsize, size, alignment, zero, - tcache_get(tsd, true), NULL)); -} - -JEMALLOC_ALWAYS_INLINE bool -ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* Existing object alignment is inadequate. */ - return (true); - } - - return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); -} -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h deleted file mode 100755 index c907d910..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_DECLS_H -#define JEMALLOC_INTERNAL_DECLS_H - -#include <math.h> -#ifdef _WIN32 -# include <windows.h> -# include "msvc_compat/windows_extra.h" - -#else -# include <sys/param.h> -# include <sys/mman.h> -# if !defined(__pnacl__) && !defined(__native_client__) -# include <sys/syscall.h> -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# include <sys/uio.h> -# endif -# include <pthread.h> -# ifdef JEMALLOC_OS_UNFAIR_LOCK -# include <os/lock.h> -# endif -# ifdef JEMALLOC_GLIBC_MALLOC_HOOK -# include <sched.h> -# endif -# include <errno.h> -# include <sys/time.h> -# include <time.h> -# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME -# include <mach/mach_time.h> -# endif -#endif -#include <sys/types.h> - -#include <limits.h> -#ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX -#endif -#include <stdarg.h> -#include <stdbool.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdint.h> -#include <stddef.h> -#ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) -#endif -#include <string.h> -#include <strings.h> -#include <ctype.h> -#ifdef _MSC_VER -# include <io.h> -typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -# ifdef JEMALLOC_HAS_RESTRICT -# define restrict __restrict -# endif -/* Disable warnings about deprecated system functions. */ -# pragma warning(disable: 4996) -#if _MSC_VER < 1800 -static int -isblank(int c) -{ - - return (c == '\t' || c == ' '); -} -#endif -#else -# include <unistd.h> -#endif -#include <fcntl.h> - -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h deleted file mode 100755 index c02e814e..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h +++ /dev/null @@ -1,335 +0,0 @@ -/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ -#ifndef JEMALLOC_INTERNAL_DEFS_H_ -#define JEMALLOC_INTERNAL_DEFS_H_ - -//#define PUB_MEMORY_TRACKER - -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously.
- */ -#define JEMALLOC_PREFIX "je_" -#define JEMALLOC_CPREFIX "JE_" - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#define JEMALLOC_PRIVATE_NAMESPACE je_ - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#ifdef __x86_64__ -#define CPU_SPINWAIT __asm__ volatile("pause") -#else -#define CPU_SPINWAIT -#endif - -/* Defined if C11 atomics are available. */ -/* #undef JEMALLOC_C11ATOMICS */ - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -/* #undef JEMALLOC_ATOMIC9 */ - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -/* #undef JEMALLOC_OSATOMIC */ - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ - -/* - * Defined if __builtin_clz() and __builtin_clzl() are available. - */ -#define JEMALLOC_HAVE_BUILTIN_CLZ - -/* - * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. - */ -/* #undef JEMALLOC_OS_UNFAIR_LOCK */ - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -/* #undef JEMALLOC_OSSPIN */ - -/* Defined if syscall(2) is usable. */ -#define JEMALLOC_USE_SYSCALL - -/* - * Defined if secure_getenv(3) is available. - */ -/* #undef JEMALLOC_HAVE_SECURE_GETENV */ - -/* - * Defined if issetugid(2) is available. - */ -/* #undef JEMALLOC_HAVE_ISSETUGID */ - -/* Defined if pthread_atfork(3) is available. */ -#define JEMALLOC_HAVE_PTHREAD_ATFORK - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. - */ -/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. - */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 - -/* - * Defined if mach_absolute_time() is available. - */ -/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. 
- */ -#define JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -/* #undef JEMALLOC_MUTEX_INIT_CB */ - -/* Non-empty if the tls_model attribute is supported. */ -#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#define JEMALLOC_CC_SILENCE - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -/* #undef JEMALLOC_CODE_COVERAGE */ - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -#ifdef PUB_MEMORY_TRACKER -#define JEMALLOC_DEBUG -#endif - -/* JEMALLOC_STATS enables statistics calculation. */ -#ifdef PUB_MEMORY_TRACKER -#define JEMALLOC_STATS -#endif - -/* JEMALLOC_PROF enables allocation profiling. */ -/* #undef JEMALLOC_PROF */ - -/* Use libunwind for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBUNWIND */ - -/* Use libgcc for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBGCC */ - -/* Use gcc intrinsics for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_GCC */ - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#ifndef PUB_MEMORY_TRACKER -#define JEMALLOC_TCACHE -#endif - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -#define JEMALLOC_DSS - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#define JEMALLOC_FILL - -/* Support utrace(2)-based tracing. */ -/* #undef JEMALLOC_UTRACE */ - -/* Support Valgrind. */ -/* #undef JEMALLOC_VALGRIND */ - -/* Support optional abort() on OOM. */ -/* #undef JEMALLOC_XMALLOC */ - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -/* #undef JEMALLOC_LAZY_LOCK */ - -/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ -#define LG_TINY_MIN 3 - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -/* #undef LG_QUANTUM */ - -/* One page is 2^LG_PAGE bytes. */ -#define LG_PAGE 12 - -/* - * If defined, adjacent virtual memory mappings with identical attributes - * automatically coalesce, and they fragment when changes are made to subranges. - * This is the normal order of things for mmap()/munmap(), but on Windows - * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. - * mappings do *not* coalesce/fragment. - */ -#define JEMALLOC_MAPS_COALESCE - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -/* #undef JEMALLOC_MUNMAP */ - -/* TLS is used to map arenas and magazine caches to threads. */ -#define JEMALLOC_TLS - -/* - * Used to mark unreachable code to quiet "end of non-void" compiler warnings. - * Don't use this directly; instead use unreachable() from util.h - */ -#define JEMALLOC_INTERNAL_UNREACHABLE abort - -/* - * ffs*() functions to use for bitmapping. Don't use these directly; instead, - * use ffs_*() from util.h. 
- */ -#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll -#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl -#define JEMALLOC_INTERNAL_FFS __builtin_ffs - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -#ifdef PUB_MEMORY_TRACKER -#define JEMALLOC_IVSALLOC -#endif - -/* - * If defined, explicitly attempt to more uniformly distribute large allocation - * pointer alignments across all cache indices. - */ -#define JEMALLOC_CACHE_OBLIVIOUS - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -/* #undef JEMALLOC_ZONE */ - -/* - * Methods for determining whether the OS overcommits. - * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's - * /proc/sys/vm.overcommit_memory file. - * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. - */ -/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ -#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY - -/* Defined if madvise(2) is available. */ -#define JEMALLOC_HAVE_MADVISE - -/* - * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE - * arguments to madvise(2). - */ -/* #undef JEMALLOC_HAVE_MADVISE_HUGE */ - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_FREE) : This marks pages as being unused, such that they - * will be discarded rather than swapped out. - * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that - * new pages will be demand-zeroed if the - * address region is later touched. - */ -/* #undef JEMALLOC_PURGE_MADVISE_FREE */ -#define JEMALLOC_PURGE_MADVISE_DONTNEED - -/* Defined if transparent huge page support is enabled. */ -/* #undef JEMALLOC_THP */ - -/* Define if operating system has alloca.h header. */ -#define JEMALLOC_HAS_ALLOCA_H 1 - -/* C99 restrict keyword supported. */ -/*#define JEMALLOC_HAS_RESTRICT 1 */ - -/* For use by hash code. */ -/* #undef JEMALLOC_BIG_ENDIAN */ - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#define LG_SIZEOF_INT 2 - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#if (defined(__LP64__) && __LP64__) -#define LG_SIZEOF_LONG 3 -#else -#define LG_SIZEOF_LONG 2 -#endif - -/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ -#define LG_SIZEOF_LONG_LONG 3 - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#define LG_SIZEOF_INTMAX_T 3 - -/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ -#define JEMALLOC_GLIBC_MALLOC_HOOK - -/* glibc memalign hook. */ -#define JEMALLOC_GLIBC_MEMALIGN_HOOK - -/* Adaptive mutex support in pthreads. */ -#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP - -/* - * If defined, jemalloc symbols are not exported (doesn't work when - * JEMALLOC_PREFIX is not defined). - */ -#define JEMALLOC_EXPORT /**/ - -/* config.malloc_conf options string. */ -#define JEMALLOC_CONFIG_MALLOC_CONF "" - -#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h deleted file mode 100755 index a08ba772..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for - * functions that are static inline functions if inlining is enabled, and - * single-definition library-private functions if inlining is disabled. 
- * - * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in - * which case the denoted functions are always static, regardless of whether - * inlining is enabled. - */ -#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) - /* Disable inlining to make debugging/profiling easier. */ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define JEMALLOC_INLINE_C static -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# define JEMALLOC_INLINE_C static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -#ifdef JEMALLOC_CC_SILENCE -# define UNUSED JEMALLOC_ATTR(unused) -#else -# define UNUSED -#endif - -#define ZU(z) ((size_t)z) -#define ZI(z) ((ssize_t)z) -#define QU(q) ((uint64_t)q) -#define QI(q) ((int64_t)q) - -#define KZU(z) ZU(z##ULL) -#define KZI(z) ZI(z##LL) -#define KQU(q) QU(q##ULL) -#define KQI(q) QI(q##LL) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#ifndef JEMALLOC_HAS_RESTRICT -# define restrict -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/internal/mb.h b/sources/jemalloc/include-linux/jemalloc/internal/mb.h deleted file mode 100755 index e58da5c3..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/mb.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void mb_write(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) -#ifdef __i386__ -/* - * According to the Intel Architecture Software Developer's Manual, current - * processors execute instructions in order from the perspective of other - * processors in a multiprocessor system, but 1) Intel reserves the right to - * change that, and 2) the compiler's optimizer could re-order instructions if - * there weren't some form of barrier. Therefore, even if running on an - * architecture that does not need memory barriers (everything through at least - * i686), an "optimizer barrier" is necessary. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - -# if 0 - /* This is a true memory barrier. */ - asm volatile ("pusha;" - "xor %%eax,%%eax;" - "cpuid;" - "popa;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -# else - /* - * This is hopefully enough to keep the compiler from reordering - * instructions around this one. - */ - asm volatile ("nop;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -# endif -} -#elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("sfence" - : /* Outputs. */ - : /* Inputs. 
*/ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__powerpc__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("eieio" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__sparc__) && defined(__arch64__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("membar #StoreStore" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__tile__) -JEMALLOC_INLINE void -mb_write(void) -{ - - __sync_synchronize(); -} -#else -/* - * This is much slower than a simple memory barrier, but the semantics of mutex - * unlock make this work. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - malloc_mutex_t mtx; - - malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); - malloc_mutex_lock(TSDN_NULL, &mtx); - malloc_mutex_unlock(TSDN_NULL, &mtx); -} -#endif -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/mutex.h b/sources/jemalloc/include-linux/jemalloc/internal/mutex.h deleted file mode 100755 index 2b4b1c31..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/mutex.h +++ /dev/null @@ -1,145 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct malloc_mutex_s malloc_mutex_t; - -#ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) -# define MALLOC_MUTEX_INITIALIZER \ - {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#else -# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ - WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -# else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -# endif -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct malloc_mutex_s { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - SRWLOCK lock; -# else - CRITICAL_SECTION lock; -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock lock; -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; -#else - pthread_mutex_t lock; -#endif - witness_t witness; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
*/ -# define isthreaded true -#endif - -bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, - witness_rank_t rank); -void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); -bool malloc_mutex_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_not_owner(tsdn, &mutex->witness); - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - AcquireSRWLockExclusive(&mutex->lock); -# else - EnterCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock_lock(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif - } - witness_lock(tsdn, &mutex->witness); -} - -JEMALLOC_INLINE void -malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_unlock(tsdn, &mutex->witness); - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - ReleaseSRWLockExclusive(&mutex->lock); -# else - LeaveCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock_unlock(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif - } -} - -JEMALLOC_INLINE void -malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_owner(tsdn, &mutex->witness); -} - -JEMALLOC_INLINE void -malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_not_owner(tsdn, &mutex->witness); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/nstime.h b/sources/jemalloc/include-linux/jemalloc/internal/nstime.h deleted file mode 100755 index 93b27dc8..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/nstime.h +++ /dev/null @@ -1,48 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct nstime_s nstime_t; - -/* Maximum supported number of seconds (~584 years). 
*/ -#define NSTIME_SEC_MAX KQU(18446744072) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct nstime_s { - uint64_t ns; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void nstime_init(nstime_t *time, uint64_t ns); -void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); -uint64_t nstime_ns(const nstime_t *time); -uint64_t nstime_sec(const nstime_t *time); -uint64_t nstime_nsec(const nstime_t *time); -void nstime_copy(nstime_t *time, const nstime_t *source); -int nstime_compare(const nstime_t *a, const nstime_t *b); -void nstime_add(nstime_t *time, const nstime_t *addend); -void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); -void nstime_imultiply(nstime_t *time, uint64_t multiplier); -void nstime_idivide(nstime_t *time, uint64_t divisor); -uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); -#ifdef JEMALLOC_JET -typedef bool (nstime_monotonic_t)(void); -extern nstime_monotonic_t *nstime_monotonic; -typedef bool (nstime_update_t)(nstime_t *); -extern nstime_update_t *nstime_update; -#else -bool nstime_monotonic(void); -bool nstime_update(nstime_t *time); -#endif - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/pages.h b/sources/jemalloc/include-linux/jemalloc/internal/pages.h deleted file mode 100755 index 4ae9f156..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/pages.h +++ /dev/null @@ -1,29 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *pages_map(void *addr, size_t size, bool *commit); -void pages_unmap(void *addr, size_t size); -void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, - size_t size, bool *commit); -bool pages_commit(void *addr, size_t size); -bool pages_decommit(void *addr, size_t size); -bool pages_purge(void *addr, size_t size); -bool pages_huge(void *addr, size_t size); -bool pages_nohuge(void *addr, size_t size); -void pages_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ph.h b/sources/jemalloc/include-linux/jemalloc/internal/ph.h deleted file mode 100755 index 4f91c333..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/ph.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * A Pairing Heap implementation. - * - * "The Pairing Heap: A New Form of Self-Adjusting Heap" - * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf - * - * With auxiliary twopass list, described in a follow on paper. 
- * - * "Pairing Heaps: Experiments and Analysis" - * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf - * - ******************************************************************************* - */ - -#ifndef PH_H_ -#define PH_H_ - -/* Node structure. */ -#define phn(a_type) \ -struct { \ - a_type *phn_prev; \ - a_type *phn_next; \ - a_type *phn_lchild; \ -} - -/* Root structure. */ -#define ph(a_type) \ -struct { \ - a_type *ph_root; \ -} - -/* Internal utility macros. */ -#define phn_lchild_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_lchild) -#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ - a_phn->a_field.phn_lchild = a_lchild; \ -} while (0) - -#define phn_next_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_next) -#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ - a_phn->a_field.phn_prev = a_prev; \ -} while (0) - -#define phn_prev_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_prev) -#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ - a_phn->a_field.phn_next = a_next; \ -} while (0) - -#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ - a_type *phn0child; \ - \ - assert(a_phn0 != NULL); \ - assert(a_phn1 != NULL); \ - assert(a_cmp(a_phn0, a_phn1) <= 0); \ - \ - phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ - phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ - phn_next_set(a_type, a_field, a_phn1, phn0child); \ - if (phn0child != NULL) \ - phn_prev_set(a_type, a_field, phn0child, a_phn1); \ - phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ -} while (0) - -#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ - if (a_phn0 == NULL) \ - r_phn = a_phn1; \ - else if (a_phn1 == NULL) \ - r_phn = a_phn0; \ - else if (a_cmp(a_phn0, a_phn1) < 0) { \ - phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ - a_cmp); \ - r_phn = a_phn0; \ - } else { \ - phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ - a_cmp); \ - r_phn = a_phn1; \ - } \ -} while (0) - -#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *head = NULL; \ - a_type *tail = NULL; \ - a_type *phn0 = a_phn; \ - a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ - \ - /* \ - * Multipass merge, wherein the first two elements of a FIFO \ - * are repeatedly merged, and each result is appended to the \ - * singly linked FIFO, until the FIFO contains only a single \ - * element. We start with a sibling list but no reference to \ - * its tail, so we do a single pass over the sibling list to \ - * populate the FIFO. 
\ - */ \ - if (phn1 != NULL) { \ - a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ - if (phnrest != NULL) \ - phn_prev_set(a_type, a_field, phnrest, NULL); \ - phn_prev_set(a_type, a_field, phn0, NULL); \ - phn_next_set(a_type, a_field, phn0, NULL); \ - phn_prev_set(a_type, a_field, phn1, NULL); \ - phn_next_set(a_type, a_field, phn1, NULL); \ - phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ - head = tail = phn0; \ - phn0 = phnrest; \ - while (phn0 != NULL) { \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - phnrest = phn_next_get(a_type, a_field, \ - phn1); \ - if (phnrest != NULL) { \ - phn_prev_set(a_type, a_field, \ - phnrest, NULL); \ - } \ - phn_prev_set(a_type, a_field, phn0, \ - NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - phn_prev_set(a_type, a_field, phn1, \ - NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = phnrest; \ - } else { \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = NULL; \ - } \ - } \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - while (true) { \ - head = phn_next_get(a_type, a_field, \ - phn1); \ - assert(phn_prev_get(a_type, a_field, \ - phn0) == NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - assert(phn_prev_get(a_type, a_field, \ - phn1) == NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - if (head == NULL) \ - break; \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, \ - phn0); \ - } \ - } \ - } \ - r_phn = phn0; \ -} while (0) - -#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ - a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ - if (phn != NULL) { \ - phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ - phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ - phn_prev_set(a_type, a_field, phn, NULL); \ - ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ - assert(phn_next_get(a_type, a_field, phn) == NULL); \ - phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ - a_ph->ph_root); \ - } \ -} while (0) - -#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ - if (lchild == NULL) \ - r_phn = NULL; \ - else { \ - ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ - r_phn); \ - } \ -} while (0) - -/* - * The ph_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to ph_gen(). - */ -#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ -a_attr void a_prefix##new(a_ph_type *ph); \ -a_attr bool a_prefix##empty(a_ph_type *ph); \ -a_attr a_type *a_prefix##first(a_ph_type *ph); \ -a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ -a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ -a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); - -/* - * The ph_gen() macro generates a type-specific pairing heap implementation, - * based on the above cpp macros. 
- */ -#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_ph_type *ph) \ -{ \ - \ - memset(ph, 0, sizeof(ph(a_type))); \ -} \ -a_attr bool \ -a_prefix##empty(a_ph_type *ph) \ -{ \ - \ - return (ph->ph_root == NULL); \ -} \ -a_attr a_type * \ -a_prefix##first(a_ph_type *ph) \ -{ \ - \ - if (ph->ph_root == NULL) \ - return (NULL); \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - return (ph->ph_root); \ -} \ -a_attr void \ -a_prefix##insert(a_ph_type *ph, a_type *phn) \ -{ \ - \ - memset(&phn->a_field, 0, sizeof(phn(a_type))); \ - \ - /* \ - * Treat the root as an aux list during insertion, and lazily \ - * merge during a_prefix##remove_first(). For elements that \ - * are inserted, then removed via a_prefix##remove() before the \ - * aux list is ever processed, this makes insert/remove \ - * constant-time, whereas eager merging would make insert \ - * O(log n). \ - */ \ - if (ph->ph_root == NULL) \ - ph->ph_root = phn; \ - else { \ - phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ - a_field, ph->ph_root)); \ - if (phn_next_get(a_type, a_field, ph->ph_root) != \ - NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, ph->ph_root), \ - phn); \ - } \ - phn_prev_set(a_type, a_field, phn, ph->ph_root); \ - phn_next_set(a_type, a_field, ph->ph_root, phn); \ - } \ -} \ -a_attr a_type * \ -a_prefix##remove_first(a_ph_type *ph) \ -{ \ - a_type *ret; \ - \ - if (ph->ph_root == NULL) \ - return (NULL); \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - \ - ret = ph->ph_root; \ - \ - ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ - ph->ph_root); \ - \ - return (ret); \ -} \ -a_attr void \ -a_prefix##remove(a_ph_type *ph, a_type *phn) \ -{ \ - a_type *replace, *parent; \ - \ - /* \ - * We can delete from aux list without merging it, but we need \ - * to merge if we are dealing with the root node. \ - */ \ - if (ph->ph_root == phn) { \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - if (ph->ph_root == phn) { \ - ph_merge_children(a_type, a_field, ph->ph_root, \ - a_cmp, ph->ph_root); \ - return; \ - } \ - } \ - \ - /* Get parent (if phn is leftmost child) before mutating. */ \ - if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ - if (phn_lchild_get(a_type, a_field, parent) != phn) \ - parent = NULL; \ - } \ - /* Find a possible replacement node, and link to parent. */ \ - ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ - /* Set next/prev for sibling linked list. 
*/ \ - if (replace != NULL) { \ - if (parent != NULL) { \ - phn_prev_set(a_type, a_field, replace, parent); \ - phn_lchild_set(a_type, a_field, parent, \ - replace); \ - } else { \ - phn_prev_set(a_type, a_field, replace, \ - phn_prev_get(a_type, a_field, phn)); \ - if (phn_prev_get(a_type, a_field, phn) != \ - NULL) { \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - replace); \ - } \ - } \ - phn_next_set(a_type, a_field, replace, \ - phn_next_get(a_type, a_field, phn)); \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - replace); \ - } \ - } else { \ - if (parent != NULL) { \ - a_type *next = phn_next_get(a_type, a_field, \ - phn); \ - phn_lchild_set(a_type, a_field, parent, next); \ - if (next != NULL) { \ - phn_prev_set(a_type, a_field, next, \ - parent); \ - } \ - } else { \ - assert(phn_prev_get(a_type, a_field, phn) != \ - NULL); \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - phn_next_get(a_type, a_field, phn)); \ - } \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - phn_prev_get(a_type, a_field, phn)); \ - } \ - } \ -} - -#endif /* PH_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h b/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h deleted file mode 100755 index 7ebeeba8..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h +++ /dev/null @@ -1,639 +0,0 @@ -#define a0dalloc JEMALLOC_N(a0dalloc) -#define a0get JEMALLOC_N(a0get) -#define a0malloc JEMALLOC_N(a0malloc) -#define arena_aalloc JEMALLOC_N(arena_aalloc) -#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) -#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) -#define arena_bin_index JEMALLOC_N(arena_bin_index) -#define arena_bin_info JEMALLOC_N(arena_bin_info) -#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) -#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) -#define arena_boot JEMALLOC_N(arena_boot) -#define arena_choose JEMALLOC_N(arena_choose) -#define arena_choose_hard JEMALLOC_N(arena_choose_hard) -#define arena_choose_impl JEMALLOC_N(arena_choose_impl) -#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) -#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) -#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) -#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) -#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) -#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) -#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) -#define arena_cleanup JEMALLOC_N(arena_cleanup) -#define arena_dalloc JEMALLOC_N(arena_dalloc) -#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) -#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) -#define arena_decay_tick JEMALLOC_N(arena_decay_tick) -#define arena_decay_ticks 
JEMALLOC_N(arena_decay_ticks) -#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) -#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) -#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) -#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set) -#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) -#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) -#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) -#define arena_get JEMALLOC_N(arena_get) -#define arena_ichoose JEMALLOC_N(arena_ichoose) -#define arena_init JEMALLOC_N(arena_init) -#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) -#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) -#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) -#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) -#define arena_malloc JEMALLOC_N(arena_malloc) -#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) -#define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) -#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) -#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) -#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) -#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) -#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) -#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) -#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) -#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) -#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) -#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) -#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) -#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) -#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) -#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) -#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) -#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) -#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) -#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) -#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) -#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_maxrun JEMALLOC_N(arena_maxrun) -#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) -#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) -#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) -#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) -#define arena_migrate JEMALLOC_N(arena_migrate) -#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) -#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) -#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) -#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) -#define arena_new JEMALLOC_N(arena_new) -#define arena_node_alloc JEMALLOC_N(arena_node_alloc) -#define arena_node_dalloc 
JEMALLOC_N(arena_node_dalloc) -#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) -#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) -#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) -#define arena_palloc JEMALLOC_N(arena_palloc) -#define arena_postfork_child JEMALLOC_N(arena_postfork_child) -#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork0 JEMALLOC_N(arena_prefork0) -#define arena_prefork1 JEMALLOC_N(arena_prefork1) -#define arena_prefork2 JEMALLOC_N(arena_prefork2) -#define arena_prefork3 JEMALLOC_N(arena_prefork3) -#define arena_prof_accum JEMALLOC_N(arena_prof_accum) -#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) -#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) -#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) -#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) -#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) -#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge JEMALLOC_N(arena_purge) -#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) -#define arena_ralloc JEMALLOC_N(arena_ralloc) -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) -#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -#define arena_reset JEMALLOC_N(arena_reset) -#define arena_run_regind JEMALLOC_N(arena_run_regind) -#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) -#define arena_salloc JEMALLOC_N(arena_salloc) -#define arena_sdalloc JEMALLOC_N(arena_sdalloc) -#define arena_stats_merge JEMALLOC_N(arena_stats_merge) -#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) -#define arena_tdata_get JEMALLOC_N(arena_tdata_get) -#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) -#define arenas JEMALLOC_N(arenas) -#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) -#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) -#define atomic_add_p JEMALLOC_N(atomic_add_p) -#define atomic_add_u JEMALLOC_N(atomic_add_u) -#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) -#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) -#define atomic_add_z JEMALLOC_N(atomic_add_z) -#define atomic_cas_p JEMALLOC_N(atomic_cas_p) -#define atomic_cas_u JEMALLOC_N(atomic_cas_u) -#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) -#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64) -#define atomic_cas_z JEMALLOC_N(atomic_cas_z) -#define atomic_sub_p JEMALLOC_N(atomic_sub_p) -#define atomic_sub_u JEMALLOC_N(atomic_sub_u) -#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) -#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) -#define atomic_sub_z JEMALLOC_N(atomic_sub_z) -#define atomic_write_p JEMALLOC_N(atomic_write_p) -#define atomic_write_u JEMALLOC_N(atomic_write_u) -#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) -#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) -#define atomic_write_z JEMALLOC_N(atomic_write_z) -#define base_alloc JEMALLOC_N(base_alloc) -#define base_boot JEMALLOC_N(base_boot) -#define base_postfork_child JEMALLOC_N(base_postfork_child) -#define base_postfork_parent JEMALLOC_N(base_postfork_parent) -#define base_prefork JEMALLOC_N(base_prefork) -#define base_stats_get JEMALLOC_N(base_stats_get) 
-#define bitmap_full JEMALLOC_N(bitmap_full) -#define bitmap_get JEMALLOC_N(bitmap_get) -#define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_init JEMALLOC_N(bitmap_init) -#define bitmap_set JEMALLOC_N(bitmap_set) -#define bitmap_sfu JEMALLOC_N(bitmap_sfu) -#define bitmap_size JEMALLOC_N(bitmap_size) -#define bitmap_unset JEMALLOC_N(bitmap_unset) -#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) -#define bootstrap_free JEMALLOC_N(bootstrap_free) -#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) -#define bt_init JEMALLOC_N(bt_init) -#define buferror JEMALLOC_N(buferror) -#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) -#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) -#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) -#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) -#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) -#define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) -#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) -#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) -#define chunk_deregister JEMALLOC_N(chunk_deregister) -#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) -#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) -#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) -#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) -#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) -#define chunk_in_dss JEMALLOC_N(chunk_in_dss) -#define chunk_lookup JEMALLOC_N(chunk_lookup) -#define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper) -#define chunk_register JEMALLOC_N(chunk_register) -#define chunks_rtree JEMALLOC_N(chunks_rtree) -#define chunksize JEMALLOC_N(chunksize) -#define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_count JEMALLOC_N(ckh_count) -#define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_iter JEMALLOC_N(ckh_iter) -#define ckh_new JEMALLOC_N(ckh_new) -#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) -#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_remove JEMALLOC_N(ckh_remove) -#define ckh_search JEMALLOC_N(ckh_search) -#define ckh_string_hash JEMALLOC_N(ckh_string_hash) -#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ctl_boot JEMALLOC_N(ctl_boot) -#define ctl_bymib JEMALLOC_N(ctl_bymib) -#define ctl_byname JEMALLOC_N(ctl_byname) -#define ctl_nametomib JEMALLOC_N(ctl_nametomib) -#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) -#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) -#define ctl_prefork JEMALLOC_N(ctl_prefork) -#define decay_ticker_get JEMALLOC_N(decay_ticker_get) -#define dss_prec_names JEMALLOC_N(dss_prec_names) -#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) -#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) -#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) -#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) -#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) -#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) -#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) -#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) -#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) -#define 
extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) -#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) -#define extent_node_init JEMALLOC_N(extent_node_init) -#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) -#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) -#define extent_node_size_get JEMALLOC_N(extent_node_size_get) -#define extent_node_size_set JEMALLOC_N(extent_node_size_set) -#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) -#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) -#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) -#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) -#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) -#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) -#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) -#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) -#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) -#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) -#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) -#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) -#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) -#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) -#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) -#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) -#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) -#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) -#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) -#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) -#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) -#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) -#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) -#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) -#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) -#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) -#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) -#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) -#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) -#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) -#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) -#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) -#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) -#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) -#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) -#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) -#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) -#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) -#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) -#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) -#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) -#define extent_tree_szsnad_reverse_iter_start 
JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) -#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search) -#define ffs_llu JEMALLOC_N(ffs_llu) -#define ffs_lu JEMALLOC_N(ffs_lu) -#define ffs_u JEMALLOC_N(ffs_u) -#define ffs_u32 JEMALLOC_N(ffs_u32) -#define ffs_u64 JEMALLOC_N(ffs_u64) -#define ffs_zu JEMALLOC_N(ffs_zu) -#define get_errno JEMALLOC_N(get_errno) -#define hash JEMALLOC_N(hash) -#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) -#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) -#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) -#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) -#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) -#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) -#define hash_x64_128 JEMALLOC_N(hash_x64_128) -#define hash_x86_128 JEMALLOC_N(hash_x86_128) -#define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_aalloc JEMALLOC_N(huge_aalloc) -#define huge_dalloc JEMALLOC_N(huge_dalloc) -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -#define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) -#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) -#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) -#define huge_ralloc JEMALLOC_N(huge_ralloc) -#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) -#define huge_salloc JEMALLOC_N(huge_salloc) -#define iaalloc JEMALLOC_N(iaalloc) -#define ialloc JEMALLOC_N(ialloc) -#define iallocztm JEMALLOC_N(iallocztm) -#define iarena_cleanup JEMALLOC_N(iarena_cleanup) -#define idalloc JEMALLOC_N(idalloc) -#define idalloctm JEMALLOC_N(idalloctm) -#define in_valgrind JEMALLOC_N(in_valgrind) -#define index2size JEMALLOC_N(index2size) -#define index2size_compute JEMALLOC_N(index2size_compute) -#define index2size_lookup JEMALLOC_N(index2size_lookup) -#define index2size_tab JEMALLOC_N(index2size_tab) -#define ipalloc JEMALLOC_N(ipalloc) -#define ipalloct JEMALLOC_N(ipalloct) -#define ipallocztm JEMALLOC_N(ipallocztm) -#define iqalloc JEMALLOC_N(iqalloc) -#define iralloc JEMALLOC_N(iralloc) -#define iralloct JEMALLOC_N(iralloct) -#define iralloct_realign JEMALLOC_N(iralloct_realign) -#define isalloc JEMALLOC_N(isalloc) -#define isdalloct JEMALLOC_N(isdalloct) -#define isqalloc JEMALLOC_N(isqalloc) -#define isthreaded JEMALLOC_N(isthreaded) -#define ivsalloc JEMALLOC_N(ivsalloc) -#define ixalloc JEMALLOC_N(ixalloc) -#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) -#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) -#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) -#define large_maxclass JEMALLOC_N(large_maxclass) -#define lg_floor JEMALLOC_N(lg_floor) -#define lg_prof_sample JEMALLOC_N(lg_prof_sample) -#define malloc_cprintf JEMALLOC_N(malloc_cprintf) -#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) -#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) -#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) -#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) -#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) -#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) -#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) -#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) -#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) -#define malloc_printf JEMALLOC_N(malloc_printf) -#define malloc_snprintf JEMALLOC_N(malloc_snprintf) -#define 
malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) -#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) -#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) -#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) -#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) -#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) -#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) -#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) -#define malloc_write JEMALLOC_N(malloc_write) -#define map_bias JEMALLOC_N(map_bias) -#define map_misc_offset JEMALLOC_N(map_misc_offset) -#define mb_write JEMALLOC_N(mb_write) -#define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) -#define narenas_total_get JEMALLOC_N(narenas_total_get) -#define ncpus JEMALLOC_N(ncpus) -#define nhbins JEMALLOC_N(nhbins) -#define nhclasses JEMALLOC_N(nhclasses) -#define nlclasses JEMALLOC_N(nlclasses) -#define nstime_add JEMALLOC_N(nstime_add) -#define nstime_compare JEMALLOC_N(nstime_compare) -#define nstime_copy JEMALLOC_N(nstime_copy) -#define nstime_divide JEMALLOC_N(nstime_divide) -#define nstime_idivide JEMALLOC_N(nstime_idivide) -#define nstime_imultiply JEMALLOC_N(nstime_imultiply) -#define nstime_init JEMALLOC_N(nstime_init) -#define nstime_init2 JEMALLOC_N(nstime_init2) -#define nstime_monotonic JEMALLOC_N(nstime_monotonic) -#define nstime_ns JEMALLOC_N(nstime_ns) -#define nstime_nsec JEMALLOC_N(nstime_nsec) -#define nstime_sec JEMALLOC_N(nstime_sec) -#define nstime_subtract JEMALLOC_N(nstime_subtract) -#define nstime_update JEMALLOC_N(nstime_update) -#define opt_abort JEMALLOC_N(opt_abort) -#define opt_decay_time JEMALLOC_N(opt_decay_time) -#define opt_dss JEMALLOC_N(opt_dss) -#define opt_junk JEMALLOC_N(opt_junk) -#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) -#define opt_junk_free JEMALLOC_N(opt_junk_free) -#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) -#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) -#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) -#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) -#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) -#define opt_narenas JEMALLOC_N(opt_narenas) -#define opt_prof JEMALLOC_N(opt_prof) -#define opt_prof_accum JEMALLOC_N(opt_prof_accum) -#define opt_prof_active JEMALLOC_N(opt_prof_active) -#define opt_prof_final JEMALLOC_N(opt_prof_final) -#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) -#define opt_prof_leak JEMALLOC_N(opt_prof_leak) -#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) -#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) -#define opt_purge JEMALLOC_N(opt_purge) -#define opt_quarantine JEMALLOC_N(opt_quarantine) -#define opt_redzone JEMALLOC_N(opt_redzone) -#define opt_stats_print JEMALLOC_N(opt_stats_print) -#define opt_tcache JEMALLOC_N(opt_tcache) -#define opt_thp JEMALLOC_N(opt_thp) -#define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_xmalloc JEMALLOC_N(opt_xmalloc) -#define opt_zero JEMALLOC_N(opt_zero) -#define p2rz JEMALLOC_N(p2rz) -#define pages_boot JEMALLOC_N(pages_boot) -#define pages_commit JEMALLOC_N(pages_commit) -#define pages_decommit JEMALLOC_N(pages_decommit) -#define pages_huge JEMALLOC_N(pages_huge) -#define pages_map JEMALLOC_N(pages_map) -#define pages_nohuge JEMALLOC_N(pages_nohuge) -#define pages_purge JEMALLOC_N(pages_purge) -#define pages_trim JEMALLOC_N(pages_trim) -#define pages_unmap JEMALLOC_N(pages_unmap) -#define 
pind2sz JEMALLOC_N(pind2sz) -#define pind2sz_compute JEMALLOC_N(pind2sz_compute) -#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) -#define pind2sz_tab JEMALLOC_N(pind2sz_tab) -#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) -#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) -#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) -#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) -#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) -#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) -#define prng_range_u32 JEMALLOC_N(prng_range_u32) -#define prng_range_u64 JEMALLOC_N(prng_range_u64) -#define prng_range_zu JEMALLOC_N(prng_range_zu) -#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) -#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) -#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu) -#define prof_active JEMALLOC_N(prof_active) -#define prof_active_get JEMALLOC_N(prof_active_get) -#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) -#define prof_active_set JEMALLOC_N(prof_active_set) -#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) -#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) -#define prof_backtrace JEMALLOC_N(prof_backtrace) -#define prof_boot0 JEMALLOC_N(prof_boot0) -#define prof_boot1 JEMALLOC_N(prof_boot1) -#define prof_boot2 JEMALLOC_N(prof_boot2) -#define prof_bt_count JEMALLOC_N(prof_bt_count) -#define prof_dump_header JEMALLOC_N(prof_dump_header) -#define prof_dump_open JEMALLOC_N(prof_dump_open) -#define prof_free JEMALLOC_N(prof_free) -#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) -#define prof_gdump JEMALLOC_N(prof_gdump) -#define prof_gdump_get JEMALLOC_N(prof_gdump_get) -#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) -#define prof_gdump_set JEMALLOC_N(prof_gdump_set) -#define prof_gdump_val JEMALLOC_N(prof_gdump_val) -#define prof_idump JEMALLOC_N(prof_idump) -#define prof_interval JEMALLOC_N(prof_interval) -#define prof_lookup JEMALLOC_N(prof_lookup) -#define prof_malloc JEMALLOC_N(prof_malloc) -#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) -#define prof_mdump JEMALLOC_N(prof_mdump) -#define prof_postfork_child JEMALLOC_N(prof_postfork_child) -#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork0 JEMALLOC_N(prof_prefork0) -#define prof_prefork1 JEMALLOC_N(prof_prefork1) -#define prof_realloc JEMALLOC_N(prof_realloc) -#define prof_reset JEMALLOC_N(prof_reset) -#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) -#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tctx_get JEMALLOC_N(prof_tctx_get) -#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) -#define prof_tctx_set JEMALLOC_N(prof_tctx_set) -#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) -#define prof_tdata_count JEMALLOC_N(prof_tdata_count) -#define prof_tdata_get JEMALLOC_N(prof_tdata_get) -#define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) -#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) -#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) -#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) -#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) -#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) -#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) -#define psz2ind JEMALLOC_N(psz2ind) -#define psz2u JEMALLOC_N(psz2u) -#define 
purge_mode_names JEMALLOC_N(purge_mode_names) -#define quarantine JEMALLOC_N(quarantine) -#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) -#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define rtree_child_read JEMALLOC_N(rtree_child_read) -#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) -#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) -#define rtree_delete JEMALLOC_N(rtree_delete) -#define rtree_get JEMALLOC_N(rtree_get) -#define rtree_new JEMALLOC_N(rtree_new) -#define rtree_node_valid JEMALLOC_N(rtree_node_valid) -#define rtree_set JEMALLOC_N(rtree_set) -#define rtree_start_level JEMALLOC_N(rtree_start_level) -#define rtree_subkey JEMALLOC_N(rtree_subkey) -#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) -#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) -#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) -#define rtree_val_read JEMALLOC_N(rtree_val_read) -#define rtree_val_write JEMALLOC_N(rtree_val_write) -#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) -#define run_quantize_floor JEMALLOC_N(run_quantize_floor) -#define s2u JEMALLOC_N(s2u) -#define s2u_compute JEMALLOC_N(s2u_compute) -#define s2u_lookup JEMALLOC_N(s2u_lookup) -#define sa2u JEMALLOC_N(sa2u) -#define set_errno JEMALLOC_N(set_errno) -#define size2index JEMALLOC_N(size2index) -#define size2index_compute JEMALLOC_N(size2index_compute) -#define size2index_lookup JEMALLOC_N(size2index_lookup) -#define size2index_tab JEMALLOC_N(size2index_tab) -#define spin_adaptive JEMALLOC_N(spin_adaptive) -#define spin_init JEMALLOC_N(spin_init) -#define stats_cactive JEMALLOC_N(stats_cactive) -#define stats_cactive_add JEMALLOC_N(stats_cactive_add) -#define stats_cactive_get JEMALLOC_N(stats_cactive_get) -#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_print JEMALLOC_N(stats_print) -#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) -#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) -#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) -#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) -#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) -#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) -#define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot JEMALLOC_N(tcache_boot) -#define tcache_cleanup JEMALLOC_N(tcache_cleanup) -#define tcache_create JEMALLOC_N(tcache_create) -#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) -#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) -#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_event JEMALLOC_N(tcache_event) -#define tcache_event_hard JEMALLOC_N(tcache_event_hard) -#define tcache_flush JEMALLOC_N(tcache_flush) -#define tcache_get JEMALLOC_N(tcache_get) -#define tcache_get_hard JEMALLOC_N(tcache_get_hard) -#define tcache_maxclass JEMALLOC_N(tcache_maxclass) -#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) -#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) -#define tcache_prefork JEMALLOC_N(tcache_prefork) -#define tcache_salloc JEMALLOC_N(tcache_salloc) -#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcaches JEMALLOC_N(tcaches) 
-#define tcaches_create JEMALLOC_N(tcaches_create) -#define tcaches_destroy JEMALLOC_N(tcaches_destroy) -#define tcaches_flush JEMALLOC_N(tcaches_flush) -#define tcaches_get JEMALLOC_N(tcaches_get) -#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) -#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) -#define ticker_copy JEMALLOC_N(ticker_copy) -#define ticker_init JEMALLOC_N(ticker_init) -#define ticker_read JEMALLOC_N(ticker_read) -#define ticker_tick JEMALLOC_N(ticker_tick) -#define ticker_ticks JEMALLOC_N(ticker_ticks) -#define tsd_arena_get JEMALLOC_N(tsd_arena_get) -#define tsd_arena_set JEMALLOC_N(tsd_arena_set) -#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) -#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) -#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) -#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) -#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) -#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) -#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) -#define tsd_boot JEMALLOC_N(tsd_boot) -#define tsd_boot0 JEMALLOC_N(tsd_boot0) -#define tsd_boot1 JEMALLOC_N(tsd_boot1) -#define tsd_booted JEMALLOC_N(tsd_booted) -#define tsd_booted_get JEMALLOC_N(tsd_booted_get) -#define tsd_cleanup JEMALLOC_N(tsd_cleanup) -#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) -#define tsd_fetch JEMALLOC_N(tsd_fetch) -#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) -#define tsd_get JEMALLOC_N(tsd_get) -#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) -#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) -#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) -#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) -#define tsd_initialized JEMALLOC_N(tsd_initialized) -#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) -#define tsd_init_finish JEMALLOC_N(tsd_init_finish) -#define tsd_init_head JEMALLOC_N(tsd_init_head) -#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) -#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) -#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) -#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) -#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) -#define tsd_nominal JEMALLOC_N(tsd_nominal) -#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) -#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) -#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) -#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) -#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) -#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) -#define tsd_set JEMALLOC_N(tsd_set) -#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get) -#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) -#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) -#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) -#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) -#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) -#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) -#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) -#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) -#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) -#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) -#define tsd_thread_deallocatedp_get 
JEMALLOC_N(tsd_thread_deallocatedp_get) -#define tsd_tls JEMALLOC_N(tsd_tls) -#define tsd_tsd JEMALLOC_N(tsd_tsd) -#define tsd_tsdn JEMALLOC_N(tsd_tsdn) -#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get) -#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) -#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) -#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) -#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) -#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) -#define tsdn_fetch JEMALLOC_N(tsdn_fetch) -#define tsdn_null JEMALLOC_N(tsdn_null) -#define tsdn_tsd JEMALLOC_N(tsdn_tsd) -#define u2rz JEMALLOC_N(u2rz) -#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) -#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) -#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) -#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) -#define witness_assert_depth JEMALLOC_N(witness_assert_depth) -#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) -#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) -#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) -#define witness_assert_owner JEMALLOC_N(witness_assert_owner) -#define witness_depth_error JEMALLOC_N(witness_depth_error) -#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) -#define witness_init JEMALLOC_N(witness_init) -#define witness_lock JEMALLOC_N(witness_lock) -#define witness_lock_error JEMALLOC_N(witness_lock_error) -#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) -#define witness_owner JEMALLOC_N(witness_owner) -#define witness_owner_error JEMALLOC_N(witness_owner_error) -#define witness_postfork_child JEMALLOC_N(witness_postfork_child) -#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) -#define witness_prefork JEMALLOC_N(witness_prefork) -#define witness_unlock JEMALLOC_N(witness_unlock) -#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) -#define zone_register JEMALLOC_N(zone_register) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt b/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt deleted file mode 100755 index 60b57e5a..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt +++ /dev/null @@ -1,639 +0,0 @@ -a0dalloc -a0get -a0malloc -arena_aalloc -arena_alloc_junk_small -arena_basic_stats_merge -arena_bin_index -arena_bin_info -arena_bitselm_get_const -arena_bitselm_get_mutable -arena_boot -arena_choose -arena_choose_hard -arena_choose_impl -arena_chunk_alloc_huge -arena_chunk_cache_maybe_insert -arena_chunk_cache_maybe_remove -arena_chunk_dalloc_huge -arena_chunk_ralloc_huge_expand -arena_chunk_ralloc_huge_shrink -arena_chunk_ralloc_huge_similar -arena_cleanup -arena_dalloc -arena_dalloc_bin -arena_dalloc_bin_junked_locked -arena_dalloc_junk_large -arena_dalloc_junk_small -arena_dalloc_large -arena_dalloc_large_junked_locked -arena_dalloc_small -arena_decay_tick -arena_decay_ticks -arena_decay_time_default_get -arena_decay_time_default_set -arena_decay_time_get -arena_decay_time_set -arena_dss_prec_get -arena_dss_prec_set -arena_extent_sn_next -arena_get -arena_ichoose -arena_init -arena_lg_dirty_mult_default_get -arena_lg_dirty_mult_default_set -arena_lg_dirty_mult_get -arena_lg_dirty_mult_set -arena_malloc -arena_malloc_hard -arena_malloc_large -arena_mapbits_allocated_get -arena_mapbits_binind_get 
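(Aside: the private_namespace.h header removed above, together with the private_symbols.txt list whose removal starts here, implements jemalloc's symbol-prefixing scheme — every internal name is routed through JEMALLOC_N so a statically linked jemalloc does not pollute the global namespace. JEMALLOC_N itself is defined outside this diff; the sketch below assumes the conventional token-pasting definition with the default je_ prefix.)

/*
 * Sketch of the assumed mechanism; JEMALLOC_N really lives in
 * jemalloc_internal.h, which this diff does not show.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_
#define JEMALLOC_CONCAT_(a, b) a##b
#define JEMALLOC_CONCAT(a, b) JEMALLOC_CONCAT_(a, b)
#define JEMALLOC_N(n) JEMALLOC_CONCAT(JEMALLOC_PRIVATE_NAMESPACE, n)

/* A generated line such as the ones deleted above ... */
#define arena_boot JEMALLOC_N(arena_boot)
/* ... makes every use of arena_boot compile as je_arena_boot. */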
-arena_mapbits_decommitted_get -arena_mapbits_dirty_get -arena_mapbits_get -arena_mapbits_internal_set -arena_mapbits_large_binind_set -arena_mapbits_large_get -arena_mapbits_large_set -arena_mapbits_large_size_get -arena_mapbits_size_decode -arena_mapbits_size_encode -arena_mapbits_small_runind_get -arena_mapbits_small_set -arena_mapbits_unallocated_set -arena_mapbits_unallocated_size_get -arena_mapbits_unallocated_size_set -arena_mapbits_unzeroed_get -arena_mapbitsp_get_const -arena_mapbitsp_get_mutable -arena_mapbitsp_read -arena_mapbitsp_write -arena_maxrun -arena_maybe_purge -arena_metadata_allocated_add -arena_metadata_allocated_get -arena_metadata_allocated_sub -arena_migrate -arena_miscelm_get_const -arena_miscelm_get_mutable -arena_miscelm_to_pageind -arena_miscelm_to_rpages -arena_new -arena_node_alloc -arena_node_dalloc -arena_nthreads_dec -arena_nthreads_get -arena_nthreads_inc -arena_palloc -arena_postfork_child -arena_postfork_parent -arena_prefork0 -arena_prefork1 -arena_prefork2 -arena_prefork3 -arena_prof_accum -arena_prof_accum_impl -arena_prof_accum_locked -arena_prof_promoted -arena_prof_tctx_get -arena_prof_tctx_reset -arena_prof_tctx_set -arena_ptr_small_binind_get -arena_purge -arena_quarantine_junk_small -arena_ralloc -arena_ralloc_junk_large -arena_ralloc_no_move -arena_rd_to_miscelm -arena_redzone_corruption -arena_reset -arena_run_regind -arena_run_to_miscelm -arena_salloc -arena_sdalloc -arena_stats_merge -arena_tcache_fill_small -arena_tdata_get -arena_tdata_get_hard -arenas -arenas_tdata_bypass_cleanup -arenas_tdata_cleanup -atomic_add_p -atomic_add_u -atomic_add_uint32 -atomic_add_uint64 -atomic_add_z -atomic_cas_p -atomic_cas_u -atomic_cas_uint32 -atomic_cas_uint64 -atomic_cas_z -atomic_sub_p -atomic_sub_u -atomic_sub_uint32 -atomic_sub_uint64 -atomic_sub_z -atomic_write_p -atomic_write_u -atomic_write_uint32 -atomic_write_uint64 -atomic_write_z -base_alloc -base_boot -base_postfork_child -base_postfork_parent -base_prefork -base_stats_get -bitmap_full -bitmap_get -bitmap_info_init -bitmap_init -bitmap_set -bitmap_sfu -bitmap_size -bitmap_unset -bootstrap_calloc -bootstrap_free -bootstrap_malloc -bt_init -buferror -chunk_alloc_base -chunk_alloc_cache -chunk_alloc_dss -chunk_alloc_mmap -chunk_alloc_wrapper -chunk_boot -chunk_dalloc_cache -chunk_dalloc_mmap -chunk_dalloc_wrapper -chunk_deregister -chunk_dss_boot -chunk_dss_mergeable -chunk_dss_prec_get -chunk_dss_prec_set -chunk_hooks_default -chunk_hooks_get -chunk_hooks_set -chunk_in_dss -chunk_lookup -chunk_npages -chunk_purge_wrapper -chunk_register -chunks_rtree -chunksize -chunksize_mask -ckh_count -ckh_delete -ckh_insert -ckh_iter -ckh_new -ckh_pointer_hash -ckh_pointer_keycomp -ckh_remove -ckh_search -ckh_string_hash -ckh_string_keycomp -ctl_boot -ctl_bymib -ctl_byname -ctl_nametomib -ctl_postfork_child -ctl_postfork_parent -ctl_prefork -decay_ticker_get -dss_prec_names -extent_node_achunk_get -extent_node_achunk_set -extent_node_addr_get -extent_node_addr_set -extent_node_arena_get -extent_node_arena_set -extent_node_committed_get -extent_node_committed_set -extent_node_dirty_insert -extent_node_dirty_linkage_init -extent_node_dirty_remove -extent_node_init -extent_node_prof_tctx_get -extent_node_prof_tctx_set -extent_node_size_get -extent_node_size_set -extent_node_sn_get -extent_node_sn_set -extent_node_zeroed_get -extent_node_zeroed_set -extent_size_quantize_ceil -extent_size_quantize_floor -extent_tree_ad_destroy -extent_tree_ad_destroy_recurse -extent_tree_ad_empty -extent_tree_ad_first 
-extent_tree_ad_insert -extent_tree_ad_iter -extent_tree_ad_iter_recurse -extent_tree_ad_iter_start -extent_tree_ad_last -extent_tree_ad_new -extent_tree_ad_next -extent_tree_ad_nsearch -extent_tree_ad_prev -extent_tree_ad_psearch -extent_tree_ad_remove -extent_tree_ad_reverse_iter -extent_tree_ad_reverse_iter_recurse -extent_tree_ad_reverse_iter_start -extent_tree_ad_search -extent_tree_szsnad_destroy -extent_tree_szsnad_destroy_recurse -extent_tree_szsnad_empty -extent_tree_szsnad_first -extent_tree_szsnad_insert -extent_tree_szsnad_iter -extent_tree_szsnad_iter_recurse -extent_tree_szsnad_iter_start -extent_tree_szsnad_last -extent_tree_szsnad_new -extent_tree_szsnad_next -extent_tree_szsnad_nsearch -extent_tree_szsnad_prev -extent_tree_szsnad_psearch -extent_tree_szsnad_remove -extent_tree_szsnad_reverse_iter -extent_tree_szsnad_reverse_iter_recurse -extent_tree_szsnad_reverse_iter_start -extent_tree_szsnad_search -ffs_llu -ffs_lu -ffs_u -ffs_u32 -ffs_u64 -ffs_zu -get_errno -hash -hash_fmix_32 -hash_fmix_64 -hash_get_block_32 -hash_get_block_64 -hash_rotl_32 -hash_rotl_64 -hash_x64_128 -hash_x86_128 -hash_x86_32 -huge_aalloc -huge_dalloc -huge_dalloc_junk -huge_malloc -huge_palloc -huge_prof_tctx_get -huge_prof_tctx_reset -huge_prof_tctx_set -huge_ralloc -huge_ralloc_no_move -huge_salloc -iaalloc -ialloc -iallocztm -iarena_cleanup -idalloc -idalloctm -in_valgrind -index2size -index2size_compute -index2size_lookup -index2size_tab -ipalloc -ipalloct -ipallocztm -iqalloc -iralloc -iralloct -iralloct_realign -isalloc -isdalloct -isqalloc -isthreaded -ivsalloc -ixalloc -jemalloc_postfork_child -jemalloc_postfork_parent -jemalloc_prefork -large_maxclass -lg_floor -lg_prof_sample -malloc_cprintf -malloc_mutex_assert_not_owner -malloc_mutex_assert_owner -malloc_mutex_boot -malloc_mutex_init -malloc_mutex_lock -malloc_mutex_postfork_child -malloc_mutex_postfork_parent -malloc_mutex_prefork -malloc_mutex_unlock -malloc_printf -malloc_snprintf -malloc_strtoumax -malloc_tsd_boot0 -malloc_tsd_boot1 -malloc_tsd_cleanup_register -malloc_tsd_dalloc -malloc_tsd_malloc -malloc_tsd_no_cleanup -malloc_vcprintf -malloc_vsnprintf -malloc_write -map_bias -map_misc_offset -mb_write -narenas_auto -narenas_tdata_cleanup -narenas_total_get -ncpus -nhbins -nhclasses -nlclasses -nstime_add -nstime_compare -nstime_copy -nstime_divide -nstime_idivide -nstime_imultiply -nstime_init -nstime_init2 -nstime_monotonic -nstime_ns -nstime_nsec -nstime_sec -nstime_subtract -nstime_update -opt_abort -opt_decay_time -opt_dss -opt_junk -opt_junk_alloc -opt_junk_free -opt_lg_chunk -opt_lg_dirty_mult -opt_lg_prof_interval -opt_lg_prof_sample -opt_lg_tcache_max -opt_narenas -opt_prof -opt_prof_accum -opt_prof_active -opt_prof_final -opt_prof_gdump -opt_prof_leak -opt_prof_prefix -opt_prof_thread_active_init -opt_purge -opt_quarantine -opt_redzone -opt_stats_print -opt_tcache -opt_thp -opt_utrace -opt_xmalloc -opt_zero -p2rz -pages_boot -pages_commit -pages_decommit -pages_huge -pages_map -pages_nohuge -pages_purge -pages_trim -pages_unmap -pind2sz -pind2sz_compute -pind2sz_lookup -pind2sz_tab -pow2_ceil_u32 -pow2_ceil_u64 -pow2_ceil_zu -prng_lg_range_u32 -prng_lg_range_u64 -prng_lg_range_zu -prng_range_u32 -prng_range_u64 -prng_range_zu -prng_state_next_u32 -prng_state_next_u64 -prng_state_next_zu -prof_active -prof_active_get -prof_active_get_unlocked -prof_active_set -prof_alloc_prep -prof_alloc_rollback -prof_backtrace -prof_boot0 -prof_boot1 -prof_boot2 -prof_bt_count -prof_dump_header -prof_dump_open -prof_free 
-prof_free_sampled_object -prof_gdump -prof_gdump_get -prof_gdump_get_unlocked -prof_gdump_set -prof_gdump_val -prof_idump -prof_interval -prof_lookup -prof_malloc -prof_malloc_sample_object -prof_mdump -prof_postfork_child -prof_postfork_parent -prof_prefork0 -prof_prefork1 -prof_realloc -prof_reset -prof_sample_accum_update -prof_sample_threshold_update -prof_tctx_get -prof_tctx_reset -prof_tctx_set -prof_tdata_cleanup -prof_tdata_count -prof_tdata_get -prof_tdata_init -prof_tdata_reinit -prof_thread_active_get -prof_thread_active_init_get -prof_thread_active_init_set -prof_thread_active_set -prof_thread_name_get -prof_thread_name_set -psz2ind -psz2u -purge_mode_names -quarantine -quarantine_alloc_hook -quarantine_alloc_hook_work -quarantine_cleanup -rtree_child_read -rtree_child_read_hard -rtree_child_tryread -rtree_delete -rtree_get -rtree_new -rtree_node_valid -rtree_set -rtree_start_level -rtree_subkey -rtree_subtree_read -rtree_subtree_read_hard -rtree_subtree_tryread -rtree_val_read -rtree_val_write -run_quantize_ceil -run_quantize_floor -s2u -s2u_compute -s2u_lookup -sa2u -set_errno -size2index -size2index_compute -size2index_lookup -size2index_tab -spin_adaptive -spin_init -stats_cactive -stats_cactive_add -stats_cactive_get -stats_cactive_sub -stats_print -tcache_alloc_easy -tcache_alloc_large -tcache_alloc_small -tcache_alloc_small_hard -tcache_arena_reassociate -tcache_bin_flush_large -tcache_bin_flush_small -tcache_bin_info -tcache_boot -tcache_cleanup -tcache_create -tcache_dalloc_large -tcache_dalloc_small -tcache_enabled_cleanup -tcache_enabled_get -tcache_enabled_set -tcache_event -tcache_event_hard -tcache_flush -tcache_get -tcache_get_hard -tcache_maxclass -tcache_postfork_child -tcache_postfork_parent -tcache_prefork -tcache_salloc -tcache_stats_merge -tcaches -tcaches_create -tcaches_destroy -tcaches_flush -tcaches_get -thread_allocated_cleanup -thread_deallocated_cleanup -ticker_copy -ticker_init -ticker_read -ticker_tick -ticker_ticks -tsd_arena_get -tsd_arena_set -tsd_arenap_get -tsd_arenas_tdata_bypass_get -tsd_arenas_tdata_bypass_set -tsd_arenas_tdata_bypassp_get -tsd_arenas_tdata_get -tsd_arenas_tdata_set -tsd_arenas_tdatap_get -tsd_boot -tsd_boot0 -tsd_boot1 -tsd_booted -tsd_booted_get -tsd_cleanup -tsd_cleanup_wrapper -tsd_fetch -tsd_fetch_impl -tsd_get -tsd_get_allocates -tsd_iarena_get -tsd_iarena_set -tsd_iarenap_get -tsd_initialized -tsd_init_check_recursion -tsd_init_finish -tsd_init_head -tsd_narenas_tdata_get -tsd_narenas_tdata_set -tsd_narenas_tdatap_get -tsd_wrapper_get -tsd_wrapper_set -tsd_nominal -tsd_prof_tdata_get -tsd_prof_tdata_set -tsd_prof_tdatap_get -tsd_quarantine_get -tsd_quarantine_set -tsd_quarantinep_get -tsd_set -tsd_tcache_enabled_get -tsd_tcache_enabled_set -tsd_tcache_enabledp_get -tsd_tcache_get -tsd_tcache_set -tsd_tcachep_get -tsd_thread_allocated_get -tsd_thread_allocated_set -tsd_thread_allocatedp_get -tsd_thread_deallocated_get -tsd_thread_deallocated_set -tsd_thread_deallocatedp_get -tsd_tls -tsd_tsd -tsd_tsdn -tsd_witness_fork_get -tsd_witness_fork_set -tsd_witness_forkp_get -tsd_witnesses_get -tsd_witnesses_set -tsd_witnessesp_get -tsdn_fetch -tsdn_null -tsdn_tsd -u2rz -valgrind_freelike_block -valgrind_make_mem_defined -valgrind_make_mem_noaccess -valgrind_make_mem_undefined -witness_assert_depth -witness_assert_depth_to_rank -witness_assert_lockless -witness_assert_not_owner -witness_assert_owner -witness_depth_error -witness_fork_cleanup -witness_init -witness_lock -witness_lock_error -witness_not_owner_error 
-witness_owner -witness_owner_error -witness_postfork_child -witness_postfork_parent -witness_prefork -witness_unlock -witnesses_cleanup -zone_register diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h b/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h deleted file mode 100755 index 27038c63..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h +++ /dev/null @@ -1,639 +0,0 @@ -#undef a0dalloc -#undef a0get -#undef a0malloc -#undef arena_aalloc -#undef arena_alloc_junk_small -#undef arena_basic_stats_merge -#undef arena_bin_index -#undef arena_bin_info -#undef arena_bitselm_get_const -#undef arena_bitselm_get_mutable -#undef arena_boot -#undef arena_choose -#undef arena_choose_hard -#undef arena_choose_impl -#undef arena_chunk_alloc_huge -#undef arena_chunk_cache_maybe_insert -#undef arena_chunk_cache_maybe_remove -#undef arena_chunk_dalloc_huge -#undef arena_chunk_ralloc_huge_expand -#undef arena_chunk_ralloc_huge_shrink -#undef arena_chunk_ralloc_huge_similar -#undef arena_cleanup -#undef arena_dalloc -#undef arena_dalloc_bin -#undef arena_dalloc_bin_junked_locked -#undef arena_dalloc_junk_large -#undef arena_dalloc_junk_small -#undef arena_dalloc_large -#undef arena_dalloc_large_junked_locked -#undef arena_dalloc_small -#undef arena_decay_tick -#undef arena_decay_ticks -#undef arena_decay_time_default_get -#undef arena_decay_time_default_set -#undef arena_decay_time_get -#undef arena_decay_time_set -#undef arena_dss_prec_get -#undef arena_dss_prec_set -#undef arena_extent_sn_next -#undef arena_get -#undef arena_ichoose -#undef arena_init -#undef arena_lg_dirty_mult_default_get -#undef arena_lg_dirty_mult_default_set -#undef arena_lg_dirty_mult_get -#undef arena_lg_dirty_mult_set -#undef arena_malloc -#undef arena_malloc_hard -#undef arena_malloc_large -#undef arena_mapbits_allocated_get -#undef arena_mapbits_binind_get -#undef arena_mapbits_decommitted_get -#undef arena_mapbits_dirty_get -#undef arena_mapbits_get -#undef arena_mapbits_internal_set -#undef arena_mapbits_large_binind_set -#undef arena_mapbits_large_get -#undef arena_mapbits_large_set -#undef arena_mapbits_large_size_get -#undef arena_mapbits_size_decode -#undef arena_mapbits_size_encode -#undef arena_mapbits_small_runind_get -#undef arena_mapbits_small_set -#undef arena_mapbits_unallocated_set -#undef arena_mapbits_unallocated_size_get -#undef arena_mapbits_unallocated_size_set -#undef arena_mapbits_unzeroed_get -#undef arena_mapbitsp_get_const -#undef arena_mapbitsp_get_mutable -#undef arena_mapbitsp_read -#undef arena_mapbitsp_write -#undef arena_maxrun -#undef arena_maybe_purge -#undef arena_metadata_allocated_add -#undef arena_metadata_allocated_get -#undef arena_metadata_allocated_sub -#undef arena_migrate -#undef arena_miscelm_get_const -#undef arena_miscelm_get_mutable -#undef arena_miscelm_to_pageind -#undef arena_miscelm_to_rpages -#undef arena_new -#undef arena_node_alloc -#undef arena_node_dalloc -#undef arena_nthreads_dec -#undef arena_nthreads_get -#undef arena_nthreads_inc -#undef arena_palloc -#undef arena_postfork_child -#undef arena_postfork_parent -#undef arena_prefork0 -#undef arena_prefork1 -#undef arena_prefork2 -#undef arena_prefork3 -#undef arena_prof_accum -#undef arena_prof_accum_impl -#undef arena_prof_accum_locked -#undef arena_prof_promoted -#undef arena_prof_tctx_get -#undef arena_prof_tctx_reset -#undef arena_prof_tctx_set -#undef arena_ptr_small_binind_get -#undef arena_purge -#undef 
arena_quarantine_junk_small -#undef arena_ralloc -#undef arena_ralloc_junk_large -#undef arena_ralloc_no_move -#undef arena_rd_to_miscelm -#undef arena_redzone_corruption -#undef arena_reset -#undef arena_run_regind -#undef arena_run_to_miscelm -#undef arena_salloc -#undef arena_sdalloc -#undef arena_stats_merge -#undef arena_tcache_fill_small -#undef arena_tdata_get -#undef arena_tdata_get_hard -#undef arenas -#undef arenas_tdata_bypass_cleanup -#undef arenas_tdata_cleanup -#undef atomic_add_p -#undef atomic_add_u -#undef atomic_add_uint32 -#undef atomic_add_uint64 -#undef atomic_add_z -#undef atomic_cas_p -#undef atomic_cas_u -#undef atomic_cas_uint32 -#undef atomic_cas_uint64 -#undef atomic_cas_z -#undef atomic_sub_p -#undef atomic_sub_u -#undef atomic_sub_uint32 -#undef atomic_sub_uint64 -#undef atomic_sub_z -#undef atomic_write_p -#undef atomic_write_u -#undef atomic_write_uint32 -#undef atomic_write_uint64 -#undef atomic_write_z -#undef base_alloc -#undef base_boot -#undef base_postfork_child -#undef base_postfork_parent -#undef base_prefork -#undef base_stats_get -#undef bitmap_full -#undef bitmap_get -#undef bitmap_info_init -#undef bitmap_init -#undef bitmap_set -#undef bitmap_sfu -#undef bitmap_size -#undef bitmap_unset -#undef bootstrap_calloc -#undef bootstrap_free -#undef bootstrap_malloc -#undef bt_init -#undef buferror -#undef chunk_alloc_base -#undef chunk_alloc_cache -#undef chunk_alloc_dss -#undef chunk_alloc_mmap -#undef chunk_alloc_wrapper -#undef chunk_boot -#undef chunk_dalloc_cache -#undef chunk_dalloc_mmap -#undef chunk_dalloc_wrapper -#undef chunk_deregister -#undef chunk_dss_boot -#undef chunk_dss_mergeable -#undef chunk_dss_prec_get -#undef chunk_dss_prec_set -#undef chunk_hooks_default -#undef chunk_hooks_get -#undef chunk_hooks_set -#undef chunk_in_dss -#undef chunk_lookup -#undef chunk_npages -#undef chunk_purge_wrapper -#undef chunk_register -#undef chunks_rtree -#undef chunksize -#undef chunksize_mask -#undef ckh_count -#undef ckh_delete -#undef ckh_insert -#undef ckh_iter -#undef ckh_new -#undef ckh_pointer_hash -#undef ckh_pointer_keycomp -#undef ckh_remove -#undef ckh_search -#undef ckh_string_hash -#undef ckh_string_keycomp -#undef ctl_boot -#undef ctl_bymib -#undef ctl_byname -#undef ctl_nametomib -#undef ctl_postfork_child -#undef ctl_postfork_parent -#undef ctl_prefork -#undef decay_ticker_get -#undef dss_prec_names -#undef extent_node_achunk_get -#undef extent_node_achunk_set -#undef extent_node_addr_get -#undef extent_node_addr_set -#undef extent_node_arena_get -#undef extent_node_arena_set -#undef extent_node_committed_get -#undef extent_node_committed_set -#undef extent_node_dirty_insert -#undef extent_node_dirty_linkage_init -#undef extent_node_dirty_remove -#undef extent_node_init -#undef extent_node_prof_tctx_get -#undef extent_node_prof_tctx_set -#undef extent_node_size_get -#undef extent_node_size_set -#undef extent_node_sn_get -#undef extent_node_sn_set -#undef extent_node_zeroed_get -#undef extent_node_zeroed_set -#undef extent_size_quantize_ceil -#undef extent_size_quantize_floor -#undef extent_tree_ad_destroy -#undef extent_tree_ad_destroy_recurse -#undef extent_tree_ad_empty -#undef extent_tree_ad_first -#undef extent_tree_ad_insert -#undef extent_tree_ad_iter -#undef extent_tree_ad_iter_recurse -#undef extent_tree_ad_iter_start -#undef extent_tree_ad_last -#undef extent_tree_ad_new -#undef extent_tree_ad_next -#undef extent_tree_ad_nsearch -#undef extent_tree_ad_prev -#undef extent_tree_ad_psearch -#undef extent_tree_ad_remove -#undef 
extent_tree_ad_reverse_iter -#undef extent_tree_ad_reverse_iter_recurse -#undef extent_tree_ad_reverse_iter_start -#undef extent_tree_ad_search -#undef extent_tree_szsnad_destroy -#undef extent_tree_szsnad_destroy_recurse -#undef extent_tree_szsnad_empty -#undef extent_tree_szsnad_first -#undef extent_tree_szsnad_insert -#undef extent_tree_szsnad_iter -#undef extent_tree_szsnad_iter_recurse -#undef extent_tree_szsnad_iter_start -#undef extent_tree_szsnad_last -#undef extent_tree_szsnad_new -#undef extent_tree_szsnad_next -#undef extent_tree_szsnad_nsearch -#undef extent_tree_szsnad_prev -#undef extent_tree_szsnad_psearch -#undef extent_tree_szsnad_remove -#undef extent_tree_szsnad_reverse_iter -#undef extent_tree_szsnad_reverse_iter_recurse -#undef extent_tree_szsnad_reverse_iter_start -#undef extent_tree_szsnad_search -#undef ffs_llu -#undef ffs_lu -#undef ffs_u -#undef ffs_u32 -#undef ffs_u64 -#undef ffs_zu -#undef get_errno -#undef hash -#undef hash_fmix_32 -#undef hash_fmix_64 -#undef hash_get_block_32 -#undef hash_get_block_64 -#undef hash_rotl_32 -#undef hash_rotl_64 -#undef hash_x64_128 -#undef hash_x86_128 -#undef hash_x86_32 -#undef huge_aalloc -#undef huge_dalloc -#undef huge_dalloc_junk -#undef huge_malloc -#undef huge_palloc -#undef huge_prof_tctx_get -#undef huge_prof_tctx_reset -#undef huge_prof_tctx_set -#undef huge_ralloc -#undef huge_ralloc_no_move -#undef huge_salloc -#undef iaalloc -#undef ialloc -#undef iallocztm -#undef iarena_cleanup -#undef idalloc -#undef idalloctm -#undef in_valgrind -#undef index2size -#undef index2size_compute -#undef index2size_lookup -#undef index2size_tab -#undef ipalloc -#undef ipalloct -#undef ipallocztm -#undef iqalloc -#undef iralloc -#undef iralloct -#undef iralloct_realign -#undef isalloc -#undef isdalloct -#undef isqalloc -#undef isthreaded -#undef ivsalloc -#undef ixalloc -#undef jemalloc_postfork_child -#undef jemalloc_postfork_parent -#undef jemalloc_prefork -#undef large_maxclass -#undef lg_floor -#undef lg_prof_sample -#undef malloc_cprintf -#undef malloc_mutex_assert_not_owner -#undef malloc_mutex_assert_owner -#undef malloc_mutex_boot -#undef malloc_mutex_init -#undef malloc_mutex_lock -#undef malloc_mutex_postfork_child -#undef malloc_mutex_postfork_parent -#undef malloc_mutex_prefork -#undef malloc_mutex_unlock -#undef malloc_printf -#undef malloc_snprintf -#undef malloc_strtoumax -#undef malloc_tsd_boot0 -#undef malloc_tsd_boot1 -#undef malloc_tsd_cleanup_register -#undef malloc_tsd_dalloc -#undef malloc_tsd_malloc -#undef malloc_tsd_no_cleanup -#undef malloc_vcprintf -#undef malloc_vsnprintf -#undef malloc_write -#undef map_bias -#undef map_misc_offset -#undef mb_write -#undef narenas_auto -#undef narenas_tdata_cleanup -#undef narenas_total_get -#undef ncpus -#undef nhbins -#undef nhclasses -#undef nlclasses -#undef nstime_add -#undef nstime_compare -#undef nstime_copy -#undef nstime_divide -#undef nstime_idivide -#undef nstime_imultiply -#undef nstime_init -#undef nstime_init2 -#undef nstime_monotonic -#undef nstime_ns -#undef nstime_nsec -#undef nstime_sec -#undef nstime_subtract -#undef nstime_update -#undef opt_abort -#undef opt_decay_time -#undef opt_dss -#undef opt_junk -#undef opt_junk_alloc -#undef opt_junk_free -#undef opt_lg_chunk -#undef opt_lg_dirty_mult -#undef opt_lg_prof_interval -#undef opt_lg_prof_sample -#undef opt_lg_tcache_max -#undef opt_narenas -#undef opt_prof -#undef opt_prof_accum -#undef opt_prof_active -#undef opt_prof_final -#undef opt_prof_gdump -#undef opt_prof_leak -#undef opt_prof_prefix 
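(Aside: private_unnamespace.h, deleted in this hunk, is the exact mirror of private_namespace.h — one #undef per entry of private_symbols.txt. A plausible usage pattern, hypothetical and not shown in this diff, is to bracket internal code so the renames never escape the region that wants them.)

#include "jemalloc/internal/private_namespace.h"   /* arena_boot -> je_arena_boot */
/* ... translation-unit internals compiled with prefixed names ... */
#include "jemalloc/internal/private_unnamespace.h" /* renames undone past this point */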
-#undef opt_prof_thread_active_init -#undef opt_purge -#undef opt_quarantine -#undef opt_redzone -#undef opt_stats_print -#undef opt_tcache -#undef opt_thp -#undef opt_utrace -#undef opt_xmalloc -#undef opt_zero -#undef p2rz -#undef pages_boot -#undef pages_commit -#undef pages_decommit -#undef pages_huge -#undef pages_map -#undef pages_nohuge -#undef pages_purge -#undef pages_trim -#undef pages_unmap -#undef pind2sz -#undef pind2sz_compute -#undef pind2sz_lookup -#undef pind2sz_tab -#undef pow2_ceil_u32 -#undef pow2_ceil_u64 -#undef pow2_ceil_zu -#undef prng_lg_range_u32 -#undef prng_lg_range_u64 -#undef prng_lg_range_zu -#undef prng_range_u32 -#undef prng_range_u64 -#undef prng_range_zu -#undef prng_state_next_u32 -#undef prng_state_next_u64 -#undef prng_state_next_zu -#undef prof_active -#undef prof_active_get -#undef prof_active_get_unlocked -#undef prof_active_set -#undef prof_alloc_prep -#undef prof_alloc_rollback -#undef prof_backtrace -#undef prof_boot0 -#undef prof_boot1 -#undef prof_boot2 -#undef prof_bt_count -#undef prof_dump_header -#undef prof_dump_open -#undef prof_free -#undef prof_free_sampled_object -#undef prof_gdump -#undef prof_gdump_get -#undef prof_gdump_get_unlocked -#undef prof_gdump_set -#undef prof_gdump_val -#undef prof_idump -#undef prof_interval -#undef prof_lookup -#undef prof_malloc -#undef prof_malloc_sample_object -#undef prof_mdump -#undef prof_postfork_child -#undef prof_postfork_parent -#undef prof_prefork0 -#undef prof_prefork1 -#undef prof_realloc -#undef prof_reset -#undef prof_sample_accum_update -#undef prof_sample_threshold_update -#undef prof_tctx_get -#undef prof_tctx_reset -#undef prof_tctx_set -#undef prof_tdata_cleanup -#undef prof_tdata_count -#undef prof_tdata_get -#undef prof_tdata_init -#undef prof_tdata_reinit -#undef prof_thread_active_get -#undef prof_thread_active_init_get -#undef prof_thread_active_init_set -#undef prof_thread_active_set -#undef prof_thread_name_get -#undef prof_thread_name_set -#undef psz2ind -#undef psz2u -#undef purge_mode_names -#undef quarantine -#undef quarantine_alloc_hook -#undef quarantine_alloc_hook_work -#undef quarantine_cleanup -#undef rtree_child_read -#undef rtree_child_read_hard -#undef rtree_child_tryread -#undef rtree_delete -#undef rtree_get -#undef rtree_new -#undef rtree_node_valid -#undef rtree_set -#undef rtree_start_level -#undef rtree_subkey -#undef rtree_subtree_read -#undef rtree_subtree_read_hard -#undef rtree_subtree_tryread -#undef rtree_val_read -#undef rtree_val_write -#undef run_quantize_ceil -#undef run_quantize_floor -#undef s2u -#undef s2u_compute -#undef s2u_lookup -#undef sa2u -#undef set_errno -#undef size2index -#undef size2index_compute -#undef size2index_lookup -#undef size2index_tab -#undef spin_adaptive -#undef spin_init -#undef stats_cactive -#undef stats_cactive_add -#undef stats_cactive_get -#undef stats_cactive_sub -#undef stats_print -#undef tcache_alloc_easy -#undef tcache_alloc_large -#undef tcache_alloc_small -#undef tcache_alloc_small_hard -#undef tcache_arena_reassociate -#undef tcache_bin_flush_large -#undef tcache_bin_flush_small -#undef tcache_bin_info -#undef tcache_boot -#undef tcache_cleanup -#undef tcache_create -#undef tcache_dalloc_large -#undef tcache_dalloc_small -#undef tcache_enabled_cleanup -#undef tcache_enabled_get -#undef tcache_enabled_set -#undef tcache_event -#undef tcache_event_hard -#undef tcache_flush -#undef tcache_get -#undef tcache_get_hard -#undef tcache_maxclass -#undef tcache_postfork_child -#undef tcache_postfork_parent -#undef 
tcache_prefork -#undef tcache_salloc -#undef tcache_stats_merge -#undef tcaches -#undef tcaches_create -#undef tcaches_destroy -#undef tcaches_flush -#undef tcaches_get -#undef thread_allocated_cleanup -#undef thread_deallocated_cleanup -#undef ticker_copy -#undef ticker_init -#undef ticker_read -#undef ticker_tick -#undef ticker_ticks -#undef tsd_arena_get -#undef tsd_arena_set -#undef tsd_arenap_get -#undef tsd_arenas_tdata_bypass_get -#undef tsd_arenas_tdata_bypass_set -#undef tsd_arenas_tdata_bypassp_get -#undef tsd_arenas_tdata_get -#undef tsd_arenas_tdata_set -#undef tsd_arenas_tdatap_get -#undef tsd_boot -#undef tsd_boot0 -#undef tsd_boot1 -#undef tsd_booted -#undef tsd_booted_get -#undef tsd_cleanup -#undef tsd_cleanup_wrapper -#undef tsd_fetch -#undef tsd_fetch_impl -#undef tsd_get -#undef tsd_get_allocates -#undef tsd_iarena_get -#undef tsd_iarena_set -#undef tsd_iarenap_get -#undef tsd_initialized -#undef tsd_init_check_recursion -#undef tsd_init_finish -#undef tsd_init_head -#undef tsd_narenas_tdata_get -#undef tsd_narenas_tdata_set -#undef tsd_narenas_tdatap_get -#undef tsd_wrapper_get -#undef tsd_wrapper_set -#undef tsd_nominal -#undef tsd_prof_tdata_get -#undef tsd_prof_tdata_set -#undef tsd_prof_tdatap_get -#undef tsd_quarantine_get -#undef tsd_quarantine_set -#undef tsd_quarantinep_get -#undef tsd_set -#undef tsd_tcache_enabled_get -#undef tsd_tcache_enabled_set -#undef tsd_tcache_enabledp_get -#undef tsd_tcache_get -#undef tsd_tcache_set -#undef tsd_tcachep_get -#undef tsd_thread_allocated_get -#undef tsd_thread_allocated_set -#undef tsd_thread_allocatedp_get -#undef tsd_thread_deallocated_get -#undef tsd_thread_deallocated_set -#undef tsd_thread_deallocatedp_get -#undef tsd_tls -#undef tsd_tsd -#undef tsd_tsdn -#undef tsd_witness_fork_get -#undef tsd_witness_fork_set -#undef tsd_witness_forkp_get -#undef tsd_witnesses_get -#undef tsd_witnesses_set -#undef tsd_witnessesp_get -#undef tsdn_fetch -#undef tsdn_null -#undef tsdn_tsd -#undef u2rz -#undef valgrind_freelike_block -#undef valgrind_make_mem_defined -#undef valgrind_make_mem_noaccess -#undef valgrind_make_mem_undefined -#undef witness_assert_depth -#undef witness_assert_depth_to_rank -#undef witness_assert_lockless -#undef witness_assert_not_owner -#undef witness_assert_owner -#undef witness_depth_error -#undef witness_fork_cleanup -#undef witness_init -#undef witness_lock -#undef witness_lock_error -#undef witness_not_owner_error -#undef witness_owner -#undef witness_owner_error -#undef witness_postfork_child -#undef witness_postfork_parent -#undef witness_prefork -#undef witness_unlock -#undef witnesses_cleanup -#undef zone_register diff --git a/sources/jemalloc/include-linux/jemalloc/internal/prng.h b/sources/jemalloc/include-linux/jemalloc/internal/prng.h deleted file mode 100755 index c2bda19c..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/prng.h +++ /dev/null @@ -1,207 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. - * - * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. 
For example, the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - */ - -#define PRNG_A_32 UINT32_C(1103515241) -#define PRNG_C_32 UINT32_C(12347) - -#define PRNG_A_64 UINT64_C(6364136223846793005) -#define PRNG_C_64 UINT64_C(1442695040888963407) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t prng_state_next_u32(uint32_t state); -uint64_t prng_state_next_u64(uint64_t state); -size_t prng_state_next_zu(size_t state); - -uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, - bool atomic); -uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); -size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); - -uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); -uint64_t prng_range_u64(uint64_t *state, uint64_t range); -size_t prng_range_zu(size_t *state, size_t range, bool atomic); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) -JEMALLOC_ALWAYS_INLINE uint32_t -prng_state_next_u32(uint32_t state) -{ - - return ((state * PRNG_A_32) + PRNG_C_32); -} - -JEMALLOC_ALWAYS_INLINE uint64_t -prng_state_next_u64(uint64_t state) -{ - - return ((state * PRNG_A_64) + PRNG_C_64); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_state_next_zu(size_t state) -{ - -#if LG_SIZEOF_PTR == 2 - return ((state * PRNG_A_32) + PRNG_C_32); -#elif LG_SIZEOF_PTR == 3 - return ((state * PRNG_A_64) + PRNG_C_64); -#else -#error Unsupported pointer size -#endif -} - -JEMALLOC_ALWAYS_INLINE uint32_t -prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) -{ - uint32_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= 32); - - if (atomic) { - uint32_t state0; - - do { - state0 = atomic_read_uint32(state); - state1 = prng_state_next_u32(state0); - } while (atomic_cas_uint32(state, state0, state1)); - } else { - state1 = prng_state_next_u32(*state); - *state = state1; - } - ret = state1 >> (32 - lg_range); - - return (ret); -} - -/* 64-bit atomic operations cannot be supported on all relevant platforms. */ -JEMALLOC_ALWAYS_INLINE uint64_t -prng_lg_range_u64(uint64_t *state, unsigned lg_range) -{ - uint64_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= 64); - - state1 = prng_state_next_u64(*state); - *state = state1; - ret = state1 >> (64 - lg_range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) -{ - size_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); - - if (atomic) { - size_t state0; - - do { - state0 = atomic_read_z(state); - state1 = prng_state_next_zu(state0); - } while (atomic_cas_z(state, state0, state1)); - } else { - state1 = prng_state_next_zu(*state); - *state = state1; - } - ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE uint32_t -prng_range_u32(uint32_t *state, uint32_t range, bool atomic) -{ - uint32_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). 
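- * As a worked illustration with a hypothetical value: for range == 100,
- * pow2_ceil_u32(100) == 128 and ffs_u32(128) - 1 == 7, so lg_range == 7,
- * trial values are drawn from [0..128), and any result >= 100 is retried.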
*/ - lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_u32(state, lg_range, atomic); - } while (ret >= range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE uint64_t -prng_range_u64(uint64_t *state, uint64_t range) -{ - uint64_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_u64(state, lg_range); - } while (ret >= range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_range_zu(size_t *state, size_t range, bool atomic) -{ - size_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_zu(state, lg_range, atomic); - } while (ret >= range); - - return (ret); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/prof.h b/sources/jemalloc/include-linux/jemalloc/internal/prof.h deleted file mode 100755 index 8293b71e..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/prof.h +++ /dev/null @@ -1,547 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_tctx_s prof_tctx_t; -typedef struct prof_gctx_s prof_gctx_t; -typedef struct prof_tdata_s prof_tdata_t; - -/* Option defaults. */ -#ifdef JEMALLOC_PROF -# define PROF_PREFIX_DEFAULT "jeprof" -#else -# define PROF_PREFIX_DEFAULT "" -#endif -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 - -/* - * Hard limit on stack backtrace depth. The version of prof_backtrace() that - * is based on __builtin_return_address() necessarily has a hard-coded number - * of backtrace frame handlers, and should be kept in sync with this setting. - */ -#define PROF_BT_MAX 128 - -/* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 - -/* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 - -/* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 - -/* - * Number of mutexes shared among all gctx's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NCTX_LOCKS 1024 - -/* - * Number of mutexes shared among all tdata's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NTDATA_LOCKS 256 - -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct prof_bt_s { - /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; -}; - -#ifdef JEMALLOC_PROF_LIBGCC -/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. 
*/ -typedef struct { - prof_bt_t *bt; - unsigned max; -} prof_unwind_data_t; -#endif - -struct prof_cnt_s { - /* Profiling counters. */ - uint64_t curobjs; - uint64_t curbytes; - uint64_t accumobjs; - uint64_t accumbytes; -}; - -typedef enum { - prof_tctx_state_initializing, - prof_tctx_state_nominal, - prof_tctx_state_dumping, - prof_tctx_state_purgatory /* Dumper must finish destroying. */ -} prof_tctx_state_t; - -struct prof_tctx_s { - /* Thread data for thread that performed the allocation. */ - prof_tdata_t *tdata; - - /* - * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be - * defunct during teardown. - */ - uint64_t thr_uid; - uint64_t thr_discrim; - - /* Profiling counters, protected by tdata->lock. */ - prof_cnt_t cnts; - - /* Associated global context. */ - prof_gctx_t *gctx; - - /* - * UID that distinguishes multiple tctx's created by the same thread, - * but coexisting in gctx->tctxs. There are two ways that such - * coexistence can occur: - * - A dumper thread can cause a tctx to be retained in the purgatory - * state. - * - Although a single "producer" thread must create all tctx's which - * share the same thr_uid, multiple "consumers" can each concurrently - * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only - * gets called once each time cnts.cur{objs,bytes} drop to 0, but this - * threshold can be hit again before the first consumer finishes - * executing prof_tctx_destroy(). - */ - uint64_t tctx_uid; - - /* Linkage into gctx's tctxs. */ - rb_node(prof_tctx_t) tctx_link; - - /* - * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents - * sample vs destroy race. - */ - bool prepared; - - /* Current dump-related state, protected by gctx->lock. */ - prof_tctx_state_t state; - - /* - * Copy of cnts snapshotted during early dump phase, protected by - * dump_mtx. - */ - prof_cnt_t dump_cnts; -}; -typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; - -struct prof_gctx_s { - /* Protects nlimbo, cnt_summed, and tctxs. */ - malloc_mutex_t *lock; - - /* - * Number of threads that currently cause this gctx to be in a state of - * limbo due to one of: - * - Initializing this gctx. - * - Initializing per thread counters associated with this gctx. - * - Preparing to destroy this gctx. - * - Dumping a heap profile that includes this gctx. - * nlimbo must be 1 (single destroyer) in order to safely destroy the - * gctx. - */ - unsigned nlimbo; - - /* - * Tree of profile counters, one for each thread that has allocated in - * this context. - */ - prof_tctx_tree_t tctxs; - - /* Linkage for tree of contexts to be dumped. */ - rb_node(prof_gctx_t) dump_link; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* Associated backtrace. */ - prof_bt_t bt; - - /* Backtrace vector, variable size, referred to by bt. */ - void *vec[1]; -}; -typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; - -struct prof_tdata_s { - malloc_mutex_t *lock; - - /* Monotonically increasing unique thread identifier. */ - uint64_t thr_uid; - - /* - * Monotonically increasing discriminator among tdata structures - * associated with the same thr_uid. - */ - uint64_t thr_discrim; - - /* Included in heap profile dumps if non-NULL. */ - char *thread_name; - - bool attached; - bool expired; - - rb_node(prof_tdata_t) tdata_link; - - /* - * Counter used to initialize prof_tctx_t's tctx_uid. No locking is - * necessary when incrementing this field, because only one thread ever - * does so. 
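- * (Presumably each new tctx simply takes tctx_uid_next++ as its tctx_uid;
- * the single-writer discipline is what makes the unlocked increment safe.)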
- */ - uint64_t tctx_uid_next; - - /* - * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks - * backtraces for which it has non-zero allocation/deallocation counters - * associated with thread-specific prof_tctx_t objects. Other threads - * may write to prof_tctx_t contents when freeing associated objects. - */ - ckh_t bt2tctx; - - /* Sampling state. */ - uint64_t prng_state; - uint64_t bytes_until_sample; - - /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; - - /* - * Set to true during an early dump phase for tdata's which are - * currently being dumped. New threads' tdata's have this initialized - * to false so that they aren't accidentally included in later dump - * phases. - */ - bool dumping; - - /* - * True if profiling is active for this tdata's thread - * (thread.prof.active mallctl). - */ - bool active; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* Backtrace vector, used for calls to prof_backtrace(). */ - void *vec[PROF_BT_MAX]; -}; -typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_prof; -extern bool opt_prof_active; -extern bool opt_prof_thread_active_init; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* Accessed via prof_active_[gs]et{_unlocked,}(). */ -extern bool prof_active; - -/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ -extern bool prof_gdump_val; - -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; - -/* - * Initialized as opt_lg_prof_sample, and potentially modified during profiling - * resets. 
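- * For scale: at the default LG_PROF_SAMPLE_DEFAULT of 19 (defined above),
- * allocations are sampled on average once per 2^19 bytes, i.e. once per
- * 512 KiB allocated.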
- */ -extern size_t lg_prof_sample; - -void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); -void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt); -prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); -#ifdef JEMALLOC_JET -size_t prof_tdata_count(void); -size_t prof_bt_count(void); -const prof_cnt_t *prof_cnt_all(void); -typedef int (prof_dump_open_t)(bool, const char *); -extern prof_dump_open_t *prof_dump_open; -typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); -extern prof_dump_header_t *prof_dump_header; -#endif -void prof_idump(tsdn_t *tsdn); -bool prof_mdump(tsd_t *tsd, const char *filename); -void prof_gdump(tsdn_t *tsdn); -prof_tdata_t *prof_tdata_init(tsd_t *tsd); -prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); -void prof_reset(tsd_t *tsd, size_t lg_sample); -void prof_tdata_cleanup(tsd_t *tsd); -bool prof_active_get(tsdn_t *tsdn); -bool prof_active_set(tsdn_t *tsdn, bool active); -const char *prof_thread_name_get(tsd_t *tsd); -int prof_thread_name_set(tsd_t *tsd, const char *thread_name); -bool prof_thread_active_get(tsd_t *tsd); -bool prof_thread_active_set(tsd_t *tsd, bool active); -bool prof_thread_active_init_get(tsdn_t *tsdn); -bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); -bool prof_gdump_get(tsdn_t *tsdn); -bool prof_gdump_set(tsdn_t *tsdn, bool active); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(tsd_t *tsd); -void prof_prefork0(tsdn_t *tsdn); -void prof_prefork1(tsdn_t *tsdn); -void prof_postfork_parent(tsdn_t *tsdn); -void prof_postfork_child(tsdn_t *tsdn); -void prof_sample_threshold_update(prof_tdata_t *tdata); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool prof_active_get_unlocked(void); -bool prof_gdump_get_unlocked(void); -prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); -prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *tctx); -bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, - prof_tdata_t **tdata_out); -prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, - bool update); -void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, - prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, - size_t old_usize, prof_tctx_t *old_tctx); -void prof_free(tsd_t *tsd, const void *ptr, size_t usize); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) -JEMALLOC_ALWAYS_INLINE bool -prof_active_get_unlocked(void) -{ - - /* - * Even if opt_prof is true, sampling can be temporarily disabled by - * setting prof_active to false. No locking is used when reading - * prof_active in the fast path, so there are no guarantees regarding - * how long it will take for all threads to notice state changes. 
- */ - return (prof_active); -} - -JEMALLOC_ALWAYS_INLINE bool -prof_gdump_get_unlocked(void) -{ - - /* - * No locking is used when reading prof_gdump_val in the fast path, so - * there are no guarantees regarding how long it will take for all - * threads to notice state changes. - */ - return (prof_gdump_val); -} - -JEMALLOC_ALWAYS_INLINE prof_tdata_t * -prof_tdata_get(tsd_t *tsd, bool create) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - tdata = tsd_prof_tdata_get(tsd); - if (create) { - if (unlikely(tdata == NULL)) { - if (tsd_nominal(tsd)) { - tdata = prof_tdata_init(tsd); - tsd_prof_tdata_set(tsd, tdata); - } - } else if (unlikely(tdata->expired)) { - tdata = prof_tdata_reinit(tsd, tdata); - tsd_prof_tdata_set(tsd, tdata); - } - assert(tdata == NULL || tdata->attached); - } - - return (tdata); -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_tctx_get(tsdn_t *tsdn, const void *ptr) -{ - - cassert(config_prof); - assert(ptr != NULL); - - return (arena_prof_tctx_get(tsdn, ptr)); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_set(tsdn, ptr, usize, tctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, - prof_tctx_t *old_tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); -} - -JEMALLOC_ALWAYS_INLINE bool -prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, - prof_tdata_t **tdata_out) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, true); - if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) - tdata = NULL; - - if (tdata_out != NULL) - *tdata_out = tdata; - - if (unlikely(tdata == NULL)) - return (true); - - if (likely(tdata->bytes_until_sample >= usize)) { - if (update) - tdata->bytes_until_sample -= usize; - return (true); - } else { - /* Compute new sample threshold. */ - if (update) - prof_sample_threshold_update(tdata); - return (!tdata->active); - } -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) -{ - prof_tctx_t *ret; - prof_tdata_t *tdata; - prof_bt_t bt; - - assert(usize == s2u(usize)); - - if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, - &tdata))) - ret = (prof_tctx_t *)(uintptr_t)1U; - else { - bt_init(&bt, tdata->vec); - prof_backtrace(&bt); - ret = prof_lookup(tsd, &bt); - } - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - assert(usize == isalloc(tsdn, ptr, true)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - prof_malloc_sample_object(tsdn, ptr, usize, tctx); - else - prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); -} - -JEMALLOC_ALWAYS_INLINE void -prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, - bool prof_active, bool updated, const void *old_ptr, size_t old_usize, - prof_tctx_t *old_tctx) -{ - bool sampled, old_sampled; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); - - if (prof_active && !updated && ptr != NULL) { - assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); - if (prof_sample_accum_update(tsd, usize, true, NULL)) { - /* - * Don't sample. 
The usize passed to prof_alloc_prep() - * was larger than what actually got allocated, so a - * backtrace was captured for this allocation, even - * though its actual usize was insufficient to cross the - * sample threshold. - */ - prof_alloc_rollback(tsd, tctx, true); - tctx = (prof_tctx_t *)(uintptr_t)1U; - } - } - - sampled = ((uintptr_t)tctx > (uintptr_t)1U); - old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); - - if (unlikely(sampled)) - prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); - else - prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); - - if (unlikely(old_sampled)) - prof_free_sampled_object(tsd, old_usize, old_tctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_free(tsd_t *tsd, const void *ptr, size_t usize) -{ - prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); - - cassert(config_prof); - assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - prof_free_sampled_object(tsd, usize, tctx); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h b/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h deleted file mode 100755 index c43cb615..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h +++ /dev/null @@ -1,22 +0,0 @@ -#define je_malloc_conf JEMALLOC_N(malloc_conf) -#define je_malloc_message JEMALLOC_N(malloc_message) -#define je_malloc JEMALLOC_N(malloc) -#define je_calloc JEMALLOC_N(calloc) -#define je_posix_memalign JEMALLOC_N(posix_memalign) -#define je_aligned_alloc JEMALLOC_N(aligned_alloc) -#define je_realloc JEMALLOC_N(realloc) -#define je_free JEMALLOC_N(free) -#define je_mallocx JEMALLOC_N(mallocx) -#define je_rallocx JEMALLOC_N(rallocx) -#define je_xallocx JEMALLOC_N(xallocx) -#define je_sallocx JEMALLOC_N(sallocx) -#define je_dallocx JEMALLOC_N(dallocx) -#define je_sdallocx JEMALLOC_N(sdallocx) -#define je_nallocx JEMALLOC_N(nallocx) -#define je_mallctl JEMALLOC_N(mallctl) -#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) -#define je_mallctlbymib JEMALLOC_N(mallctlbymib) -#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) -#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) -#define je_memalign JEMALLOC_N(memalign) -#define je_valloc JEMALLOC_N(valloc) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt b/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt deleted file mode 100755 index d2c566d4..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt +++ /dev/null @@ -1,22 +0,0 @@ -malloc_conf:je_malloc_conf -malloc_message:je_malloc_message -malloc:je_malloc -calloc:je_calloc -posix_memalign:je_posix_memalign -aligned_alloc:je_aligned_alloc -realloc:je_realloc -free:je_free -mallocx:je_mallocx -rallocx:je_rallocx -xallocx:je_xallocx -sallocx:je_sallocx -dallocx:je_dallocx -sdallocx:je_sdallocx -nallocx:je_nallocx -mallctl:je_mallctl -mallctlnametomib:je_mallctlnametomib -mallctlbymib:je_mallctlbymib -malloc_stats_print:je_malloc_stats_print -malloc_usable_size:je_malloc_usable_size -memalign:je_memalign -valloc:je_valloc diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h b/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h deleted file mode 100755 index 46819485..00000000 --- 
a/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h +++ /dev/null @@ -1,22 +0,0 @@ -#undef je_malloc_conf -#undef je_malloc_message -#undef je_malloc -#undef je_calloc -#undef je_posix_memalign -#undef je_aligned_alloc -#undef je_realloc -#undef je_free -#undef je_mallocx -#undef je_rallocx -#undef je_xallocx -#undef je_sallocx -#undef je_dallocx -#undef je_sdallocx -#undef je_nallocx -#undef je_mallctl -#undef je_mallctlnametomib -#undef je_mallctlbymib -#undef je_malloc_stats_print -#undef je_malloc_usable_size -#undef je_memalign -#undef je_valloc diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ql.h b/sources/jemalloc/include-linux/jemalloc/internal/ql.h deleted file mode 100755 index 1834bb85..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/ql.h +++ /dev/null @@ -1,81 +0,0 @@ -/* List definitions. */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - -#define ql_head_initializer(a_head) {NULL} - -#define ql_elm(a_type) qr(a_type) - -/* List functions. */ -#define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - -#define ql_first(a_head) ((a_head)->qlh_first) - -#define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ - : NULL) - -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - -#define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - -#define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/qr.h b/sources/jemalloc/include-linux/jemalloc/internal/qr.h deleted file mode 100755 index 0fbaec25..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/qr.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. 
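- * A freshly initialized element forms a singleton ring: both links point
- * back at the element itself, so insertion and removal never need NULL
- * checks. A minimal usage sketch (widget_t is a hypothetical type):
- *
- *   typedef struct widget_s widget_t;
- *   struct widget_s { qr(widget_t) link; };
- *   widget_t a, b;
- *   qr_new(&a, link);
- *   qr_new(&b, link);
- *   qr_after_insert(&a, &b, link);   (the ring is now a <-> b <-> a)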
*/ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) - -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - -/* - * qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. - */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) - -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h b/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h deleted file mode 100755 index ae607399..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h +++ /dev/null @@ -1,60 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct quarantine_obj_s quarantine_obj_t; -typedef struct quarantine_s quarantine_t; - -/* Default per thread quarantine size if valgrind is enabled. */ -#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct quarantine_obj_s { - void *ptr; - size_t usize; -}; - -struct quarantine_s { - size_t curbytes; - size_t curobjs; - size_t first; -#define LG_MAXOBJS_INIT 10 - size_t lg_maxobjs; - quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. 
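- * (objs[1] is the pre-C99 trailing-array idiom: the struct is allocated
- * with room for 2^lg_maxobjs entries, and first presumably indexes the
- * oldest quarantined object.)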
*/ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void quarantine_alloc_hook_work(tsd_t *tsd); -void quarantine(tsd_t *tsd, void *ptr); -void quarantine_cleanup(tsd_t *tsd); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void quarantine_alloc_hook(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) -JEMALLOC_ALWAYS_INLINE void -quarantine_alloc_hook(void) -{ - tsd_t *tsd; - - assert(config_fill && opt_quarantine); - - tsd = tsd_fetch(); - if (tsd_quarantine_get(tsd) == NULL) - quarantine_alloc_hook_work(tsd); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/rb.h b/sources/jemalloc/include-linux/jemalloc/internal/rb.h deleted file mode 100755 index 3770342f..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/rb.h +++ /dev/null @@ -1,1003 +0,0 @@ -/*- - ******************************************************************************* - * - * cpp macro implementation of left-leaning 2-3 red-black trees. Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include - * #include - * #define NDEBUG // (Optional, see assert(3).) - * #include - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include - * ... - * - ******************************************************************************* - */ - -#ifndef RB_H_ -#define RB_H_ - -#ifdef RB_COMPACT -/* Node structure. */ -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ -#define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ -} - -/* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. 
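- * With RB_COMPACT, a node's color lives in bit 0 of rbn_right_red: node
- * pointers are at least 2-byte aligned, so their low bit is always zero
- * (rbt_node_new() asserts this below), leaving it free to encode red (1)
- * versus black (0).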
*/ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - /* Bookkeeping bit cannot be used by node pointer. */ \ - assert(((uintptr_t)(a_node) & 0x1) == 0); \ - rbtn_left_set(a_type, a_field, (a_node), NULL); \ - rbtn_right_set(a_type, a_field, (a_node), NULL); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) -#else -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), NULL); \ - rbtn_right_set(a_type, a_field, (a_node), NULL); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) -#endif - -/* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = NULL; \ -} while (0) - -/* Internal utility macros. */ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != NULL) { \ - for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != NULL) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ - (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). 
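- * For instance (reusing the hypothetical ex_ names from the rb_gen()
- * documentation below), rb_proto(static, ex_, ex_t, ex_node_t) declares
- * prototypes ex_new(), ex_empty(), ex_search(), ex_insert(), and so on,
- * matching the definitions that rb_gen(static, ex_, ex_t, ex_node_t,
- * ex_link, ex_cmp) generates.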
- */ - -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ -a_attr bool \ -a_prefix##empty(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ -a_attr void \ -a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ - void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). - * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparison function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. - * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb_tree(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *tree); - * Description: Initialize a red-black tree structure. - * Args: - * tree: Pointer to an uninitialized red-black tree object. - * - * static bool - * ex_empty(ex_t *tree); - * Description: Determine whether tree is empty. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: True if tree is empty, false otherwise. - * - * static ex_node_t * - * ex_first(ex_t *tree); - * static ex_node_t * - * ex_last(ex_t *tree); - * Description: Get the first/last node in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: First/last node in tree, or NULL if tree is empty. - * - * static ex_node_t * - * ex_next(ex_t *tree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *tree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: A node in tree. - * Ret: node's successor/predecessor in tree, or NULL if node is - * last/first. 
- * - * static ex_node_t * - * ex_search(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or NULL if no match. - * - * static ex_node_t * - * ex_nsearch(ex_t *tree, const ex_node_t *key); - * static ex_node_t * - * ex_psearch(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. If no match is found, - * return what would be key's successor/predecessor, were - * key in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or if no match, hypothetical node's - * successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *tree, ex_node_t *node); - * Description: Insert node into tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node to be inserted into tree. - * - * static void - * ex_remove(ex_t *tree, ex_node_t *node); - * Description: Remove node from tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node in tree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over tree, starting at node. If - * tree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * tree : Pointer to an initialized red-black tree object. - * start: Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. This is useful for re-starting iteration after - * modifying tree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. - * - * static void - * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); - * Description: Iterate over the tree with post-order traversal, remove - * each node, and run the callback if non-null. This is - * used for destroying a tree without paying the cost to - * rebalance it. The tree must not be otherwise altered - * during traversal. - * Args: - * tree: Pointer to an initialized red-black tree object. - * cb : Callback function, which, if non-null, is called for each node - * during iteration. There is no way to stop iteration once it - * has begun. - * arg : Opaque pointer passed to cb(). 
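- *
- * To round out the ex_ example, a comparison function and an in-order
- * walk might look like the following sketch (the int key field is
- * hypothetical and not part of the example struct above):
- *
- *   static int
- *   ex_cmp(ex_node_t *a, ex_node_t *b)
- *   {
- *       return ((a->key > b->key) - (a->key < b->key));
- *   }
- *
- *   ex_node_t *n;
- *   for (n = ex_first(&tree); n != NULL; n = ex_next(&tree, n)) {
- *       ...
- *   }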
- */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ -a_attr bool \ -a_prefix##empty(a_rbt_type *rbtree) { \ - return (rbtree->rbt_root == NULL); \ -} \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != NULL) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != NULL); \ - ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != NULL); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != NULL) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != NULL); \ - ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != NULL); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ - while (ret != NULL \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = NULL; \ - while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = NULL; \ - while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. 
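-     * (Top-down pass: record each comparison in path[]; the array's \
-     * sizeof(void *) << 4 == 128 entries on LP64 give two slots per \
-     * pointer bit, covering the worst-case red-black height 2*lg(n).) \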
*/ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. */ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (left != NULL && rbtn_red_get(a_type, a_field, \ - left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. */ \ - pathp->cmp = 1; \ - nodep = pathp; \ - for (pathp++; pathp->node != NULL; \ - pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. */ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. */\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. 
*/\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != NULL) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ - assert(!rbtn_red_get(a_type, a_field, node)); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ - rbtree->rbt_root = NULL; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. */\ - pathp->node = NULL; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ - if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ - rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. */\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. 
*/\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ - rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ - if (leftrightleft != NULL && rbtn_red_get(a_type, \ - a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ - assert(leftright != NULL); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. 
*/ \ - rbtree->rbt_root = path->node; \ - assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return (NULL); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ - arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return (NULL); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = 
a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ - a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return; \ - } \ - a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ - node), cb, arg); \ - rbtn_left_set(a_type, a_field, (node), NULL); \ - a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ - node), cb, arg); \ - rbtn_right_set(a_type, a_field, (node), NULL); \ - if (cb) { \ - cb(node, arg); \ - } \ -} \ -a_attr void \ -a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ - void *arg) { \ - a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ - rbtree->rbt_root = NULL; \ -} - -#endif /* RB_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/rtree.h b/sources/jemalloc/include-linux/jemalloc/internal/rtree.h deleted file mode 100755 index 8d0c584d..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/rtree.h +++ /dev/null @@ -1,366 +0,0 @@ -/* - * This radix tree implementation is tailored to the singular purpose of - * associating metadata with chunks that are currently owned by jemalloc. - * - ******************************************************************************* - */ -#ifdef JEMALLOC_H_TYPES - -typedef struct rtree_node_elm_s rtree_node_elm_t; -typedef struct rtree_level_s rtree_level_t; -typedef struct rtree_s rtree_t; - -/* - * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the - * machine address width. - */ -#define LG_RTREE_BITS_PER_LEVEL 4 -#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) -/* Maximum rtree height. */ -#define RTREE_HEIGHT_MAX \ - ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) - -/* Used for two-stage lock-free node initialization. */ -#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) - -/* - * The node allocation callback function's argument is the number of contiguous - * rtree_node_elm_t structures to allocate, and the resulting memory must be - * zeroed. - */ -typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); -typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct rtree_node_elm_s { - union { - void *pun; - rtree_node_elm_t *child; - extent_node_t *val; - }; -}; - -struct rtree_level_s { - /* - * A non-NULL subtree points to a subtree rooted along the hypothetical - * path to the leaf node corresponding to key 0. Depending on what keys - * have been used to store to the tree, an arbitrary combination of - * subtree pointers may remain NULL. - * - * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. - * This results in a 3-level tree, and the leftmost leaf can be directly - * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding - * 0x00000000) can be accessed via subtrees[1], and the remainder of the - * tree can be accessed via subtrees[0]. - * - * levels[0] : [ | 0x0001******** | 0x0002******** | ...] - * - * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] - * - * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] - * - * This has practical implications on x64, which currently uses only the - * lower 47 bits of virtual address space in userland, thus leaving - * subtrees[0] unused and avoiding a level of tree traversal. 
- */ - union { - void *subtree_pun; - rtree_node_elm_t *subtree; - }; - /* Number of key bits distinguished by this level. */ - unsigned bits; - /* - * Cumulative number of key bits distinguished by traversing to - * corresponding tree level. - */ - unsigned cumbits; -}; - -struct rtree_s { - rtree_node_alloc_t *alloc; - rtree_node_dalloc_t *dalloc; - unsigned height; - /* - * Precomputed table used to convert from the number of leading 0 key - * bits to which subtree level to start at. - */ - unsigned start_level[RTREE_HEIGHT_MAX]; - rtree_level_t levels[RTREE_HEIGHT_MAX]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, - unsigned level); -rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, - rtree_node_elm_t *elm, unsigned level); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); -uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); - -bool rtree_node_valid(rtree_node_elm_t *node); -rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, - bool dependent); -rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, - unsigned level, bool dependent); -extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, - bool dependent); -void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, - const extent_node_t *val); -rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, - bool dependent); -rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, - bool dependent); - -extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); -bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -JEMALLOC_ALWAYS_INLINE unsigned -rtree_start_level(rtree_t *rtree, uintptr_t key) -{ - unsigned start_level; - - if (unlikely(key == 0)) - return (rtree->height - 1); - - start_level = rtree->start_level[lg_floor(key) >> - LG_RTREE_BITS_PER_LEVEL]; - assert(start_level < rtree->height); - return (start_level); -} - -JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) -{ - - return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - rtree->levels[level].cumbits)) & ((ZU(1) << - rtree->levels[level].bits) - 1)); -} - -JEMALLOC_ALWAYS_INLINE bool -rtree_node_valid(rtree_node_elm_t *node) -{ - - return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) -{ - rtree_node_elm_t *child; - - /* Double-checked read (first read may be stale. 
*/ - child = elm->child; - if (!dependent && !rtree_node_valid(child)) - child = atomic_read_p(&elm->pun); - assert(!dependent || child != NULL); - return (child); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, - bool dependent) -{ - rtree_node_elm_t *child; - - child = rtree_child_tryread(elm, dependent); - if (!dependent && unlikely(!rtree_node_valid(child))) - child = rtree_child_read_hard(rtree, elm, level); - assert(!dependent || child != NULL); - return (child); -} - -JEMALLOC_ALWAYS_INLINE extent_node_t * -rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) -{ - - if (dependent) { - /* - * Reading a val on behalf of a pointer to a valid allocation is - * guaranteed to be a clean read even without synchronization, - * because the rtree update became visible in memory before the - * pointer came into existence. - */ - return (elm->val); - } else { - /* - * An arbitrary read, e.g. on behalf of ivsalloc(), may not be - * dependent on a previous rtree write, which means a stale read - * could result if synchronization were omitted here. - */ - return (atomic_read_p(&elm->pun)); - } -} - -JEMALLOC_INLINE void -rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) -{ - - atomic_write_p(&elm->pun, val); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) -{ - rtree_node_elm_t *subtree; - - /* Double-checked read (first read may be stale. */ - subtree = rtree->levels[level].subtree; - if (!dependent && unlikely(!rtree_node_valid(subtree))) - subtree = atomic_read_p(&rtree->levels[level].subtree_pun); - assert(!dependent || subtree != NULL); - return (subtree); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) -{ - rtree_node_elm_t *subtree; - - subtree = rtree_subtree_tryread(rtree, level, dependent); - if (!dependent && unlikely(!rtree_node_valid(subtree))) - subtree = rtree_subtree_read_hard(rtree, level); - assert(!dependent || subtree != NULL); - return (subtree); -} - -JEMALLOC_ALWAYS_INLINE extent_node_t * -rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) -{ - uintptr_t subkey; - unsigned start_level; - rtree_node_elm_t *node; - - start_level = rtree_start_level(rtree, key); - - node = rtree_subtree_tryread(rtree, start_level, dependent); -#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) - switch (start_level + RTREE_GET_BIAS) { -#define RTREE_GET_SUBTREE(level) \ - case level: \ - assert(level < (RTREE_HEIGHT_MAX-1)); \ - if (!dependent && unlikely(!rtree_node_valid(node))) \ - return (NULL); \ - subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ - node = rtree_child_tryread(&node[subkey], dependent); \ - /* Fall through. */ -#define RTREE_GET_LEAF(level) \ - case level: \ - assert(level == (RTREE_HEIGHT_MAX-1)); \ - if (!dependent && unlikely(!rtree_node_valid(node))) \ - return (NULL); \ - subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ - /* \ - * node is a leaf, so it contains values rather than \ - * child pointers. 
\ - */ \ - return (rtree_val_read(rtree, &node[subkey], \ - dependent)); -#if RTREE_HEIGHT_MAX > 1 - RTREE_GET_SUBTREE(0) -#endif -#if RTREE_HEIGHT_MAX > 2 - RTREE_GET_SUBTREE(1) -#endif -#if RTREE_HEIGHT_MAX > 3 - RTREE_GET_SUBTREE(2) -#endif -#if RTREE_HEIGHT_MAX > 4 - RTREE_GET_SUBTREE(3) -#endif -#if RTREE_HEIGHT_MAX > 5 - RTREE_GET_SUBTREE(4) -#endif -#if RTREE_HEIGHT_MAX > 6 - RTREE_GET_SUBTREE(5) -#endif -#if RTREE_HEIGHT_MAX > 7 - RTREE_GET_SUBTREE(6) -#endif -#if RTREE_HEIGHT_MAX > 8 - RTREE_GET_SUBTREE(7) -#endif -#if RTREE_HEIGHT_MAX > 9 - RTREE_GET_SUBTREE(8) -#endif -#if RTREE_HEIGHT_MAX > 10 - RTREE_GET_SUBTREE(9) -#endif -#if RTREE_HEIGHT_MAX > 11 - RTREE_GET_SUBTREE(10) -#endif -#if RTREE_HEIGHT_MAX > 12 - RTREE_GET_SUBTREE(11) -#endif -#if RTREE_HEIGHT_MAX > 13 - RTREE_GET_SUBTREE(12) -#endif -#if RTREE_HEIGHT_MAX > 14 - RTREE_GET_SUBTREE(13) -#endif -#if RTREE_HEIGHT_MAX > 15 - RTREE_GET_SUBTREE(14) -#endif -#if RTREE_HEIGHT_MAX > 16 -# error Unsupported RTREE_HEIGHT_MAX -#endif - RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) -#undef RTREE_GET_SUBTREE -#undef RTREE_GET_LEAF - default: not_reached(); - } -#undef RTREE_GET_BIAS - not_reached(); -} - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) -{ - uintptr_t subkey; - unsigned i, start_level; - rtree_node_elm_t *node, *child; - - start_level = rtree_start_level(rtree, key); - - node = rtree_subtree_read(rtree, start_level, false); - if (node == NULL) - return (true); - for (i = start_level; /**/; i++, node = child) { - subkey = rtree_subkey(rtree, key, i); - if (i == rtree->height - 1) { - /* - * node is a leaf, so it contains values rather than - * child pointers. - */ - rtree_val_write(rtree, &node[subkey], val); - return (false); - } - assert(i + 1 < rtree->height); - child = rtree_child_read(rtree, &node[subkey], i, false); - if (child == NULL) - return (true); - } - not_reached(); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h b/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h deleted file mode 100755 index e4edc4bc..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h +++ /dev/null @@ -1,1428 +0,0 @@ -/* This file was automatically generated by size_classes.sh. */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to - * be defined prior to inclusion, and it in turn defines: - * - * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. - * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, - * bin, lg_delta_lookup) tuples. - * index: Size class index. - * lg_grp: Lg group base size (no deltas added). - * lg_delta: Lg delta to previous size class. - * ndelta: Delta multiplier. 
size == 1<<lg_grp + ndelta<<lg_delta
[... remainder of the generated size-class documentation and the platform-specific SIZE_CLASSES tables elided from this excerpt ...]
-#if (NBINS > 255)
-# error "Too many small size classes"
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h b/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h
deleted file mode 100755
index c5333cca..00000000
--- a/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * This file was generated by the following command:
- *   sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- *                     3     2
- *   smoothstep(x) = -2x  + 3x
- *
- *                      5      4      3
- *   smootherstep(x) = 6x  - 15x  + 10x
- *
- *                        7      6      5      4
- *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
- /* STEP(step, h, x, y) */ \
- STEP(   1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
- STEP(   2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
- STEP(   3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
- STEP(   4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
- STEP(   5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
- STEP(   6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
- STEP(   7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
- STEP(   8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
- STEP(   9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
- STEP(  10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
- STEP(  11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
- STEP(  12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
- STEP(  13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
- STEP(  14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
- STEP(  15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
- STEP(  16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
- STEP(  17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
- STEP(  18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
- STEP(  19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
- STEP(  20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
- STEP(  21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
- STEP(  22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
- STEP(  23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
- STEP(  24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
- STEP(  25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
- STEP(  26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
- STEP(  27, 
UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ - STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ - STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ - STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ - STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ - STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ - STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ - STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ - STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ - STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ - STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ - STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ - STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ - STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ - STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ - STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ - STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ - STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ - STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ - STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ - STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ - STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ - STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ - STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ - STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ - STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ - STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ - STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ - STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ - STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ - STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ - STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ - STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ - STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ - STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ - STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ - STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ - STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ - STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ - STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ - STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ - STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ - STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ - STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ - STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ - STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ - STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ - STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ - STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ - STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ - STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 
0.291858693568750) \ - STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ - STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ - STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ - STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ - STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ - STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ - STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ - STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ - STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ - STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ - STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ - STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ - STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ - STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ - STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ - STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ - STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ - STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ - STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ - STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ - STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ - STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ - STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ - STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ - STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ - STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ - STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ - STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ - STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ - STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ - STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ - STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ - STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ - STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ - STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ - STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ - STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ - STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ - STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ - STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ - STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ - STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ - STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ - STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ - STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ - STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ - STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ - STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ - STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ - STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ - 
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ - STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ - STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ - STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ - STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ - STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ - STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ - STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ - STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ - STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ - STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ - STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ - STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ - STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ - STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ - STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ - STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ - STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ - STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ - STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ - STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ - STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ - STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ - STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ - STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ - STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ - STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ - STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ - STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ - STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ - STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ - STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ - STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ - STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ - STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ - STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ - STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ - STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ - STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ - STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ - STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ - STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ - STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ - STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ - STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ - STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ - STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ - STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ - STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ - STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ - 
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ - STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ - STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ - STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ - STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ - STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ - STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ - STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ - STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ - STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ - STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ - STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ - STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ - STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ - STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ - STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ - STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ - STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ - STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ - STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ - STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ - STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ - STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/spin.h b/sources/jemalloc/include-linux/jemalloc/internal/spin.h deleted file mode 100755 index 9ef5ceb9..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/spin.h +++ /dev/null @@ -1,51 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct spin_s spin_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct spin_s { - unsigned iteration; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void spin_init(spin_t *spin); -void spin_adaptive(spin_t *spin); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_)) -JEMALLOC_INLINE void -spin_init(spin_t *spin) -{ - - spin->iteration = 0; -} - -JEMALLOC_INLINE void -spin_adaptive(spin_t *spin) -{ - volatile uint64_t i; - - for (i = 0; i < (KQU(1) << spin->iteration); i++) - CPU_SPINWAIT; - - if (spin->iteration < 63) - spin->iteration++; -} - -#endif - -#endif /* 
JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/stats.h b/sources/jemalloc/include-linux/jemalloc/internal/stats.h deleted file mode 100755 index 04e7dae1..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/stats.h +++ /dev/null @@ -1,197 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct malloc_huge_stats_s malloc_huge_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; - -struct malloc_bin_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the bin. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to the size of this - * bin. This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of regions of this size class, including regions - * currently cached by tcache. - */ - size_t curregs; - - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. */ - uint64_t nflushes; - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -struct malloc_large_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to this size class. - * This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of runs of this size class, including runs currently - * cached by tcache. - */ - size_t curruns; -}; - -struct malloc_huge_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* Current number of (multi-)chunk allocations of this size class. */ - size_t curhchunks; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Number of bytes currently retained as a side effect of munmap() being - * disabled/bypassed. Retained bytes are technically mapped (though - * always decommitted or purged), but they are excluded from the mapped - * statistic (above). 
- */ - size_t retained; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* - * Number of bytes currently mapped purely for metadata purposes, and - * number of bytes currently allocated for internal metadata. - */ - size_t metadata_mapped; - size_t metadata_allocated; /* Protected via atomic_*_z(). */ - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - size_t allocated_huge; - uint64_t nmalloc_huge; - uint64_t ndalloc_huge; - - /* One element for each large size class. */ - malloc_large_stats_t *lstats; - - /* One element for each huge size class. */ - malloc_huge_stats_t *hstats; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; - -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ - - return (atomic_read_z(&stats_cactive)); -} - -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ - - assert(size > 0); - assert((size & chunksize_mask) == 0); - - atomic_add_z(&stats_cactive, size); -} - -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - assert(size > 0); - assert((size & chunksize_mask) == 0); - - atomic_sub_z(&stats_cactive, size); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/tcache.h b/sources/jemalloc/include-linux/jemalloc/internal/tcache.h deleted file mode 100755 index 5fe5ebfa..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/tcache.h +++ /dev/null @@ -1,472 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_info_s tcache_bin_info_t; -typedef struct tcache_bin_s tcache_bin_t; -typedef struct tcache_s tcache_t; -typedef struct tcaches_s tcaches_t; - -/* - * tcache pointers close to NULL are used to encode state information that is - * used for two purposes: preventing thread caching on a per thread basis and - * cleaning up during thread shutdown. - */ -#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) -#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) -#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) -#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY - -/* - * Absolute minimum number of cache slots for each small bin. - */ -#define TCACHE_NSLOTS_SMALL_MIN 20 - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. This is an additional constraint beyond that imposed as: twice the - * number of regions per run for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. 
*/ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -typedef enum { - tcache_enabled_false = 0, /* Enable cast to/from bool. */ - tcache_enabled_true = 1, - tcache_enabled_default = 2 -} tcache_enabled_t; - -/* - * Read-only information associated with each element of tcache_t's tbins array - * is stored separately, mainly to reduce memory usage. - */ -struct tcache_bin_info_s { - unsigned ncached_max; /* Upper limit on ncached. */ -}; - -struct tcache_bin_s { - tcache_bin_stats_t tstats; - int low_water; /* Min # cached since last GC. */ - unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ - unsigned ncached; /* # of cached objects. */ - /* - * To make use of adjacent cacheline prefetch, the items in the avail - * stack goes to higher address for newer allocations. avail points - * just above the available space, which means that - * avail[-ncached, ... -1] are available items and the lowest item will - * be allocated first. - */ - void **avail; /* Stack of available objects. */ -}; - -struct tcache_s { - ql_elm(tcache_t) link; /* Used for aggregating stats. */ - uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ - ticker_t gc_ticker; /* Drives incremental GC. */ - szind_t next_gc_bin; /* Next bin to GC. */ - tcache_bin_t tbins[1]; /* Dynamically sized. */ - /* - * The pointer stacks associated with tbins follow as a contiguous - * array. During tcache initialization, the avail pointer in each - * element of tbins is initialized to point to the proper offset within - * this array. - */ -}; - -/* Linkage for list of available (previously used) explicit tcache IDs. */ -struct tcaches_s { - union { - tcache_t *tcache; - tcaches_t *next; - }; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern tcache_bin_info_t *tcache_bin_info; - -/* - * Number of tcache bins. There are NBINS small-object bins, plus 0 or more - * large-object bins. - */ -extern unsigned nhbins; - -/* Maximum cached size class. */ -extern size_t tcache_maxclass; - -/* - * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and - * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are - * completely disjoint from this data structure. tcaches starts off as a sparse - * array, so it has no physical memory footprint until individual pages are - * touched. This allows the entire array to be allocated the first time an - * explicit tcache is created without a disproportionate impact on memory usage. 
- */ -extern tcaches_t *tcaches; - -size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); -void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); -void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind, bool *tcache_success); -void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem); -void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache); -void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, - arena_t *oldarena, arena_t *newarena); -tcache_t *tcache_get_hard(tsd_t *tsd); -tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); -void tcache_cleanup(tsd_t *tsd); -void tcache_enabled_cleanup(tsd_t *tsd); -void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); -bool tcaches_create(tsd_t *tsd, unsigned *r_ind); -void tcaches_flush(tsd_t *tsd, unsigned ind); -void tcaches_destroy(tsd_t *tsd, unsigned ind); -bool tcache_boot(tsdn_t *tsdn); -void tcache_prefork(tsdn_t *tsdn); -void tcache_postfork_parent(tsdn_t *tsdn); -void tcache_postfork_child(tsdn_t *tsdn); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void tcache_event(tsd_t *tsd, tcache_t *tcache); -void tcache_flush(void); -bool tcache_enabled_get(void); -tcache_t *tcache_get(tsd_t *tsd, bool create); -void tcache_enabled_set(bool enabled); -void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); -void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - size_t size, szind_t ind, bool zero, bool slow_path); -void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - size_t size, szind_t ind, bool zero, bool slow_path); -void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, - szind_t binind, bool slow_path); -void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, - size_t size, bool slow_path); -tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) -JEMALLOC_INLINE void -tcache_flush(void) -{ - tsd_t *tsd; - - cassert(config_tcache); - - tsd = tsd_fetch(); - tcache_cleanup(tsd); -} - -JEMALLOC_INLINE bool -tcache_enabled_get(void) -{ - tsd_t *tsd; - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tsd = tsd_fetch(); - tcache_enabled = tsd_tcache_enabled_get(tsd); - if (tcache_enabled == tcache_enabled_default) { - tcache_enabled = (tcache_enabled_t)opt_tcache; - tsd_tcache_enabled_set(tsd, tcache_enabled); - } - - return ((bool)tcache_enabled); -} - -JEMALLOC_INLINE void -tcache_enabled_set(bool enabled) -{ - tsd_t *tsd; - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tsd = tsd_fetch(); - - tcache_enabled = (tcache_enabled_t)enabled; - tsd_tcache_enabled_set(tsd, tcache_enabled); - - if (!enabled) - tcache_cleanup(tsd); -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcache_get(tsd_t *tsd, bool create) -{ - tcache_t *tcache; - - if (!config_tcache) - return (NULL); - - tcache = tsd_tcache_get(tsd); - if (!create) - return (tcache); - if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { - tcache = tcache_get_hard(tsd); - tsd_tcache_set(tsd, tcache); - } - - return (tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_event(tsd_t *tsd, tcache_t *tcache) -{ - - if (TCACHE_GC_INCR == 0) - return; - - if (unlikely(ticker_tick(&tcache->gc_ticker))) - 
tcache_event_hard(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) -{ - void *ret; - - if (unlikely(tbin->ncached == 0)) { - tbin->low_water = -1; - *tcache_success = false; - return (NULL); - } - /* - * tcache_success (instead of ret) should be checked upon the return of - * this function. We avoid checking (ret == NULL) because there is - * never a null stored on the avail stack (which is unknown to the - * compiler), and eagerly checking ret would cause pipeline stall - * (waiting for the cacheline). - */ - *tcache_success = true; - ret = *(tbin->avail - tbin->ncached); - tbin->ncached--; - - if (unlikely((int)tbin->ncached < tbin->low_water)) - tbin->low_water = tbin->ncached; - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, - szind_t binind, bool zero, bool slow_path) -{ - void *ret; - tcache_bin_t *tbin; - bool tcache_success; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - assert(binind < NBINS); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin, &tcache_success); - assert(tcache_success == (ret != NULL)); - if (unlikely(!tcache_success)) { - bool tcache_hard_success; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, - tbin, binind, &tcache_hard_success); - if (tcache_hard_success == false) - return (NULL); - } - - assert(ret); - /* - * Only compute usize if required. The checks in the following if - * statement are all static. - */ - if (config_prof || (slow_path && config_fill) || unlikely(zero)) { - usize = index2size(binind); - assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); - } - - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } else { - if (slow_path && config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - memset(ret, 0, usize); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += usize; - tcache_event(tsd, tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, - szind_t binind, bool zero, bool slow_path) -{ - void *ret; - tcache_bin_t *tbin; - bool tcache_success; - - assert(binind < nhbins); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin, &tcache_success); - assert(tcache_success == (ret != NULL)); - if (unlikely(!tcache_success)) { - /* - * Only allocate one large object at a time, because it's quite - * expensive to create one and not use it. 
- */ - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); - if (ret == NULL) - return (NULL); - } else { - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - /* Only compute usize on demand */ - if (config_prof || (slow_path && config_fill) || - unlikely(zero)) { - usize = index2size(binind); - assert(usize <= tcache_maxclass); - } - - if (config_prof && usize == LARGE_MINCLASS) { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> - LG_PAGE); - arena_mapbits_large_binind_set(chunk, pageind, - BININD_INVALID); - } - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - memset(ret, JEMALLOC_ALLOC_JUNK, - usize); - } else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } else - memset(ret, 0, usize); - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += usize; - } - - tcache_event(tsd, tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, - bool slow_path) -{ - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); - - if (slow_path && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (unlikely(tbin->ncached == tbin_info->ncached_max)) { - tcache_bin_flush_small(tsd, tcache, tbin, binind, - (tbin_info->ncached_max >> 1)); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->ncached++; - *(tbin->avail - tbin->ncached) = ptr; - - tcache_event(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, - bool slow_path) -{ - szind_t binind; - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); - - binind = size2index(size); - - if (slow_path && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_large(ptr, size); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (unlikely(tbin->ncached == tbin_info->ncached_max)) { - tcache_bin_flush_large(tsd, tbin, binind, - (tbin_info->ncached_max >> 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->ncached++; - *(tbin->avail - tbin->ncached) = ptr; - - tcache_event(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcaches_get(tsd_t *tsd, unsigned ind) -{ - tcaches_t *elm = &tcaches[ind]; - if (unlikely(elm->tcache == NULL)) { - elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, - NULL)); - } - return (elm->tcache); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ticker.h b/sources/jemalloc/include-linux/jemalloc/internal/ticker.h deleted file mode 100755 index 4696e56d..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/ticker.h +++ /dev/null @@ -1,75 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ticker_s ticker_t; - -#endif /* JEMALLOC_H_TYPES */ 
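For context on the header deleted below: ticker_t is the countdown primitive that tcache.h above uses to drive incremental GC, gating tcache_event_hard() on ticker_tick(&tcache->gc_ticker) in tcache_event(). The following is a minimal standalone sketch of that pattern in plain C, with the JEMALLOC_INLINE plumbing and unlikely() hints stripped; the names mirror the header but this is illustrative only, not the vendored source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Countdown ticker: fires once every nticks + 1 calls, then rewinds. */
typedef struct {
	int32_t tick;   /* events remaining before the next trigger */
	int32_t nticks; /* reload value */
} ticker_t;

static void
ticker_init(ticker_t *ticker, int32_t nticks)
{
	ticker->tick = nticks;
	ticker->nticks = nticks;
}

static bool
ticker_tick(ticker_t *ticker)
{
	if (ticker->tick < 1) {
		/* Budget exhausted: rewind the counter and fire. */
		ticker->tick = ticker->nticks;
		return true;
	}
	ticker->tick -= 1;
	return false;
}

int
main(void)
{
	ticker_t gc_ticker;

	ticker_init(&gc_ticker, 3);
	for (int ev = 1; ev <= 10; ev++) {
		/* Mirrors tcache_event(): most events cost only a decrement;
		 * with a reload of 3, every 4th event pays for a GC step. */
		if (ticker_tick(&gc_ticker))
			printf("event %d: run incremental GC step\n", ev);
	}
	return 0;
}

This amortization is also why TCACHE_GC_INCR above is derived from TCACHE_GC_SWEEP / NBINS: each allocation/deallocation event is a cheap decrement, and the expensive sweep work is spread across roughly TCACHE_GC_SWEEP events.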
-/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ticker_s { - int32_t tick; - int32_t nticks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void ticker_init(ticker_t *ticker, int32_t nticks); -void ticker_copy(ticker_t *ticker, const ticker_t *other); -int32_t ticker_read(const ticker_t *ticker); -bool ticker_ticks(ticker_t *ticker, int32_t nticks); -bool ticker_tick(ticker_t *ticker); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_)) -JEMALLOC_INLINE void -ticker_init(ticker_t *ticker, int32_t nticks) -{ - - ticker->tick = nticks; - ticker->nticks = nticks; -} - -JEMALLOC_INLINE void -ticker_copy(ticker_t *ticker, const ticker_t *other) -{ - - *ticker = *other; -} - -JEMALLOC_INLINE int32_t -ticker_read(const ticker_t *ticker) -{ - - return (ticker->tick); -} - -JEMALLOC_INLINE bool -ticker_ticks(ticker_t *ticker, int32_t nticks) -{ - - if (unlikely(ticker->tick < nticks)) { - ticker->tick = ticker->nticks; - return (true); - } - ticker->tick -= nticks; - return(false); -} - -JEMALLOC_INLINE bool -ticker_tick(ticker_t *ticker) -{ - - return (ticker_ticks(ticker, 1)); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/tsd.h b/sources/jemalloc/include-linux/jemalloc/internal/tsd.h deleted file mode 100755 index 9f374335..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/tsd.h +++ /dev/null @@ -1,788 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum number of malloc_tsd users with cleanup functions. */ -#define MALLOC_TSD_CLEANUPS_MAX 2 - -typedef bool (*malloc_tsd_cleanup_t)(void); - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -typedef struct tsd_init_block_s tsd_init_block_t; -typedef struct tsd_init_head_s tsd_init_head_t; -#endif - -typedef struct tsd_s tsd_t; -typedef struct tsdn_s tsdn_t; - -#define TSDN_NULL ((tsdn_t *)0) - -typedef enum { - tsd_state_uninitialized, - tsd_state_nominal, - tsd_state_purgatory, - tsd_state_reincarnated -} tsd_state_t; - -/* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are five macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. 
Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_types(example_, example_t) - * malloc_tsd_protos(, example_, example_t) - * malloc_tsd_externs(example_, example_t) - * In example.c: - * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) - * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * bool example_tsd_booted_get(void) {...} - * example_t *example_tsd_get(bool init) {...} - * void example_tsd_set(example_t *val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast the - * function argument to (a_type *), then dereference the resulting pointer to - * access fields, e.g. - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = (example_t *)arg; - * - * example->x = 42; - * [...] - * if ([want the cleanup function to be called again]) - * example_tsd_set(example); - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. - */ - -/* malloc_tsd_types(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_types(a_name, a_type) -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_types(a_name, a_type) -#elif (defined(_WIN32)) -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#else -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#endif - -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##tsd_boot0(void); \ -a_attr void \ -a_name##tsd_boot1(void); \ -a_attr bool \ -a_name##tsd_boot(void); \ -a_attr bool \ -a_name##tsd_booted_get(void); \ -a_attr a_type * \ -a_name##tsd_get(bool init); \ -a_attr void \ -a_name##tsd_set(a_type *val); - -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern __thread bool a_name##tsd_initialized; \ -extern bool a_name##tsd_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern pthread_key_t a_name##tsd_tsd; \ -extern bool a_name##tsd_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##tsd_tsd; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##tsd_tsd; \ -extern tsd_init_head_t a_name##tsd_init_head; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#endif - -/* malloc_tsd_data(). 
*/ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##tsd_initialized = false; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##tsd_tsd; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr tsd_init_head_t a_name##tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#endif - -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##tsd_initialized) { \ - a_name##tsd_initialized = false; \ - a_cleanup(&a_name##tsd_tls); \ - } \ - return (a_name##tsd_initialized); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##tsd_cleanup_wrapper); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - \ - assert(a_name##tsd_booted); \ - return (&a_name##tsd_tls); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##tsd_booted); \ - a_name##tsd_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##tsd_initialized = true; \ -} -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ - 0) \ - return (true); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (false); \ -} \ -/* Get/set. 
*/ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- \
- assert(a_name##tsd_booted); \
- return (&a_name##tsd_tls); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- \
- assert(a_name##tsd_booted); \
- a_name##tsd_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)(&a_name##tsd_tls))) { \
- malloc_write("<jemalloc>: Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- } \
-}
-#elif (defined(_WIN32))
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr bool \
-a_name##tsd_cleanup_wrapper(void) \
-{ \
- DWORD error = GetLastError(); \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- TlsGetValue(a_name##tsd_tsd); \
- SetLastError(error); \
- \
- if (wrapper == NULL) \
- return (false); \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- wrapper->initialized = false; \
- a_cleanup(&wrapper->val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- return (true); \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
- \
- if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
-} \
-a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
- DWORD error = GetLastError(); \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- TlsGetValue(a_name##tsd_tsd); \
- SetLastError(error); \
- \
- if (init && unlikely(wrapper == NULL)) { \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- wrapper->initialized = false; \
- wrapper->val = a_initializer; \
- } \
- a_name##tsd_wrapper_set(wrapper); \
- } \
- return (wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- a_name##tsd_tsd = TlsAlloc(); \
- if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
- return (true); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##tsd_cleanup_wrapper); \
- } \
- a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
- a_name##tsd_wrapper_set(wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- if (a_name##tsd_boot0()) \
- return (true); \
- a_name##tsd_boot1(); \
- return (false); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
-/* Get/set. */ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
- return (&wrapper->val); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
- wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- wrapper->initialized = true; \
-}
-#else
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr void \
-a_name##tsd_cleanup_wrapper(void *arg) \
-{ \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
- \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- wrapper->initialized = false; \
- a_cleanup(&wrapper->val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- return; \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
-} \
-a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
- \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
-} \
-a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- pthread_getspecific(a_name##tsd_tsd); \
- \
- if (init && unlikely(wrapper == NULL)) { \
- tsd_init_block_t block; \
- wrapper = (a_name##tsd_wrapper_t *) \
- tsd_init_check_recursion(&a_name##tsd_init_head, \
- &block); \
- if (wrapper) \
- return (wrapper); \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- block.data = (void *)wrapper; \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- wrapper->initialized = false; \
- wrapper->val = a_initializer; \
- } \
- a_name##tsd_wrapper_set(wrapper); \
- tsd_init_finish(&a_name##tsd_init_head, &block); \
- } \
- return (wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- if (pthread_key_create(&a_name##tsd_tsd, \
- a_name##tsd_cleanup_wrapper) != 0) \
- return (true); \
- a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
- a_name##tsd_wrapper_set(wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- if (a_name##tsd_boot0()) \
- return (true); \
- a_name##tsd_boot1(); \
- return (false); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
-/* Get/set. 
*/ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(init); \ - if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ - return (NULL); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(true); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif - -#define MALLOC_TSD \ -/* O(name, type) */ \ - O(tcache, tcache_t *) \ - O(thread_allocated, uint64_t) \ - O(thread_deallocated, uint64_t) \ - O(prof_tdata, prof_tdata_t *) \ - O(iarena, arena_t *) \ - O(arena, arena_t *) \ - O(arenas_tdata, arena_tdata_t *) \ - O(narenas_tdata, unsigned) \ - O(arenas_tdata_bypass, bool) \ - O(tcache_enabled, tcache_enabled_t) \ - O(quarantine, quarantine_t *) \ - O(witnesses, witness_list_t) \ - O(witness_fork, bool) \ - -#define TSD_INITIALIZER { \ - tsd_state_uninitialized, \ - NULL, \ - 0, \ - 0, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - 0, \ - false, \ - tcache_enabled_default, \ - NULL, \ - ql_head_initializer(witnesses), \ - false \ -} - -struct tsd_s { - tsd_state_t state; -#define O(n, t) \ - t n; -MALLOC_TSD -#undef O -}; - -/* - * Wrapper around tsd_t that makes it possible to avoid implicit conversion - * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be - * explicitly converted to tsd_t, which is non-nullable. 
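[Editorial sketch] The comment above describes the whole point of tsdn_t: encoding nullability in the type system so that a "maybe NULL" handle cannot silently be used as a "never NULL" one. A compact illustration of the same idiom follows; ctx_t and ctxn_t are made-up names, not jemalloc types.

    #include <assert.h>
    #include <stddef.h>

    typedef struct ctx_s { int id; } ctx_t;       /* handle: never NULL */
    typedef struct ctxn_s { ctx_t ctx; } ctxn_t;  /* nullable handle */

    static int ctxn_null(const ctxn_t *ctxn) { return ctxn == NULL; }

    /* The only way from nullable to non-nullable: explicit and asserted. */
    static ctx_t *ctxn_ctx(ctxn_t *ctxn) {
        assert(!ctxn_null(ctxn));
        return &ctxn->ctx;
    }

    static void bump(ctxn_t *ctxn) {
        if (ctxn_null(ctxn))
            return;               /* NULL handled up front... */
        ctxn_ctx(ctxn)->id++;     /* ...so the narrowing is safe */
    }

    int main(void) {
        ctxn_t c = {{0}};
        bump(&c);
        bump(NULL);               /* fine: checked, never dereferenced */
        return 0;
    }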
- */ -struct tsdn_s { - tsd_t tsd; -}; - -static const tsd_t tsd_initializer = TSD_INITIALIZER; - -malloc_tsd_types(, tsd_t) - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *arg); -void malloc_tsd_cleanup_register(bool (*f)(void)); -tsd_t *malloc_tsd_boot0(void); -void malloc_tsd_boot1(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif -void tsd_cleanup(void *arg); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) - -tsd_t *tsd_fetch_impl(bool init); -tsd_t *tsd_fetch(void); -tsdn_t *tsd_tsdn(tsd_t *tsd); -bool tsd_nominal(tsd_t *tsd); -#define O(n, t) \ -t *tsd_##n##p_get(tsd_t *tsd); \ -t tsd_##n##_get(tsd_t *tsd); \ -void tsd_##n##_set(tsd_t *tsd, t n); -MALLOC_TSD -#undef O -tsdn_t *tsdn_fetch(void); -bool tsdn_null(const tsdn_t *tsdn); -tsd_t *tsdn_tsd(tsdn_t *tsdn); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) -malloc_tsd_externs(, tsd_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) - -JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch_impl(bool init) -{ - tsd_t *tsd = tsd_get(init); - - if (!init && tsd_get_allocates() && tsd == NULL) - return (NULL); - assert(tsd != NULL); - - if (unlikely(tsd->state != tsd_state_nominal)) { - if (tsd->state == tsd_state_uninitialized) { - tsd->state = tsd_state_nominal; - /* Trigger cleanup handler registration. 
*/
- tsd_set(tsd);
- } else if (tsd->state == tsd_state_purgatory) {
- tsd->state = tsd_state_reincarnated;
- tsd_set(tsd);
- } else
- assert(tsd->state == tsd_state_reincarnated);
- }
-
- return (tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
-
- return (tsd_fetch_impl(true));
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
-
- return ((tsdn_t *)tsd);
-}
-
-JEMALLOC_INLINE bool
-tsd_nominal(tsd_t *tsd)
-{
-
- return (tsd->state == tsd_state_nominal);
-}
-
-#define O(n, t) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get(tsd_t *tsd) \
-{ \
- \
- return (&tsd->n); \
-} \
- \
-JEMALLOC_ALWAYS_INLINE t \
-tsd_##n##_get(tsd_t *tsd) \
-{ \
- \
- return (*tsd_##n##p_get(tsd)); \
-} \
- \
-JEMALLOC_ALWAYS_INLINE void \
-tsd_##n##_set(tsd_t *tsd, t n) \
-{ \
- \
- assert(tsd->state == tsd_state_nominal); \
- tsd->n = n; \
-}
-MALLOC_TSD
-#undef O
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
-
- if (!tsd_booted_get())
- return (NULL);
-
- return (tsd_tsdn(tsd_fetch_impl(false)));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
-
- return (tsdn == NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
-
- assert(!tsdn_null(tsdn));
-
- return (&tsdn->tsd);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/util.h b/sources/jemalloc/include-linux/jemalloc/internal/util.h
deleted file mode 100755
index 4b56d652..00000000
--- a/sources/jemalloc/include-linux/jemalloc/internal/util.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#ifdef _WIN32
-# ifdef _WIN64
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX "ll"
-# else
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX ""
-# endif
-# define FMTd32 "d"
-# define FMTu32 "u"
-# define FMTx32 "x"
-# define FMTd64 FMT64_PREFIX "d"
-# define FMTu64 FMT64_PREFIX "u"
-# define FMTx64 FMT64_PREFIX "x"
-# define FMTdPTR FMTPTR_PREFIX "d"
-# define FMTuPTR FMTPTR_PREFIX "u"
-# define FMTxPTR FMTPTR_PREFIX "x"
-#else
-# include <inttypes.h>
-# define FMTd32 PRId32
-# define FMTu32 PRIu32
-# define FMTx32 PRIx32
-# define FMTd64 PRId64
-# define FMTu64 PRIu64
-# define FMTx64 PRIx64
-# define FMTdPTR PRIdPTR
-# define FMTuPTR PRIuPTR
-# define FMTxPTR PRIxPTR
-#endif
-
-/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
-
-/*
- * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
- * large enough for all possible uses within jemalloc.
- */
-#define MALLOC_PRINTF_BUFSIZE 4096
-
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
-#endif
-
-/*
- * Wrap a cpp argument that contains commas such that it isn't broken up into
- * multiple arguments.
- */
-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
-
-/*
- * Silence compiler warnings due to uninitialized values. This is used
- * wherever the compiler fails to recognize that the variable is never used
- * uninitialized. 
- */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -#ifdef __GNUC__ -# define likely(x) __builtin_expect(!!(x), 1) -# define unlikely(x) __builtin_expect(!!(x), 0) -#else -# define likely(x) !!(x) -# define unlikely(x) !!(x) -#endif - -#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) -# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure -#endif - -#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() - -#include "jemalloc/internal/assert.h" - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if (unlikely(!(c))) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -size_t malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -size_t malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -unsigned ffs_llu(unsigned long long bitmap); -unsigned ffs_lu(unsigned long bitmap); -unsigned ffs_u(unsigned bitmap); -unsigned ffs_zu(size_t bitmap); -unsigned ffs_u64(uint64_t bitmap); -unsigned ffs_u32(uint32_t bitmap); -uint64_t pow2_ceil_u64(uint64_t x); -uint32_t pow2_ceil_u32(uint32_t x); -size_t pow2_ceil_zu(size_t x); -unsigned lg_floor(size_t x); -void set_errno(int errnum); -int get_errno(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) - -/* Sanity check. 
*/ -#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ - || !defined(JEMALLOC_INTERNAL_FFS) -# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure -#endif - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_llu(unsigned long long bitmap) -{ - - return (JEMALLOC_INTERNAL_FFSLL(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_lu(unsigned long bitmap) -{ - - return (JEMALLOC_INTERNAL_FFSL(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u(unsigned bitmap) -{ - - return (JEMALLOC_INTERNAL_FFS(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_zu(size_t bitmap) -{ - -#if LG_SIZEOF_PTR == LG_SIZEOF_INT - return (ffs_u(bitmap)); -#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG - return (ffs_lu(bitmap)); -#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG - return (ffs_llu(bitmap)); -#else -#error No implementation for size_t ffs() -#endif -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u64(uint64_t bitmap) -{ - -#if LG_SIZEOF_LONG == 3 - return (ffs_lu(bitmap)); -#elif LG_SIZEOF_LONG_LONG == 3 - return (ffs_llu(bitmap)); -#else -#error No implementation for 64-bit ffs() -#endif -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u32(uint32_t bitmap) -{ - -#if LG_SIZEOF_INT == 2 - return (ffs_u(bitmap)); -#else -#error No implementation for 32-bit ffs() -#endif - return (ffs_u(bitmap)); -} - -JEMALLOC_INLINE uint64_t -pow2_ceil_u64(uint64_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x |= x >> 32; - x++; - return (x); -} - -JEMALLOC_INLINE uint32_t -pow2_ceil_u32(uint32_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x++; - return (x); -} - -/* Compute the smallest power of 2 that is >= x. */ -JEMALLOC_INLINE size_t -pow2_ceil_zu(size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return (pow2_ceil_u64(x)); -#else - return (pow2_ceil_u32(x)); -#endif -} - -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - size_t ret; - - assert(x != 0); - - asm ("bsr %1, %0" - : "=r"(ret) // Outputs. - : "r"(x) // Inputs. - ); - assert(ret < UINT_MAX); - return ((unsigned)ret); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - unsigned long ret; - - assert(x != 0); - -#if (LG_SIZEOF_PTR == 3) - _BitScanReverse64(&ret, x); -#elif (LG_SIZEOF_PTR == 2) - _BitScanReverse(&ret, x); -#else -# error "Unsupported type size for lg_floor()" -#endif - assert(ret < UINT_MAX); - return ((unsigned)ret); -} -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - - assert(x != 0); - -#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); -#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); -#else -# error "Unsupported type size for lg_floor()" -#endif -} -#else -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - - assert(x != 0); - - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); -#if (LG_SIZEOF_PTR == 3) - x |= (x >> 32); -#endif - if (x == SIZE_T_MAX) - return ((8 << LG_SIZEOF_PTR) - 1); - x++; - return (ffs_zu(x) - 2); -} -#endif - -/* Set error code. */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ - -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - -/* Get last error code. 
*/
-JEMALLOC_INLINE int
-get_errno(void)
-{
-
-#ifdef _WIN32
- return (GetLastError());
-#else
- return (errno);
-#endif
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h b/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h
deleted file mode 100755
index 877a142b..00000000
--- a/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-
-/*
- * The size that is reported to Valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size. As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_noaccess(ptr, usize); \
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_undefined(ptr, usize); \
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_defined(ptr, usize); \
-} while (0)
-/*
- * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
- * calls must be embedded in macros rather than in functions so that when
- * Valgrind reports errors, there are no extra stack frames in the backtraces.
- */
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) { \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
- zero); \
- } \
-} while (0)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
- ((ptr) != (old_ptr))
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
- (ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
- (old_ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
- old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
- if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(tsdn, ptr); \
- \
- if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
- old_ptr)) { \
- VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
- usize, rzsize); \
- if (zero && old_usize < usize) { \
- valgrind_make_mem_defined( \
- (void *)((uintptr_t)ptr + \
- old_usize), usize - old_usize); \
- } \
- } else { \
- if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
- old_ptr_null(old_ptr)) { \
- valgrind_freelike_block(old_ptr, \
- old_rzsize); \
- } \
- if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
- ptr_null(ptr)) { \
- size_t copy_size = (old_usize < usize) \
- ? 
old_usize : usize; \ - size_t tail_size = usize - copy_size; \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ - rzsize, false); \ - if (copy_size > 0) { \ - valgrind_make_mem_defined(ptr, \ - copy_size); \ - } \ - if (zero && tail_size > 0) { \ - valgrind_make_mem_defined( \ - (void *)((uintptr_t)ptr + \ - copy_size), tail_size); \ - } \ - } \ - } \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ - if (unlikely(in_valgrind)) \ - valgrind_freelike_block(ptr, rzsize); \ -} while (0) -#else -#define RUNNING_ON_VALGRIND ((unsigned)0) -#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ - ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ - zero) do {} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_VALGRIND -void valgrind_make_mem_noaccess(void *ptr, size_t usize); -void valgrind_make_mem_undefined(void *ptr, size_t usize); -void valgrind_make_mem_defined(void *ptr, size_t usize); -void valgrind_freelike_block(void *ptr, size_t usize); -#endif - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-linux/jemalloc/internal/witness.h b/sources/jemalloc/include-linux/jemalloc/internal/witness.h deleted file mode 100755 index 30d8c7e9..00000000 --- a/sources/jemalloc/include-linux/jemalloc/internal/witness.h +++ /dev/null @@ -1,304 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct witness_s witness_t; -typedef unsigned witness_rank_t; -typedef ql_head(witness_t) witness_list_t; -typedef int witness_comp_t (const witness_t *, const witness_t *); - -/* - * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by - * the witness machinery. - */ -#define WITNESS_RANK_OMIT 0U - -#define WITNESS_RANK_MIN 1U - -#define WITNESS_RANK_INIT 1U -#define WITNESS_RANK_CTL 1U -#define WITNESS_RANK_TCACHES 2U -#define WITNESS_RANK_ARENAS 3U - -#define WITNESS_RANK_PROF_DUMP 4U -#define WITNESS_RANK_PROF_BT2GCTX 5U -#define WITNESS_RANK_PROF_TDATAS 6U -#define WITNESS_RANK_PROF_TDATA 7U -#define WITNESS_RANK_PROF_GCTX 8U - -/* - * Used as an argument to witness_assert_depth_to_rank() in order to validate - * depth excluding non-core locks with lower ranks. Since the rank argument to - * witness_assert_depth_to_rank() is inclusive rather than exclusive, this - * definition can have the same value as the minimally ranked core lock. 
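[Editorial sketch] The rank scheme above exists so lock acquisition order can be checked at runtime: every newly acquired lock must rank at least as high as the one acquired before it, which rules out A-before-B on one thread and B-before-A on another. A simplified, self-contained illustration of that invariant follows; lock_t and thread_locks_t are made-up names, and the real checker additionally consults a per-pair comparison callback for equal ranks.

    #include <assert.h>
    #include <stddef.h>

    #define MAX_HELD 16

    typedef struct { const char *name; unsigned rank; } lock_t;
    typedef struct { const lock_t *held[MAX_HELD]; size_t nheld; } thread_locks_t;

    /* Record an acquisition, flagging rank-order reversals. */
    static void lock_acquire(thread_locks_t *t, const lock_t *l) {
        if (t->nheld > 0) {
            const lock_t *last = t->held[t->nheld - 1];
            assert(last->rank <= l->rank && "lock order reversal");
        }
        assert(t->nheld < MAX_HELD);
        t->held[t->nheld++] = l;
    }

    int main(void) {
        /* Rank values mirror the constants defined in this header. */
        lock_t ctl = {"ctl", 1}, arena = {"arena", 9}, leaf = {"leaf", 0xffffffffU};
        thread_locks_t t = {{0}, 0};
        lock_acquire(&t, &ctl);
        lock_acquire(&t, &arena); /* ok: 1 -> 9 is increasing */
        lock_acquire(&t, &leaf);  /* ok: leaf ranks highest */
        return 0;
    }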
- */
-#define WITNESS_RANK_CORE 9U
-
-#define WITNESS_RANK_ARENA 9U
-#define WITNESS_RANK_ARENA_CHUNKS 10U
-#define WITNESS_RANK_ARENA_NODE_CACHE 11U
-
-#define WITNESS_RANK_BASE 12U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-
-#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct witness_s {
- /* Name, used for printing lock order reversal messages. */
- const char *name;
-
- /*
- * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
- * must be acquired in order of increasing rank.
- */
- witness_rank_t rank;
-
- /*
- * If two witnesses are of equal rank and they have the same comp
- * function pointer, it is called as a last attempt to differentiate
- * between witnesses of equal rank.
- */
- witness_comp_t *comp;
-
- /* Linkage for thread's currently owned locks. */
- ql_elm(witness_t) link;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp);
-#ifdef JEMALLOC_JET
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *witness_lock_error;
-#else
-void witness_lock_error(const witness_list_t *witnesses,
- const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *witness_owner_error;
-#else
-void witness_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *witness_not_owner_error;
-#else
-void witness_not_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_depth_error_t)(const witness_list_t *,
- witness_rank_t rank_inclusive, unsigned depth);
-extern witness_depth_error_t *witness_depth_error;
-#else
-void witness_depth_error(const witness_list_t *witnesses,
- witness_rank_t rank_inclusive, unsigned depth);
-#endif
-
-void witnesses_cleanup(tsd_t *tsd);
-void witness_fork_cleanup(tsd_t *tsd);
-void witness_prefork(tsd_t *tsd);
-void witness_postfork_parent(tsd_t *tsd);
-void witness_postfork_child(tsd_t *tsd);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool witness_owner(tsd_t *tsd, const witness_t *witness);
-void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
- unsigned depth);
-void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
-void witness_assert_lockless(tsdn_t *tsdn);
-void witness_lock(tsdn_t *tsdn, witness_t *witness);
-void witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if 
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE bool -witness_owner(tsd_t *tsd, const witness_t *witness) -{ - witness_list_t *witnesses; - witness_t *w; - - cassert(config_debug); - - witnesses = tsd_witnessesp_get(tsd); - ql_foreach(w, witnesses, link) { - if (w == witness) - return (true); - } - - return (false); -} - -JEMALLOC_INLINE void -witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) -{ - tsd_t *tsd; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - if (witness_owner(tsd, witness)) - return; - witness_owner_error(witness); -} - -JEMALLOC_INLINE void -witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - witnesses = tsd_witnessesp_get(tsd); - ql_foreach(w, witnesses, link) { - if (w == witness) - witness_not_owner_error(witness); - } -} - -JEMALLOC_INLINE void -witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive, - unsigned depth) { - tsd_t *tsd; - unsigned d; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - - d = 0; - witnesses = tsd_witnessesp_get(tsd); - w = ql_last(witnesses, link); - if (w != NULL) { - ql_reverse_foreach(w, witnesses, link) { - if (w->rank < rank_inclusive) { - break; - } - d++; - } - } - if (d != depth) - witness_depth_error(witnesses, rank_inclusive, depth); -} - -JEMALLOC_INLINE void -witness_assert_depth(tsdn_t *tsdn, unsigned depth) { - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth); -} - -JEMALLOC_INLINE void -witness_assert_lockless(tsdn_t *tsdn) { - witness_assert_depth(tsdn, 0); -} - -JEMALLOC_INLINE void -witness_lock(tsdn_t *tsdn, witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - witness_assert_not_owner(tsdn, witness); - - witnesses = tsd_witnessesp_get(tsd); - w = ql_last(witnesses, link); - if (w == NULL) { - /* No other locks; do nothing. */ - } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { - /* Forking, and relaxed ranking satisfied. */ - } else if (w->rank > witness->rank) { - /* Not forking, rank order reversal. */ - witness_lock_error(witnesses, witness); - } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != - witness->comp || w->comp(w, witness) > 0)) { - /* - * Missing/incompatible comparison function, or comparison - * function indicates rank order reversal. - */ - witness_lock_error(witnesses, witness); - } - - ql_elm_new(witness, link); - ql_tail_insert(witnesses, witness, link); -} - -JEMALLOC_INLINE void -witness_unlock(tsdn_t *tsdn, witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - /* - * Check whether owner before removal, rather than relying on - * witness_assert_owner() to abort, so that unit tests can test this - * function's failure mode without causing undefined behavior. 
- */
- if (witness_owner(tsd, witness)) {
- witnesses = tsd_witnessesp_get(tsd);
- ql_remove(witnesses, witness, link);
- } else
- witness_assert_owner(tsdn, witness);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc.h b/sources/jemalloc/include-linux/jemalloc/jemalloc.h
deleted file mode 100755
index 2d0c0b17..00000000
--- a/sources/jemalloc/include-linux/jemalloc/jemalloc.h
+++ /dev/null
@@ -1,386 +0,0 @@
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE_MEMALIGN
-#define JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#define JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#if (defined(__LP64__) && __LP64__)
-#define LG_SIZEOF_PTR 3
-#else
-#define LG_SIZEOF_PTR 2
-#endif
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-# define je_malloc_conf je_malloc_conf
-# define je_malloc_message je_malloc_message
-# define je_malloc je_malloc
-# define je_calloc je_calloc
-# define je_posix_memalign je_posix_memalign
-# define je_aligned_alloc je_aligned_alloc
-# define je_realloc je_realloc
-# define je_free je_free
-# define je_mallocx je_mallocx
-# define je_rallocx je_rallocx
-# define je_xallocx je_xallocx
-# define je_sallocx je_sallocx
-# define je_dallocx je_dallocx
-# define je_sdallocx je_sdallocx
-# define je_nallocx je_nallocx
-# define je_mallctl je_mallctl
-# define je_mallctlnametomib je_mallctlnametomib
-# define je_mallctlbymib je_mallctlbymib
-# define je_malloc_stats_print je_malloc_stats_print
-# define je_malloc_usable_size je_malloc_usable_size
-# define je_memalign je_memalign
-# define je_valloc je_valloc
-#endif
-
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
-#define JEMALLOC_VERSION_MAJOR 0
-#define JEMALLOC_VERSION_MINOR 0
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
-
-# define MALLOCX_LG_ALIGN(la) ((int)(la))
-# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-# else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? 
ffs((int)(a))-1 : \ - ffs((int)(((size_t)(a))>>32))+31)) -# endif -# define MALLOCX_ZERO ((int)0x40) -/* - * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 - * encodes MALLOCX_TCACHE_NONE. - */ -# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) -# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) -/* - * Bias arena index bits so that 0 encodes "use an automatically chosen arena". - */ -# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) - -#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) -# define JEMALLOC_CXX_THROW throw() -#else -# define JEMALLOC_CXX_THROW -#endif - -#if _MSC_VER -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# ifndef JEMALLOC_EXPORT -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# endif -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE __declspec(noinline) -# ifdef __cplusplus -# define JEMALLOC_NOTHROW __declspec(nothrow) -# else -# define JEMALLOC_NOTHROW -# endif -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) -# if _MSC_VER >= 1900 && !defined(__EDG__) -# define JEMALLOC_ALLOCATOR __declspec(allocator) -# else -# define JEMALLOC_ALLOCATOR -# endif -#elif defined(JEMALLOC_HAVE_ATTR) -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE -# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) -# else -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# endif -# ifndef JEMALLOC_EXPORT -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# endif -# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) -# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) -# else -# define JEMALLOC_FORMAT_PRINTF(s, i) -# endif -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# define JEMALLOC_EXPORT -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE -# define JEMALLOC_NOTHROW -# define JEMALLOC_SECTION(s) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#endif - -/* - * The je_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
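[Editorial sketch] For reference, typical application-side usage of the public prototypes that follow, assuming a build with the default je_ prefix; the "stats.allocated" name is taken from jemalloc's documented mallctl namespace.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = je_malloc(100);
        if (p == NULL)
            return 1;
        /* Usable size may exceed the requested 100 bytes. */
        printf("usable: %zu\n", je_malloc_usable_size(p));

        size_t allocated, sz = sizeof(allocated);
        if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
            printf("heap allocated: %zu\n", allocated);

        je_free(p);
        return 0;
    }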
- */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( - void (*write_cb)(void *, const char *), void *je_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif - -/* - * void * - * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - * bool *commit, unsigned arena_ind); - */ -typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); - -/* - * bool - * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); - */ -typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); - -/* - * bool - * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); - -/* 
- * bool - * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); - -/* - * bool - * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); - -typedef struct { - chunk_alloc_t *alloc; - chunk_dalloc_t *dalloc; - chunk_commit_t *commit; - chunk_decommit_t *decommit; - chunk_purge_t *purge; - chunk_split_t *split; - chunk_merge_t *merge; -} chunk_hooks_t; - -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. - */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign -# define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx -# define dallocx je_dallocx -# define sdallocx je_sdallocx -# define nallocx je_nallocx -# define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib -# define mallctlbymib je_mallctlbymib -# define malloc_stats_print je_malloc_stats_print -# define malloc_usable_size je_malloc_usable_size -# define memalign je_memalign -# define valloc je_valloc -#endif - -/* - * The je_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign -# undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx -# undef je_dallocx -# undef je_sdallocx -# undef je_nallocx -# undef je_mallctl -# undef je_mallctlnametomib -# undef je_mallctlbymib -# undef je_malloc_stats_print -# undef je_malloc_usable_size -# undef je_memalign -# undef je_valloc -#endif - -#ifdef __cplusplus -} -#endif -#endif /* JEMALLOC_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h deleted file mode 100755 index 9a5948d5..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h +++ /dev/null @@ -1,50 +0,0 @@ -/* include/jemalloc/jemalloc_defs.h. 
Generated from jemalloc_defs.h.in by configure. */
-/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-
-/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#define JEMALLOC_OVERRIDE_MEMALIGN
-#define JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#define JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#define JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#if (defined(__LP64__) && __LP64__)
-#define LG_SIZEOF_PTR 3
-#else
-#define LG_SIZEOF_PTR 2
-#endif
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h
deleted file mode 100755
index fdc318ef..00000000
--- a/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
-#define JEMALLOC_VERSION_MAJOR 0
-#define JEMALLOC_VERSION_MINOR 0
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
-
-# define MALLOCX_LG_ALIGN(la) ((int)(la))
-# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-# else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-# endif
-# define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
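[Editorial sketch] Concretely, the biasing described in these comments works out as follows; this standalone snippet just replays the macro arithmetic from this header, with the flag definitions copied from the surrounding lines.

    #include <stdio.h>

    #define MALLOCX_LG_ALIGN(la) ((int)(la))
    #define MALLOCX_ZERO         ((int)0x40)
    #define MALLOCX_TCACHE(tc)   ((int)(((tc)+2) << 8))
    #define MALLOCX_TCACHE_NONE  MALLOCX_TCACHE(-1)
    #define MALLOCX_ARENA(a)     ((((int)(a))+1) << 20)

    int main(void) {
        /* 64-byte aligned, zeroed, tcache bypassed, arena 3. */
        int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO |
            MALLOCX_TCACHE_NONE | MALLOCX_ARENA(3);
        printf("flags = 0x%x\n", flags);          /* 0x400146 */
        printf("arena bias = %d\n", flags >> 20); /* 4, i.e. arena index 3 */
        return 0;
    }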
- */ -# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) - -#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) -# define JEMALLOC_CXX_THROW throw() -#else -# define JEMALLOC_CXX_THROW -#endif - -#if _MSC_VER -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# ifndef JEMALLOC_EXPORT -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# endif -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE __declspec(noinline) -# ifdef __cplusplus -# define JEMALLOC_NOTHROW __declspec(nothrow) -# else -# define JEMALLOC_NOTHROW -# endif -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) -# if _MSC_VER >= 1900 && !defined(__EDG__) -# define JEMALLOC_ALLOCATOR __declspec(allocator) -# else -# define JEMALLOC_ALLOCATOR -# endif -#elif defined(JEMALLOC_HAVE_ATTR) -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE -# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) -# else -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# endif -# ifndef JEMALLOC_EXPORT -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# endif -# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) -# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) -# else -# define JEMALLOC_FORMAT_PRINTF(s, i) -# endif -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# define JEMALLOC_EXPORT -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE -# define JEMALLOC_NOTHROW -# define JEMALLOC_SECTION(s) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h deleted file mode 100755 index 34872e83..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
- */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign -# define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx -# define dallocx je_dallocx -# define sdallocx je_sdallocx -# define nallocx je_nallocx -# define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib -# define mallctlbymib je_mallctlbymib -# define malloc_stats_print je_malloc_stats_print -# define malloc_usable_size je_malloc_usable_size -# define memalign je_memalign -# define valloc je_valloc -#endif - -/* - * The je_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign -# undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx -# undef je_dallocx -# undef je_sdallocx -# undef je_nallocx -# undef je_mallctl -# undef je_mallctlnametomib -# undef je_mallctlbymib -# undef je_malloc_stats_print -# undef je_malloc_usable_size -# undef je_memalign -# undef je_valloc -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h deleted file mode 100755 index db5b7b0c..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. - */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf jet_malloc_conf -# define malloc_message jet_malloc_message -# define malloc jet_malloc -# define calloc jet_calloc -# define posix_memalign jet_posix_memalign -# define aligned_alloc jet_aligned_alloc -# define realloc jet_realloc -# define free jet_free -# define mallocx jet_mallocx -# define rallocx jet_rallocx -# define xallocx jet_xallocx -# define sallocx jet_sallocx -# define dallocx jet_dallocx -# define sdallocx jet_sdallocx -# define nallocx jet_nallocx -# define mallctl jet_mallctl -# define mallctlnametomib jet_mallctlnametomib -# define mallctlbymib jet_mallctlbymib -# define malloc_stats_print jet_malloc_stats_print -# define malloc_usable_size jet_malloc_usable_size -# define memalign jet_memalign -# define valloc jet_valloc -#endif - -/* - * The jet_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. 
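(Aside, not part of the diff: the opt-in mangling above boils down to defining the unprefixed names as macros. A toy stand-in; je_malloc here is a fake, not the real allocator:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the real jemalloc entry point. */
static void *je_malloc(size_t size) {
    printf("je_malloc(%zu)\n", size);
    return NULL; /* toy: performs no allocation */
}

/* What jemalloc_mangle.h does once JEMALLOC_MANGLE is defined: */
#define malloc je_malloc

int main(void) {
    void *p = malloc(32); /* preprocesses to je_malloc(32) */
    (void)p;
    return 0;
}

Keeping the remapping in a separate header that is only active under JEMALLOC_MANGLE is what lets jemalloc coexist with another allocator in the same binary.)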
This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef jet_malloc_conf -# undef jet_malloc_message -# undef jet_malloc -# undef jet_calloc -# undef jet_posix_memalign -# undef jet_aligned_alloc -# undef jet_realloc -# undef jet_free -# undef jet_mallocx -# undef jet_rallocx -# undef jet_xallocx -# undef jet_sallocx -# undef jet_dallocx -# undef jet_sdallocx -# undef jet_nallocx -# undef jet_mallctl -# undef jet_mallctlnametomib -# undef jet_mallctlbymib -# undef jet_malloc_stats_print -# undef jet_malloc_usable_size -# undef jet_memalign -# undef jet_valloc -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h deleted file mode 100755 index ff025e30..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * The je_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). - */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t 
newlen); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( - void (*write_cb)(void *, const char *), void *je_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h deleted file mode 100755 index f71efef0..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * The jet_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h). - */ -extern JEMALLOC_EXPORT const char *jet_malloc_conf; -extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void 
JEMALLOC_NOTHROW jet_malloc_stats_print( - void (*write_cb)(void *, const char *), void *jet_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h deleted file mode 100755 index 22e53206..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. - */ -#ifndef JEMALLOC_NO_RENAME -# define je_malloc_conf je_malloc_conf -# define je_malloc_message je_malloc_message -# define je_malloc je_malloc -# define je_calloc je_calloc -# define je_posix_memalign je_posix_memalign -# define je_aligned_alloc je_aligned_alloc -# define je_realloc je_realloc -# define je_free je_free -# define je_mallocx je_mallocx -# define je_rallocx je_rallocx -# define je_xallocx je_xallocx -# define je_sallocx je_sallocx -# define je_dallocx je_dallocx -# define je_sdallocx je_sdallocx -# define je_nallocx je_nallocx -# define je_mallctl je_mallctl -# define je_mallctlnametomib je_mallctlnametomib -# define je_mallctlbymib je_mallctlbymib -# define je_malloc_stats_print je_malloc_stats_print -# define je_malloc_usable_size je_malloc_usable_size -# define je_memalign je_memalign -# define je_valloc je_valloc -#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h deleted file mode 100755 index fa7b350a..00000000 --- a/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * void * - * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - * bool *commit, unsigned arena_ind); - */ -typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); - -/* - * bool - * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); - */ -typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); - -/* - * bool - * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); - -/* - * bool - * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - * bool committed, 
unsigned arena_ind); - */ -typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); - -typedef struct { - chunk_alloc_t *alloc; - chunk_dalloc_t *dalloc; - chunk_commit_t *commit; - chunk_decommit_t *decommit; - chunk_purge_t *purge; - chunk_split_t *split; - chunk_merge_t *merge; -} chunk_hooks_t; diff --git a/sources/jemalloc/include-win/jemalloc/internal/arena.h b/sources/jemalloc/include-win/jemalloc/internal/arena.h deleted file mode 100755 index 119e3a59..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/arena.h +++ /dev/null @@ -1,1528 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) - -/* Maximum number of regions in one run. */ -#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) -#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) - -/* - * Minimum redzone size. Redzones may be larger than this if necessary to - * preserve region alignment. - */ -#define REDZONE_MINSIZE 16 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> lg_dirty_mult) >= ndirty - * - * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as - * many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 3 - -typedef enum { - purge_mode_ratio = 0, - purge_mode_decay = 1, - - purge_mode_limit = 2 -} purge_mode_t; -#define PURGE_DEFAULT purge_mode_ratio -/* Default decay time in seconds. */ -#define DECAY_TIME_DEFAULT 10 -/* Number of event ticks between time checks. */ -#define DECAY_NTICKS_PER_UPDATE 1000 - -typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; -typedef struct arena_avail_links_s arena_avail_links_t; -typedef struct arena_run_s arena_run_t; -typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; -typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; -typedef struct arena_chunk_s arena_chunk_t; -typedef struct arena_bin_info_s arena_bin_info_t; -typedef struct arena_decay_s arena_decay_t; -typedef struct arena_bin_s arena_bin_t; -typedef struct arena_s arena_t; -typedef struct arena_tdata_s arena_tdata_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#ifdef JEMALLOC_ARENA_STRUCTS_A -struct arena_run_s { - /* Index of bin this run is associated with. */ - szind_t binind; - - /* Number of free regions in run. */ - unsigned nfree; - - /* Per region allocated/deallocated bitmap. */ - bitmap_t bitmap[BITMAP_GROUPS_MAX]; -}; - -/* Each element of the chunk map corresponds to one page within the chunk. */ -struct arena_chunk_map_bits_s { - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ???nnnnn nnndumla - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. - * Small: Run page offset. - * Large: Run page count for first page, unset for trailing pages. - * n : binind for small size class, BININD_INVALID for large size class. - * d : dirty? - * u : unzeroed? - * m : decommitted? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. 
- * - * p : run page offset - * s : run size - * n : binind for size class; large objects set these to BININD_INVALID - * x : don't care - * - : 0 - * + : 1 - * [DUMLA] : bit set - * [dumla] : bit unset - * - * Unallocated (clean): - * ssssssss ssssssss sss+++++ +++dum-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx - * ssssssss ssssssss sss+++++ +++dUm-a - * - * Unallocated (dirty): - * ssssssss ssssssss sss+++++ +++D-m-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * ssssssss ssssssss sss+++++ +++D-m-a - * - * Small: - * pppppppp pppppppp pppnnnnn nnnd---A - * pppppppp pppppppp pppnnnnn nnn----A - * pppppppp pppppppp pppnnnnn nnnd---A - * - * Large: - * ssssssss ssssssss sss+++++ +++D--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - * - * Large (sampled, size <= LARGE_MINCLASS): - * ssssssss ssssssss sssnnnnn nnnD--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - * - * Large (not sampled, size == LARGE_MINCLASS): - * ssssssss ssssssss sss+++++ +++D--LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ---+++++ +++D--LA - */ - size_t bits; -#define CHUNK_MAP_ALLOCATED ((size_t)0x01U) -#define CHUNK_MAP_LARGE ((size_t)0x02U) -#define CHUNK_MAP_STATE_MASK ((size_t)0x3U) - -#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) -#define CHUNK_MAP_UNZEROED ((size_t)0x08U) -#define CHUNK_MAP_DIRTY ((size_t)0x10U) -#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) - -#define CHUNK_MAP_BININD_SHIFT 5 -#define BININD_INVALID ((size_t)0xffU) -#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) -#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK - -#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) -#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) -#define CHUNK_MAP_SIZE_MASK \ - (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) -}; - -struct arena_runs_dirty_link_s { - qr(arena_runs_dirty_link_t) rd_link; -}; - -/* - * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just - * like arena_chunk_map_bits_t. Two separate arrays are stored within each - * chunk header in order to improve cache locality. - */ -struct arena_chunk_map_misc_s { - /* - * Linkage for run heaps. There are two disjoint uses: - * - * 1) arena_t's runs_avail heaps. - * 2) arena_run_t conceptually uses this linkage for in-use non-full - * runs, rather than directly embedding linkage. - */ - phn(arena_chunk_map_misc_t) ph_link; - - union { - /* Linkage for list of dirty runs. */ - arena_runs_dirty_link_t rd; - - /* Profile counters, used for large object runs. */ - union { - void *prof_tctx_pun; - prof_tctx_t *prof_tctx; - }; - - /* Small region run metadata. */ - arena_run_t run; - }; -}; -typedef ph(arena_chunk_map_misc_t) arena_run_heap_t; -#endif /* JEMALLOC_ARENA_STRUCTS_A */ - -#ifdef JEMALLOC_ARENA_STRUCTS_B -/* Arena chunk header. */ -struct arena_chunk_s { - /* - * A pointer to the arena that owns the chunk is stored within the node. - * This field as a whole is used by chunks_rtree to support both - * ivsalloc() and core-based debugging. - */ - extent_node_t node; - - /* - * True if memory could be backed by transparent huge pages. This is - * only directly relevant to Linux, since it is the only supported - * platform on which jemalloc interacts with explicit transparent huge - * page controls. - */ - bool hugepage; - - /* - * Map of pages within chunk that keeps track of free/large/small. 
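(Aside, not part of the diff: the shift and mask constants above make the packed per-page word straightforward to decode. A standalone round trip using the same constants; the page and bin numbers are invented:

#include <stddef.h>
#include <stdio.h>

#define CHUNK_MAP_ALLOCATED    ((size_t)0x01U)
#define CHUNK_MAP_BININD_SHIFT 5
#define BININD_INVALID         ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK  (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)

int main(void) {
    /* Page 7 of a small run in bin 4, allocated. */
    size_t mapbits = ((size_t)7 << CHUNK_MAP_RUNIND_SHIFT) |
        ((size_t)4 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_ALLOCATED;
    size_t binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    size_t runind = mapbits >> CHUNK_MAP_RUNIND_SHIFT;
    printf("binind=%zu runind=%zu\n", binind, runind); /* prints 4 and 7 */
    return 0;
}

This mirrors arena_mapbits_binind_get() and arena_mapbits_small_runind_get() defined further down in this header.)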
The - * first map_bias entries are omitted, since the chunk header does not - * need to be tracked in the map. This omission saves a header page - * for common chunk sizes (e.g. 4 MiB). - */ - arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */ -}; - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each run has the following layout: - * - * /--------------------\ - * | pad? | - * |--------------------| - * | redzone | - * reg0_offset | region 0 | - * | redzone | - * |--------------------| \ - * | redzone | | - * | region 1 | > reg_interval - * | redzone | / - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | redzone | - * | region nregs-1 | - * | redzone | - * |--------------------| - * | alignment pad? | - * \--------------------/ - * - * reg_interval has at least the same minimum alignment as reg_size; this - * preserves the alignment constraint that sa2u() depends on. Alignment pad is - * either 0 or redzone_size; it is present only if needed to align reg0_offset. - */ -struct arena_bin_info_s { - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Redzone size. */ - size_t redzone_size; - - /* Interval between regions (reg_size + (redzone_size << 1)). */ - size_t reg_interval; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* - * Metadata used to manipulate bitmaps for runs associated with this - * bin. - */ - bitmap_info_t bitmap_info; - - /* Offset of first region in a run for this bin's size class. */ - uint32_t reg0_offset; -}; - -struct arena_decay_s { - /* - * Approximate time in seconds from the creation of a set of unused - * dirty pages until an equivalent set of unused dirty pages is purged - * and/or reused. - */ - ssize_t time; - /* time / SMOOTHSTEP_NSTEPS. */ - nstime_t interval; - /* - * Time at which the current decay interval logically started. We do - * not actually advance to a new epoch until sometime after it starts - * because of scheduling and computation delays, and it is even possible - * to completely skip epochs. In all cases, during epoch advancement we - * merge all relevant activity into the most recently recorded epoch. - */ - nstime_t epoch; - /* Deadline randomness generator. */ - uint64_t jitter_state; - /* - * Deadline for current epoch. This is the sum of interval and per - * epoch jitter which is a uniform random variable in [0..interval). - * Epochs always advance by precise multiples of interval, but we - * randomize the deadline to reduce the likelihood of arenas purging in - * lockstep. - */ - nstime_t deadline; - /* - * Number of dirty pages at beginning of current epoch. During epoch - * advancement we use the delta between arena->decay.ndirty and - * arena->ndirty to determine how many dirty pages, if any, were - * generated. - */ - size_t ndirty; - /* - * Trailing log of how many unused dirty pages were generated during - * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last - * element is the most recent epoch. Corresponding epoch times are - * relative to epoch. - */ - size_t backlog[SMOOTHSTEP_NSTEPS]; -}; - -struct arena_bin_s { - /* - * All operations on runcur, runs, and stats require that lock be - * locked. 
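(Aside, not part of the diff: quick arithmetic for the run layout just described, with invented sizes; per the struct comment, reg_interval is reg_size plus one redzone on each side:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t reg_size = 48, redzone_size = 16, nregs = 10;
    /* Matches the struct comment: reg_size + (redzone_size << 1). */
    uint32_t reg_interval = reg_size + (redzone_size << 1); /* 80 */
    printf("reg_interval=%u; %u regions span %u bytes plus pad\n",
        reg_interval, nregs, nregs * reg_interval); /* 80; 10 regions, 800 bytes */
    return 0;
}

With redzones disabled, redzone_size is 0, reg_interval collapses to reg_size, and regions pack densely.)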
Run allocation/deallocation are protected by the arena lock, - * which may be acquired while holding one or more bin locks, but not - * vice versa. - */ - malloc_mutex_t lock; - - /* - * Current run being used to service allocations of this bin's size - * class. - */ - arena_run_t *runcur; - - /* - * Heap of non-full runs. This heap is used when looking for an - * existing run when runcur is no longer usable. We choose the - * non-full run that is lowest in memory; this policy tends to keep - * objects packed well, and it can also help reduce the number of - * almost-empty chunks. - */ - arena_run_heap_t runs; - - /* Bin statistics. */ - malloc_bin_stats_t stats; -}; - -struct arena_s { - /* This arena's index within the arenas array. */ - unsigned ind; - - /* - * Number of threads currently assigned to this arena, synchronized via - * atomic operations. Each thread has two distinct assignments, one for - * application-serving allocation, and the other for internal metadata - * allocation. Internal metadata must not be allocated from arenas - * created via the arenas.extend mallctl, because the arena.<i>.reset - * mallctl indiscriminately discards all allocations for the affected - * arena. - * - * 0: Application allocation. - * 1: Internal metadata allocation. - */ - unsigned nthreads[2]; - - /* - * There are three classes of arena operations from a locking - * perspective: - * 1) Thread assignment (modifies nthreads) is synchronized via atomics. - * 2) Bin-related operations are protected by bin locks. - * 3) Chunk- and run-related operations are protected by this mutex. - */ - malloc_mutex_t lock; - - arena_stats_t stats; - /* - * List of tcaches for extant threads associated with this arena. - * Stats from these are merged incrementally, and at exit if - * opt_stats_print is enabled. - */ - ql_head(tcache_t) tcache_ql; - - uint64_t prof_accumbytes; - - /* - * PRNG state for cache index randomization of large allocation base - * pointers. - */ - size_t offset_state; - - dss_prec_t dss_prec; - - /* Extant arena chunks. */ - ql_head(extent_node_t) achunks; - - /* Extent serial number generator state. */ - size_t extent_sn_next; - - /* - * In order to avoid rapid chunk allocation/deallocation when an arena - * oscillates right on the cusp of needing a new chunk, cache the most - * recently freed chunk. The spare is left in the arena's chunk trees - * until it is deleted. - * - * There is one spare chunk per arena, rather than one spare total, in - * order to avoid interactions between multiple threads that could make - * a single spare inadequate. - */ - arena_chunk_t *spare; - - /* Minimum ratio (log base 2) of nactive:ndirty. */ - ssize_t lg_dirty_mult; - - /* True if a thread is currently executing arena_purge_to_limit(). */ - bool purging; - - /* Number of pages in active runs and huge regions. */ - size_t nactive; - - /* - * Current count of pages within unused runs that are potentially - * dirty, and for which madvise(... MADV_DONTNEED) has not been called. - * By tracking this, we can institute a limit on how much dirty unused - * memory is mapped for each arena. - */ - size_t ndirty; - - /* - * Unused dirty memory this arena manages. Dirty memory is conceptually - * tracked as an arbitrarily interleaved LRU of dirty runs and cached - * chunks, but the list linkage is actually semi-duplicated in order to - * avoid extra arena_chunk_map_misc_t space overhead.
- * - * LRU-----------------------------------------------------------MRU - * - * /-- arena ---\ - * | | - * | | - * |------------| /- chunk -\ - * ...->|chunks_cache|<--------------------------->| /----\ |<--... - * |------------| | |node| | - * | | | | | | - * | | /- run -\ /- run -\ | | | | - * | | | | | | | | | | - * | | | | | | | | | | - * |------------| |-------| |-------| | |----| | - * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... - * |------------| |-------| |-------| | |----| | - * | | | | | | | | | | - * | | | | | | | \----/ | - * | | \-------/ \-------/ | | - * | | | | - * | | | | - * \------------/ \---------/ - */ - arena_runs_dirty_link_t runs_dirty; - extent_node_t chunks_cache; - - /* Decay-based purging state. */ - arena_decay_t decay; - - /* Extant huge allocations. */ - ql_head(extent_node_t) huge; - /* Synchronizes all huge allocation/update/deallocation. */ - malloc_mutex_t huge_mtx; - - /* - * Trees of chunks that were previously allocated (trees differ only in - * node ordering). These are used when allocating chunks, in an attempt - * to re-use address space. Depending on function, different tree - * orderings are needed, which is why there are two trees with the same - * contents. - */ - extent_tree_t chunks_szsnad_cached; - extent_tree_t chunks_ad_cached; - extent_tree_t chunks_szsnad_retained; - extent_tree_t chunks_ad_retained; - - malloc_mutex_t chunks_mtx; - /* Cache of nodes that were allocated via base_alloc(). */ - ql_head(extent_node_t) node_cache; - malloc_mutex_t node_cache_mtx; - - /* User-configurable chunk hook functions. */ - chunk_hooks_t chunk_hooks; - - /* bins is used to store trees of free regions. */ - arena_bin_t bins[NBINS]; - - /* - * Size-segregated address-ordered heaps of this arena's available runs, - * used for first-best-fit run allocation. Runs are quantized, i.e. - * they reside in the last heap which corresponds to a size class less - * than or equal to the run size. - */ - arena_run_heap_t runs_avail[NPSIZES]; -}; - -/* Used in conjunction with tsd for fast arena-related context lookup. */ -struct arena_tdata_s { - ticker_t decay_ticker; -}; -#endif /* JEMALLOC_ARENA_STRUCTS_B */ - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -static const size_t large_pad = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - PAGE -#else - 0 -#endif - ; - -extern bool opt_thp; -extern purge_mode_t opt_purge; -extern const char *purge_mode_names[]; -extern ssize_t opt_lg_dirty_mult; -extern ssize_t opt_decay_time; - -extern arena_bin_info_t arena_bin_info[NBINS]; - -extern size_t map_bias; /* Number of arena chunk header pages. */ -extern size_t map_misc_offset; -extern size_t arena_maxrun; /* Max run size for arenas. */ -extern size_t large_maxclass; /* Max large size class. */ -extern unsigned nlclasses; /* Number of large size classes. */ -extern unsigned nhclasses; /* Number of huge size classes. 
*/ - -#ifdef JEMALLOC_JET -typedef size_t (run_quantize_t)(size_t); -extern run_quantize_t *run_quantize_floor; -extern run_quantize_t *run_quantize_ceil; -#endif -void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, - bool cache); -void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, - bool cache); -extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); -void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); -void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, size_t *sn, bool *zero); -void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t usize, size_t sn); -void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize); -void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize, size_t sn); -bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize, bool *zero); -ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); -bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, - ssize_t lg_dirty_mult); -ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); -bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); -void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); -void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); -void arena_reset(tsd_t *tsd, arena_t *arena); -void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, - tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); -void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, - bool zero); -#ifdef JEMALLOC_JET -typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, - uint8_t); -extern arena_redzone_corruption_t *arena_redzone_corruption; -typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); -extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; -#else -void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); -#endif -void arena_quarantine_junk_small(void *ptr, size_t usize); -void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, - bool zero); -void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, - szind_t ind, bool zero); -void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache); -void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); -void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t pageind); -#ifdef JEMALLOC_JET -typedef void (arena_dalloc_junk_large_t)(void *, size_t); -extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; -#else -void arena_dalloc_junk_large(void *ptr, size_t usize); -#endif -void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr); -void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr); -#ifdef JEMALLOC_JET -typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); -extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; -#endif -bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t size, 
size_t extra, bool zero); -void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, - size_t size, size_t alignment, bool zero, tcache_t *tcache); -dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); -bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); -ssize_t arena_lg_dirty_mult_default_get(void); -bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); -ssize_t arena_decay_time_default_get(void); -bool arena_decay_time_default_set(ssize_t decay_time); -void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, - unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, - ssize_t *decay_time, size_t *nactive, size_t *ndirty); -void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, - malloc_huge_stats_t *hstats); -unsigned arena_nthreads_get(arena_t *arena, bool internal); -void arena_nthreads_inc(arena_t *arena, bool internal); -void arena_nthreads_dec(arena_t *arena, bool internal); -size_t arena_extent_sn_next(arena_t *arena); -arena_t *arena_new(tsdn_t *tsdn, unsigned ind); -void arena_boot(void); -void arena_prefork0(tsdn_t *tsdn, arena_t *arena); -void arena_prefork1(tsdn_t *tsdn, arena_t *arena); -void arena_prefork2(tsdn_t *tsdn, arena_t *arena); -void arena_prefork3(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); -void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk, - size_t pageind); -const arena_chunk_map_bits_t *arena_bitselm_get_const( - const arena_chunk_t *chunk, size_t pageind); -arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk, - size_t pageind); -const arena_chunk_map_misc_t *arena_miscelm_get_const( - const arena_chunk_t *chunk, size_t pageind); -size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm); -void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm); -arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); -arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); -size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind); -const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbitsp_read(const size_t *mapbitsp); -size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_size_decode(size_t mapbits); -size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk, - size_t pageind); -szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind); -void 
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); -size_t arena_mapbits_size_encode(size_t size); -void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size); -void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, - size_t flags); -void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - szind_t binind); -void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, szind_t binind, size_t flags); -void arena_metadata_allocated_add(arena_t *arena, size_t size); -void arena_metadata_allocated_sub(arena_t *arena, size_t size); -size_t arena_metadata_allocated_get(arena_t *arena); -bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); -szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); -size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, - const void *ptr); -prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *old_tctx); -void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); -void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); -void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, - bool zero, tcache_t *tcache, bool slow_path); -arena_t *arena_aalloc(const void *ptr); -size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); -void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); -void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) -# ifdef JEMALLOC_ARENA_INLINE_A -JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * -arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return (&chunk->map_bits[pageind-map_bias]); -} - -JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t * -arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + - (uintptr_t)map_misc_offset) + pageind-map_bias); -} - -JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t * -arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + - map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return 
(pageind); -} - -JEMALLOC_ALWAYS_INLINE void * -arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - - return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) -{ - arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t - *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); - - assert(arena_miscelm_to_pageind(miscelm) >= map_bias); - assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); - - return (miscelm); -} - -JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_run_to_miscelm(arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t - *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); - - assert(arena_miscelm_to_pageind(miscelm) >= map_bias); - assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); - - return (miscelm); -} - -JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind) -{ - - return (&arena_bitselm_get_mutable(chunk, pageind)->bits); -} - -JEMALLOC_ALWAYS_INLINE const size_t * -arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(const size_t *mapbitsp) -{ - - return (*mapbitsp); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind))); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_size_decode(size_t mapbits) -{ - size_t size; - -#if CHUNK_MAP_SIZE_SHIFT > 0 - size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; -#elif CHUNK_MAP_SIZE_SHIFT == 0 - size = mapbits & CHUNK_MAP_SIZE_MASK; -#else - size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; -#endif - - return (size); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - CHUNK_MAP_ALLOCATED); - return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); -} - -JEMALLOC_ALWAYS_INLINE szind_t -arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - szind_t binind; - - mapbits = arena_mapbits_get(chunk, pageind); - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - assert(binind < NBINS || binind == BININD_INVALID); - return (binind); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || 
(mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_DIRTY); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_UNZEROED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - return (mapbits & CHUNK_MAP_DECOMMITTED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_LARGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) -{ - - *mapbitsp = mapbits; -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_size_encode(size_t size) -{ - size_t mapbits; - -#if CHUNK_MAP_SIZE_SHIFT > 0 - mapbits = size << CHUNK_MAP_SIZE_SHIFT; -#elif CHUNK_MAP_SIZE_SHIFT == 0 - mapbits = size; -#else - mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; -#endif - - assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); - return (mapbits); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); - assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - CHUNK_MAP_BININD_INVALID | flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert((size & PAGE_MASK) == 0); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - (mapbits & ~CHUNK_MAP_SIZE_MASK)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((flags & CHUNK_MAP_UNZEROED) == flags); - arena_mapbitsp_write(mapbitsp, flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); - assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & - (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); - arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | - CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - szind_t binind) -{ - size_t 
*mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert(binind <= BININD_INVALID); - assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + - large_pad); - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | - (binind << CHUNK_MAP_BININD_SHIFT)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - szind_t binind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); - - assert(binind < BININD_INVALID); - assert(pageind - runind >= map_bias); - assert((flags & CHUNK_MAP_UNZEROED) == flags); - arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | - (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_INLINE void -arena_metadata_allocated_add(arena_t *arena, size_t size) -{ - - atomic_add_z(&arena->stats.metadata_allocated, size); -} - -JEMALLOC_INLINE void -arena_metadata_allocated_sub(arena_t *arena, size_t size) -{ - - atomic_sub_z(&arena->stats.metadata_allocated, size); -} - -JEMALLOC_INLINE size_t -arena_metadata_allocated_get(arena_t *arena) -{ - - return (atomic_read_z(&arena->stats.metadata_allocated)); -} - -JEMALLOC_INLINE bool -arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - assert(prof_interval != 0); - - arena->prof_accumbytes += accumbytes; - if (arena->prof_accumbytes >= prof_interval) { - arena->prof_accumbytes -= prof_interval; - return (true); - } - return (false); -} - -JEMALLOC_INLINE bool -arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (likely(prof_interval == 0)) - return (false); - return (arena_prof_accum_impl(arena, accumbytes)); -} - -JEMALLOC_INLINE bool -arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (likely(prof_interval == 0)) - return (false); - - { - bool ret; - - malloc_mutex_lock(tsdn, &arena->lock); - ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(tsdn, &arena->lock); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) -{ - szind_t binind; - - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - - if (config_debug) { - arena_chunk_t *chunk; - arena_t *arena; - size_t pageind; - size_t actual_mapbits; - size_t rpages_ind; - const arena_run_t *run; - arena_bin_t *bin; - szind_t run_binind, actual_binind; - arena_bin_info_t *bin_info; - const arena_chunk_map_misc_t *miscelm; - const void *rpages; - - assert(binind != BININD_INVALID); - assert(binind < NBINS); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = extent_node_arena_get(&chunk->node); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - actual_mapbits = arena_mapbits_get(chunk, pageind); - assert(mapbits == actual_mapbits); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, - pageind); - miscelm = arena_miscelm_get_const(chunk, rpages_ind); - run = &miscelm->run; - run_binind = run->binind; - bin = &arena->bins[run_binind]; - actual_binind = (szind_t)(bin - arena->bins); - assert(run_binind == actual_binind); - bin_info = &arena_bin_info[actual_binind]; - rpages = arena_miscelm_to_rpages(miscelm); - assert(((uintptr_t)ptr - ((uintptr_t)rpages + - (uintptr_t)bin_info->reg0_offset)) % 
bin_info->reg_interval - == 0); - } - - return (binind); -} -# endif /* JEMALLOC_ARENA_INLINE_A */ - -# ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE szind_t -arena_bin_index(arena_t *arena, arena_bin_t *bin) -{ - szind_t binind = (szind_t)(bin - arena->bins); - assert(binind < NBINS); - return (binind); -} - -JEMALLOC_INLINE size_t -arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) -{ - size_t diff, interval, shift, regind; - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - void *rpages = arena_miscelm_to_rpages(miscelm); - - /* - * Freeing a pointer lower than region zero can cause assertion - * failure. - */ - assert((uintptr_t)ptr >= (uintptr_t)rpages + - (uintptr_t)bin_info->reg0_offset); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages - - bin_info->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - interval = bin_info->reg_interval; - shift = ffs_zu(interval) - 1; - diff >>= shift; - interval >>= shift; - - if (interval == 1) { - /* The divisor was a power of 2. */ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. - */ -#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS) -#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1) - static const size_t interval_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t)) - + 2))) { - regind = (diff * interval_invs[interval - 3]) >> - SIZE_INV_SHIFT; - } else - regind = diff / interval; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * interval); - assert(regind < bin_info->nregs); - - return (regind); -} - -JEMALLOC_INLINE prof_tctx_t * -arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) -{ - prof_tctx_t *ret; - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) - ret = (prof_tctx_t *)(uintptr_t)1U; - else { - arena_chunk_map_misc_t *elm = - arena_miscelm_get_mutable(chunk, pageind); - ret = atomic_read_p(&elm->prof_tctx_pun); - } - } else - ret = huge_prof_tctx_get(tsdn, ptr); - - return (ret); -} - -JEMALLOC_INLINE void -arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx) -{ - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind = ((uintptr_t)ptr 
- (uintptr_t)chunk) >> LG_PAGE; - - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - - if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > - (uintptr_t)1U)) { - arena_chunk_map_misc_t *elm; - - assert(arena_mapbits_large_get(chunk, pageind) != 0); - - elm = arena_miscelm_get_mutable(chunk, pageind); - atomic_write_p(&elm->prof_tctx_pun, tctx); - } else { - /* - * tctx must always be initialized for large runs. - * Assert that the surrounding conditional logic is - * equivalent to checking whether ptr refers to a large - * run. - */ - assert(arena_mapbits_large_get(chunk, pageind) == 0); - } - } else - huge_prof_tctx_set(tsdn, ptr, tctx); -} - -JEMALLOC_INLINE void -arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *old_tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && - (uintptr_t)old_tctx > (uintptr_t)1U))) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - size_t pageind; - arena_chunk_map_misc_t *elm; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> - LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != - 0); - assert(arena_mapbits_large_get(chunk, pageind) != 0); - - elm = arena_miscelm_get_mutable(chunk, pageind); - atomic_write_p(&elm->prof_tctx_pun, - (prof_tctx_t *)(uintptr_t)1U); - } else - huge_prof_tctx_reset(tsdn, ptr); - } -} - -JEMALLOC_ALWAYS_INLINE void -arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) -{ - tsd_t *tsd; - ticker_t *decay_ticker; - - if (unlikely(tsdn_null(tsdn))) - return; - tsd = tsdn_tsd(tsdn); - decay_ticker = decay_ticker_get(tsd, arena->ind); - if (unlikely(decay_ticker == NULL)) - return; - if (unlikely(ticker_ticks(decay_ticker, nticks))) - arena_purge(tsdn, arena, false); -} - -JEMALLOC_ALWAYS_INLINE void -arena_decay_tick(tsdn_t *tsdn, arena_t *arena) -{ - - arena_decay_ticks(tsdn, arena, 1); -} - -JEMALLOC_ALWAYS_INLINE void * -arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, - tcache_t *tcache, bool slow_path) -{ - - assert(!tsdn_null(tsdn) || tcache == NULL); - assert(size != 0); - - if (likely(tcache != NULL)) { - if (likely(size <= SMALL_MAXCLASS)) { - return (tcache_alloc_small(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path)); - } - if (likely(size <= tcache_maxclass)) { - return (tcache_alloc_large(tsdn_tsd(tsdn), arena, - tcache, size, ind, zero, slow_path)); - } - /* (size > tcache_maxclass) case falls through. */ - assert(size > tcache_maxclass); - } - - return (arena_malloc_hard(tsdn, arena, size, ind, zero)); -} - -JEMALLOC_ALWAYS_INLINE arena_t * -arena_aalloc(const void *ptr) -{ - arena_chunk_t *chunk; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) - return (extent_node_arena_get(&chunk->node)); - else - return (huge_aalloc(ptr)); -} - -/* Return the size of the allocation pointed to by ptr. 
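(Aside, not part of the diff: the division-free region-index trick in arena_run_regind() earlier in this header can be checked in isolation. The 2^21 constant follows that function's own comment; the deleted header actually derives SIZE_INV_SHIFT from LG_RUN_MAXREGS, so treat this as a simplified sketch:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SIZE_INV_SHIFT 21
#define SIZE_INV(s) ((((size_t)1 << SIZE_INV_SHIFT) / (s)) + 1)

int main(void) {
    /* Divide by 3 (not a power of two) via multiply-and-shift. */
    size_t interval = 3;
    for (size_t diff = 0; diff < 3000; diff += interval) {
        size_t regind = (diff * SIZE_INV(interval)) >> SIZE_INV_SHIFT;
        assert(regind == diff / interval); /* exact for in-range multiples */
    }
    printf("fixed-point inverse matched exact division\n");
    return 0;
}

A multiply and a shift cost a few cycles where a hardware divide can cost tens, which is where the comment's claim of over 20% allocator throughput comes from.)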
*/ -JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind; - szind_t binind; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - binind = arena_mapbits_binind_get(chunk, pageind); - if (unlikely(binind == BININD_INVALID || (config_prof && !demote - && arena_mapbits_large_get(chunk, pageind) != 0))) { - /* - * Large allocation. In the common case (demote), and - * as this is an inline function, most callers will only - * end up looking at binind to determine that ptr is a - * small allocation. - */ - assert(config_cache_oblivious || ((uintptr_t)ptr & - PAGE_MASK) == 0); - ret = arena_mapbits_large_size_get(chunk, pageind) - - large_pad; - assert(ret != 0); - assert(pageind + ((ret+large_pad)>>LG_PAGE) <= - chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, - pageind+((ret+large_pad)>>LG_PAGE)-1)); - } else { - /* - * Small allocation (possibly promoted to a large - * object). - */ - assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, - arena_mapbits_get(chunk, pageind)) == binind); - ret = index2size(binind); - } - } else - ret = huge_salloc(tsdn, ptr); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - assert(!tsdn_null(tsdn) || tcache == NULL); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { - /* Small allocation. */ - if (likely(tcache != NULL)) { - szind_t binind = arena_ptr_small_binind_get(ptr, - mapbits); - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, - binind, slow_path); - } else { - arena_dalloc_small(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr, pageind); - } - } else { - size_t size = arena_mapbits_large_size_get(chunk, - pageind); - - assert(config_cache_oblivious || ((uintptr_t)ptr & - PAGE_MASK) == 0); - - if (likely(tcache != NULL) && size - large_pad <= - tcache_maxclass) { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, - size - large_pad, slow_path); - } else { - arena_dalloc_large(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr); - } - } - } else - huge_dalloc(tsdn, ptr); -} - -JEMALLOC_ALWAYS_INLINE void -arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path) -{ - arena_chunk_t *chunk; - - assert(!tsdn_null(tsdn) || tcache == NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { - if (config_prof && opt_prof) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> - LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != - 0); - if (arena_mapbits_large_get(chunk, pageind) != 0) { - /* - * Make sure to use promoted size, not request - * size. - */ - size = arena_mapbits_large_size_get(chunk, - pageind) - large_pad; - } - } - assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); - - if (likely(size <= SMALL_MAXCLASS)) { - /* Small allocation. 
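arena_malloc() and arena_dalloc() above share a three-tier dispatch: thread-cache small bins, thread-cache large bins up to tcache_maxclass, then the arena itself (chunk-aligned huge objects are handled separately). The shape of that routing, reduced to a hypothetical skeleton with stand-in thresholds:

#include <stdbool.h>
#include <stddef.h>

#define SMALL_MAX	((size_t)14336)	/* stand-in for SMALL_MAXCLASS */
#define TCACHE_MAX	((size_t)32768)	/* stand-in for tcache_maxclass */

enum tier { TIER_TCACHE_SMALL, TIER_TCACHE_LARGE, TIER_ARENA };

/* The size-tiered routing used by arena_malloc() above. */
static enum tier
dispatch(size_t size, bool have_tcache)
{
	if (have_tcache && size <= SMALL_MAX)
		return (TIER_TCACHE_SMALL);
	if (have_tcache && size <= TCACHE_MAX)
		return (TIER_TCACHE_LARGE);
	return (TIER_ARENA);
}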
*/ - if (likely(tcache != NULL)) { - szind_t binind = size2index(size); - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, - binind, slow_path); - } else { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_dalloc_small(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr, pageind); - } - } else { - assert(config_cache_oblivious || ((uintptr_t)ptr & - PAGE_MASK) == 0); - - if (likely(tcache != NULL) && size <= tcache_maxclass) { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, - size, slow_path); - } else { - arena_dalloc_large(tsdn, - extent_node_arena_get(&chunk->node), chunk, - ptr); - } - } - } else - huge_dalloc(tsdn, ptr); -} -# endif /* JEMALLOC_ARENA_INLINE_B */ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/assert.h b/sources/jemalloc/include-win/jemalloc/internal/assert.h deleted file mode 100755 index 6f8f7eb9..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/assert.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. - */ -#ifndef assert -#define assert(e) do { \ - if (unlikely(config_debug && !(e))) { \ - malloc_printf( \ - ": %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - ": %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ - unreachable(); \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf(": %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (unlikely(config_debug && !(e))) \ - not_implemented(); \ -} while (0) -#endif - - diff --git a/sources/jemalloc/include-win/jemalloc/internal/atomic.h b/sources/jemalloc/include-win/jemalloc/internal/atomic.h deleted file mode 100755 index 3f15ea14..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/atomic.h +++ /dev/null @@ -1,651 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#define atomic_read_uint64(p) atomic_add_uint64(p, 0) -#define atomic_read_uint32(p) atomic_add_uint32(p, 0) -#define atomic_read_p(p) atomic_add_p(p, NULL) -#define atomic_read_z(p) atomic_add_z(p, 0) -#define atomic_read_u(p) atomic_add_u(p, 0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -/* - * All arithmetic functions return the arithmetic result of the atomic - * operation. Some atomic operation APIs return the value prior to mutation, in - * which case the following functions must redundantly compute the result so - * that it can be returned. These functions are normally inlined, so the extra - * operations can be optimized away if the return values aren't used by the - * callers. 
- * - * atomic_read_( *p) { return (*p); } - * atomic_add_( *p, x) { return (*p += x); } - * atomic_sub_( *p, x) { return (*p -= x); } - * bool atomic_cas_( *p, c, s) - * { - * if (*p != c) - * return (true); - * *p = s; - * return (false); - * } - * void atomic_write_( *p, x) { *p = x; } - */ - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); -void atomic_write_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); -void atomic_write_uint32(uint32_t *p, uint32_t x); -void *atomic_add_p(void **p, void *x); -void *atomic_sub_p(void **p, void *x); -bool atomic_cas_p(void **p, void *c, void *s); -void atomic_write_p(void **p, const void *x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -bool atomic_cas_z(size_t *p, size_t c, size_t s); -void atomic_write_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); -bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); -void atomic_write_u(unsigned *p, unsigned x); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ -#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# if (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - uint64_t t = x; - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - uint64_t t; - - x = (uint64_t)(-(int64_t)x); - t = x; - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - uint8_t success; - - asm volatile ( - "lock; cmpxchgq %4, %0;" - "sete %1;" - : "=m" (*p), "=a" (success) /* Outputs. */ - : "m" (*p), "a" (c), "r" (s) /* Inputs. */ - : "memory" /* Clobbers. */ - ); - - return (!(bool)success); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "xchgq %1, %0;" /* Lock is implied by xchgq. */ - : "=m" (*p), "+r" (x) /* Outputs. */ - : "m" (*p) /* Inputs. */ - : "memory" /* Clobbers. 
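The return-value convention spelled out in the comment above — arithmetic ops return the post-mutation value, CAS returns true on failure — maps directly onto C11 <stdatomic.h>, which is essentially what the JEMALLOC_C11ATOMICS branch below does. A standalone sketch with hypothetical demo_ names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Post-mutation result, as atomic_add_uint64() returns above. */
static uint64_t
demo_add_uint64(_Atomic uint64_t *p, uint64_t x)
{
	return (atomic_fetch_add(p, x) + x);	/* fetch_add yields the old value */
}

/* true means the CAS failed, matching atomic_cas_uint64(). */
static bool
demo_cas_uint64(_Atomic uint64_t *p, uint64_t c, uint64_t s)
{
	return (!atomic_compare_exchange_strong(p, &c, s));
}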
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - atomic_store(a, x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. - */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - atomic_store_rel_long(p, x); -} -# elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - uint64_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. 
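When a platform exposes CAS but no atomic exchange — the situation the OSAtomic comment here describes — a store can be emulated by polling CAS until it succeeds. The same pattern in standalone C11 form (the demo_ name is hypothetical):

#include <stdatomic.h>
#include <stdint.h>

/* Atomic store built from CAS alone: retry until no other writer races us. */
static void
demo_write_uint64(_Atomic uint64_t *p, uint64_t x)
{
	uint64_t o = atomic_load(p);

	/* On failure, compare_exchange_weak reloads o with the current value. */
	while (!atomic_compare_exchange_weak(p, &o, x))
		;
}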
*/ - do { - o = atomic_read_uint64(p); - } while (atomic_cas_uint64(p, o, x)); -} -# elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - uint64_t o; - - o = InterlockedCompareExchange64(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - InterlockedExchange64(p, x); -} -# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - __sync_lock_test_and_set(p, x); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif -#endif - -/******************************************************************************/ -/* 32-bit operations. */ -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t = x; - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t; - - x = (uint32_t)(-(int32_t)x); - t = x; - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint8_t success; - - asm volatile ( - "lock; cmpxchgl %4, %0;" - "sete %1;" - : "=m" (*p), "=a" (success) /* Outputs. */ - : "m" (*p), "a" (c), "r" (s) /* Inputs. */ - : "memory" - ); - - return (!(bool)success); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "xchgl %1, %0;" /* Lock is implied by xchgl. */ - : "=m" (*p), "+r" (x) /* Outputs. */ - : "m" (*p) /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - atomic_store(a, x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!atomic_cmpset_32(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - atomic_store_rel_32(p, x); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - uint32_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. */ - do { - o = atomic_read_uint32(p); - } while (atomic_cas_uint32(p, o, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint32_t o; - - o = InterlockedCompareExchange(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - InterlockedExchange(p, x); -} -#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - __sync_lock_test_and_set(p, x); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* Pointer operations. 
*/ -JEMALLOC_INLINE void * -atomic_add_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE void * -atomic_sub_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_p(void **p, void *c, void *s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_p(void **p, const void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* size_t operations. */ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_z(size_t *p, size_t c, size_t s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* unsigned operations. 
*/ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_u(unsigned *p, unsigned c, unsigned s) -{ - -#if (LG_SIZEOF_INT == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_INT == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_INT == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/base.h b/sources/jemalloc/include-win/jemalloc/internal/base.h deleted file mode 100755 index d6b81e16..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/base.h +++ /dev/null @@ -1,25 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *base_alloc(tsdn_t *tsdn, size_t size); -void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, - size_t *mapped); -bool base_boot(void); -void base_prefork(tsdn_t *tsdn); -void base_postfork_parent(tsdn_t *tsdn); -void base_postfork_child(tsdn_t *tsdn); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/bitmap.h b/sources/jemalloc/include-win/jemalloc/internal/bitmap.h deleted file mode 100755 index 36f38b59..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/bitmap.h +++ /dev/null @@ -1,274 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS -#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) - -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; -typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - -/* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - -/* - * Do some analysis on how big the bitmap is before we use a tree. 
For a brute - * force linear search, if we would have to call ffs_lu() more than 2^3 times, - * use a tree instead. - */ -#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 -# define USE_TREE -#endif - -/* Number of groups required to store a given number of bits. */ -#define BITMAP_BITS2GROUPS(nbits) \ - ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) - -/* - * Number of groups required at a particular level for a given number of bits. - */ -#define BITMAP_GROUPS_L0(nbits) \ - BITMAP_BITS2GROUPS(nbits) -#define BITMAP_GROUPS_L1(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) -#define BITMAP_GROUPS_L2(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) -#define BITMAP_GROUPS_L3(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ - BITMAP_BITS2GROUPS((nbits))))) - -/* - * Assuming the number of levels, number of groups required for a given number - * of bits. - */ -#define BITMAP_GROUPS_1_LEVEL(nbits) \ - BITMAP_GROUPS_L0(nbits) -#define BITMAP_GROUPS_2_LEVEL(nbits) \ - (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) -#define BITMAP_GROUPS_3_LEVEL(nbits) \ - (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) -#define BITMAP_GROUPS_4_LEVEL(nbits) \ - (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) - -/* - * Maximum number of groups required to support LG_BITMAP_MAXBITS. - */ -#ifdef USE_TREE - -#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) -#else -# error "Unsupported bitmap size" -#endif - -/* Maximum number of levels possible. */ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) - -#else /* USE_TREE */ - -#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) - -#endif /* USE_TREE */ - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; -}; - -struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - -#ifdef USE_TREE - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -#else /* USE_TREE */ - /* Number of groups necessary for nbits. 
*/ - size_t ngroups; -#endif /* USE_TREE */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); -size_t bitmap_size(const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ -#ifdef USE_TREE - size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); -#else - size_t i; - - for (i = 0; i < binfo->ngroups; i++) { - if (bitmap[i] != 0) - return (false); - } - return (true); -#endif -} - -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; - return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); -} - -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); - assert(!bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); -#ifdef USE_TREE - /* Propagate group state transitions up the tree. */ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (g != 0) - break; - } - } -#endif -} - -/* sfu: set first unset. 
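Before the tree-walking bitmap_sfu() below, it may help to see "set first unset" on a flat bitmap — the !USE_TREE path — reduced to its essentials. A simplified standalone sketch that keeps the inverted convention above (a 1 bit means free) and assumes the caller checks for fullness first:

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <strings.h>	/* ffsl() */

#define NBITS	128
#define GROUP_NBITS	(sizeof(unsigned long) * CHAR_BIT)
#define NGROUPS	((NBITS + GROUP_NBITS - 1) / GROUP_NBITS)

/* 1 = free, 0 = in use: the inverted convention described above. */
static unsigned long groups[NGROUPS];

static void
bmp_init(void)
{
	size_t i;

	for (i = 0; i < NGROUPS; i++)
		groups[i] = ~0UL;
}

/* Linear set-first-unset: the !USE_TREE path reduced to its essentials. */
static size_t
bmp_sfu(void)
{
	size_t i = 0;
	int bit;

	while ((bit = ffsl((long)groups[i])) == 0)
		i++;				/* caller guarantees a free bit */
	groups[i] &= ~(1UL << (bit - 1));	/* mark it in use */
	return (i * GROUP_NBITS + (size_t)(bit - 1));
}

int
main(void)
{
	bmp_init();
	assert(bmp_sfu() == 0);
	assert(bmp_sfu() == 1);
	return (0);
}

Successive calls return 0, 1, 2, …; the tree variant below reaches the same bit with one ffs_lu() per level instead of a scan over all groups.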
*/ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t bit; - bitmap_t g; - unsigned i; - - assert(!bitmap_full(bitmap, binfo)); - -#ifdef USE_TREE - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; - bit = ffs_lu(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); - } -#else - i = 0; - g = bitmap[0]; - while ((bit = ffs_lu(g)) == 0) { - i++; - g = bitmap[i]; - } - bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); -#endif - bitmap_set(bitmap, binfo, bit); - return (bit); -} - -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - UNUSED bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); - assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(!bitmap_get(bitmap, binfo, bit)); -#ifdef USE_TREE - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); - assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); - g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (!propagate) - break; - } - } -#endif /* USE_TREE */ -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk.h b/sources/jemalloc/include-win/jemalloc/internal/chunk.h deleted file mode 100755 index 55df9acc..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/chunk.h +++ /dev/null @@ -1,97 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 21 - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -#define CHUNK_HOOKS_INITIALIZER { \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - NULL \ -} - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern size_t opt_lg_chunk; -extern const char *opt_dss; - -extern rtree_t chunks_rtree; - -extern size_t chunksize; -extern size_t chunksize_mask; /* (chunksize - 1). 
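The CHUNK_ADDR2BASE()/CHUNK_ADDR2OFFSET() masks defined earlier in this header deserve a concrete check: with the default 2 MiB chunks (LG_CHUNK_DEFAULT = 21), any interior pointer maps to its chunk base with a single AND. A standalone sketch using a made-up address:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define LG_CHUNK	21			/* LG_CHUNK_DEFAULT above */
#define CHUNKSIZE	((uintptr_t)1 << LG_CHUNK)
#define CHUNK_MASK	(CHUNKSIZE - 1)
#define ADDR2BASE(a)	((void *)((uintptr_t)(a) & ~CHUNK_MASK))
#define ADDR2OFFSET(a)	((size_t)((uintptr_t)(a) & CHUNK_MASK))

int
main(void)
{
	uintptr_t p = (3 * CHUNKSIZE) + 0x1234;	/* made-up interior address */

	assert((uintptr_t)ADDR2BASE(p) == 3 * CHUNKSIZE);
	assert(ADDR2OFFSET(p) == 0x1234);
	return (0);
}

Huge allocations are chunk-aligned, which is why the fast paths earlier in this diff can test chunk != ptr to tell arena-backed pointers from huge ones.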
*/ -extern size_t chunk_npages; - -extern const chunk_hooks_t chunk_hooks_default; - -chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); -chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, - const chunk_hooks_t *chunk_hooks); - -bool chunk_register(const void *chunk, const extent_node_t *node, - bool *gdump); -void chunk_deregister(const void *chunk, const extent_node_t *node); -void *chunk_alloc_base(size_t size); -void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - size_t *sn, bool *zero, bool *commit, bool dalloc_node); -void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - size_t *sn, bool *zero, bool *commit); -void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, - bool committed); -void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, - bool zeroed, bool committed); -bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, - size_t length); -bool chunk_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -extent_node_t *chunk_lookup(const void *chunk, bool dependent); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) -JEMALLOC_INLINE extent_node_t * -chunk_lookup(const void *ptr, bool dependent) -{ - - return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - -#include "jemalloc/internal/chunk_dss.h" -#include "jemalloc/internal/chunk_mmap.h" diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h b/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h deleted file mode 100755 index da8511ba..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h +++ /dev/null @@ -1,37 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, - dss_prec_secondary = 2, - - dss_prec_limit = 3 -} dss_prec_t; -#define DSS_PREC_DEFAULT dss_prec_secondary -#define DSS_DEFAULT "secondary" - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -extern const char *dss_prec_names[]; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -dss_prec_t chunk_dss_prec_get(void); -bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit); -bool chunk_in_dss(void *chunk); -bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); -void chunk_dss_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h 
b/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h deleted file mode 100755 index 6f2d0ac2..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h +++ /dev/null @@ -1,21 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit); -bool chunk_dalloc_mmap(void *chunk, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ckh.h b/sources/jemalloc/include-win/jemalloc/internal/ckh.h deleted file mode 100755 index f75ad90b..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/ckh.h +++ /dev/null @@ -1,86 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; - -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); - -/* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. - */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; - -struct ckh_s { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ - uint64_t prng_state; - - /* Total number of items. */ - size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; - - /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. 
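LG_CKH_BUCKET_CELLS above is chosen so that one bucket of two-pointer cells fills one L1 cache line exactly. A quick standalone check under common assumptions (64-byte lines and 8-byte pointers — both assumptions here; the real values come from configure):

#include <assert.h>
#include <stddef.h>

#define LG_CACHELINE	6	/* 64-byte lines: an assumption */
#define LG_SIZEOF_PTR	3	/* 8-byte pointers: an assumption */
#define LG_CKH_BUCKET_CELLS	(LG_CACHELINE - LG_SIZEOF_PTR - 1)

struct ckhc { const void *key, *data; };	/* one cell: two pointers */

int
main(void)
{
	/* 2^2 = 4 cells of 16 bytes each: exactly one 64-byte line. */
	assert(((size_t)1 << LG_CKH_BUCKET_CELLS) * sizeof(struct ckhc)
	    == ((size_t)1 << LG_CACHELINE));
	return (0);
}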
*/ - ckhc_t *tab; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); -void ckh_delete(tsd_t *tsd, ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data); -bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ctl.h b/sources/jemalloc/include-win/jemalloc/internal/ctl.h deleted file mode 100755 index af0f6d7c..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/ctl.h +++ /dev/null @@ -1,118 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; - const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(tsd_t *, const size_t *, size_t, void *, - size_t *, void *, size_t); -}; - -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, - size_t); -}; - -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - ssize_t lg_dirty_mult; - ssize_t decay_time; - size_t pactive; - size_t pdirty; - - /* The remainder are only populated if config_stats is true. */ - - arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ - malloc_huge_stats_t *hstats; /* nhclasses elements. */ -}; - -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t metadata; - size_t resident; - size_t mapped; - size_t retained; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); -int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, - size_t *miblenp); - -int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(tsdn_t *tsdn); -void ctl_postfork_parent(tsdn_t *tsdn); -void ctl_postfork_child(tsdn_t *tsdn); - -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - ": Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf(": Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - ": Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/extent.h b/sources/jemalloc/include-win/jemalloc/internal/extent.h deleted file mode 100755 index fc77f9f5..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/extent.h +++ /dev/null @@ -1,275 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct extent_node_s extent_node_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Tree of extents. Use accessor functions for en_* fields. */ -struct extent_node_s { - /* Arena from which this extent came, if any. */ - arena_t *en_arena; - - /* Pointer to the extent that this tree node is responsible for. */ - void *en_addr; - - /* Total region size. */ - size_t en_size; - - /* - * Serial number (potentially non-unique). - * - * In principle serial numbers can wrap around on 32-bit systems if - * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall - * back on address comparison for equal serial numbers, stable (if - * imperfect) ordering is maintained. - * - * Serial numbers may not be unique even in the absence of wrap-around, - * e.g. when splitting an extent and assigning the same serial number to - * both resulting adjacent extents. - */ - size_t en_sn; - - /* - * The zeroed flag is used by chunk recycling code to track whether - * memory is zero-filled. - */ - bool en_zeroed; - - /* - * True if physical memory is committed to the extent, whether - * explicitly or implicitly as on a system that overcommits and - * satisfies physical memory needs on demand via soft page faults. - */ - bool en_committed; - - /* - * The achunk flag is used to validate that huge allocation lookups - * don't return arena chunks. - */ - bool en_achunk; - - /* Profile counters, used for huge objects. 
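The x* macros above are abort-on-failure wrappers around jemalloc's public mallctl() interface. A typical caller against a stock jemalloc build looks like this (stats.allocated is only populated in stats-enabled builds, hence the return check):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t allocated, sz = sizeof(allocated);

	/* The public entry point the x* wrappers harden with abort(). */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu\n", allocated);
	return (0);
}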
*/ - prof_tctx_t *en_prof_tctx; - - /* Linkage for arena's runs_dirty and chunks_cache rings. */ - arena_runs_dirty_link_t rd; - qr(extent_node_t) cc_link; - - union { - /* Linkage for the size/sn/address-ordered tree. */ - rb_node(extent_node_t) szsnad_link; - - /* Linkage for arena's achunks, huge, and node_cache lists. */ - ql_elm(extent_node_t) ql_link; - }; - - /* Linkage for the address-ordered tree. */ - rb_node(extent_node_t) ad_link; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_JET -size_t extent_size_quantize_floor(size_t size); -#endif -size_t extent_size_quantize_ceil(size_t size); - -rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) - -rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_t *extent_node_arena_get(const extent_node_t *node); -void *extent_node_addr_get(const extent_node_t *node); -size_t extent_node_size_get(const extent_node_t *node); -size_t extent_node_sn_get(const extent_node_t *node); -bool extent_node_zeroed_get(const extent_node_t *node); -bool extent_node_committed_get(const extent_node_t *node); -bool extent_node_achunk_get(const extent_node_t *node); -prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); -void extent_node_arena_set(extent_node_t *node, arena_t *arena); -void extent_node_addr_set(extent_node_t *node, void *addr); -void extent_node_size_set(extent_node_t *node, size_t size); -void extent_node_sn_set(extent_node_t *node, size_t sn); -void extent_node_zeroed_set(extent_node_t *node, bool zeroed); -void extent_node_committed_set(extent_node_t *node, bool committed); -void extent_node_achunk_set(extent_node_t *node, bool achunk); -void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); -void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, - size_t size, size_t sn, bool zeroed, bool committed); -void extent_node_dirty_linkage_init(extent_node_t *node); -void extent_node_dirty_insert(extent_node_t *node, - arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); -void extent_node_dirty_remove(extent_node_t *node); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) -JEMALLOC_INLINE arena_t * -extent_node_arena_get(const extent_node_t *node) -{ - - return (node->en_arena); -} - -JEMALLOC_INLINE void * -extent_node_addr_get(const extent_node_t *node) -{ - - return (node->en_addr); -} - -JEMALLOC_INLINE size_t -extent_node_size_get(const extent_node_t *node) -{ - - return (node->en_size); -} - -JEMALLOC_INLINE size_t -extent_node_sn_get(const extent_node_t *node) -{ - - return (node->en_sn); -} - -JEMALLOC_INLINE bool -extent_node_zeroed_get(const extent_node_t *node) -{ - - return (node->en_zeroed); -} - -JEMALLOC_INLINE bool -extent_node_committed_get(const extent_node_t *node) -{ - - assert(!node->en_achunk); - return (node->en_committed); -} - -JEMALLOC_INLINE bool -extent_node_achunk_get(const extent_node_t *node) -{ - - return (node->en_achunk); -} - -JEMALLOC_INLINE prof_tctx_t * -extent_node_prof_tctx_get(const extent_node_t *node) -{ - - return (node->en_prof_tctx); -} - -JEMALLOC_INLINE void -extent_node_arena_set(extent_node_t *node, arena_t *arena) -{ - - node->en_arena = arena; -} - 
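A space-saving detail of extent_node_s above: a node lives on either the size/sn/address tree or one of the arena lists, never both at once, so szsnad_link and ql_link can legally share storage in a union. Reduced to a hypothetical C11 skeleton (jemalloc's rb_node()/ql_elm() macros expand to comparable embedded linkage):

struct node {
	union {
		/* Linked into the size/sn/address tree... */
		struct { struct node *left, *right; } tree_link;
		/* ...or into a doubly-linked list, never both at once. */
		struct { struct node *next, *prev; } list_link;
	};
};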
-JEMALLOC_INLINE void -extent_node_addr_set(extent_node_t *node, void *addr) -{ - - node->en_addr = addr; -} - -JEMALLOC_INLINE void -extent_node_size_set(extent_node_t *node, size_t size) -{ - - node->en_size = size; -} - -JEMALLOC_INLINE void -extent_node_sn_set(extent_node_t *node, size_t sn) -{ - - node->en_sn = sn; -} - -JEMALLOC_INLINE void -extent_node_zeroed_set(extent_node_t *node, bool zeroed) -{ - - node->en_zeroed = zeroed; -} - -JEMALLOC_INLINE void -extent_node_committed_set(extent_node_t *node, bool committed) -{ - - node->en_committed = committed; -} - -JEMALLOC_INLINE void -extent_node_achunk_set(extent_node_t *node, bool achunk) -{ - - node->en_achunk = achunk; -} - -JEMALLOC_INLINE void -extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) -{ - - node->en_prof_tctx = tctx; -} - -JEMALLOC_INLINE void -extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, - size_t sn, bool zeroed, bool committed) -{ - - extent_node_arena_set(node, arena); - extent_node_addr_set(node, addr); - extent_node_size_set(node, size); - extent_node_sn_set(node, sn); - extent_node_zeroed_set(node, zeroed); - extent_node_committed_set(node, committed); - extent_node_achunk_set(node, false); - if (config_prof) - extent_node_prof_tctx_set(node, NULL); -} - -JEMALLOC_INLINE void -extent_node_dirty_linkage_init(extent_node_t *node) -{ - - qr_new(&node->rd, rd_link); - qr_new(node, cc_link); -} - -JEMALLOC_INLINE void -extent_node_dirty_insert(extent_node_t *node, - arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) -{ - - qr_meld(runs_dirty, &node->rd, rd_link); - qr_meld(chunks_dirty, node, cc_link); -} - -JEMALLOC_INLINE void -extent_node_dirty_remove(extent_node_t *node) -{ - - qr_remove(&node->rd, rd_link); - qr_remove(node, cc_link); -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/hash.h b/sources/jemalloc/include-win/jemalloc/internal/hash.h deleted file mode 100755 index 1ff2d9a0..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/hash.h +++ /dev/null @@ -1,357 +0,0 @@ -/* - * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See https://github.com/aappleby/smhasher for - * details. - */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) -/******************************************************************************/ -/* Internal implementation. 
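hash_fmix_32()/hash_fmix_64() a little further down are the standard MurmurHash3 finalizers: cheap bijective mixers that avalanche every input bit. The 64-bit one, lifted into a runnable standalone form:

#include <stdint.h>
#include <stdio.h>

/* MurmurHash3's 64-bit finalizer, as in hash_fmix_64() below. */
static uint64_t
fmix64(uint64_t k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;
	return (k);
}

int
main(void)
{
	/* Nearby inputs avalanche to unrelated outputs. */
	printf("%016llx\n%016llx\n",
	    (unsigned long long)fmix64(1), (unsigned long long)fmix64(2));
	return (0);
}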
*/ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return ((x << r) | (x >> (32 - r))); -} - -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - - return ((x << r) | (x >> (64 - r))); -} - -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ - - /* Handle unaligned read. */ - if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { - uint32_t ret; - - memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); - return (ret); - } - - return (p[i]); -} - -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ - - /* Handle unaligned read. */ - if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { - uint64_t ret; - - memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); - return (ret); - } - - return (p[i]); -} - -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ - - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return (h); -} - -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - - k ^= k >> 33; - k *= KQU(0xff51afd7ed558ccd); - k ^= k >> 33; - k *= KQU(0xc4ceb9fe1a85ec53); - k ^= k >> 33; - - return (k); -} - -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; - - uint32_t h1 = seed; - - const uint32_t c1 = 0xcc9e2d51; - const uint32_t c2 = 0x1b873593; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i); - - k1 *= c1; - k1 = hash_rotl_32(k1, 15); - k1 *= c2; - - h1 ^= k1; - h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); - - uint32_t k1 = 0; - - switch (len & 3) { - case 3: k1 ^= tail[2] << 16; - case 2: k1 ^= tail[1] << 8; - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; - - h1 = hash_fmix_32(h1); - - return (h1); -} - -UNUSED JEMALLOC_INLINE void -hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint32_t h1 = seed; - uint32_t h2 = seed; - uint32_t h3 = seed; - uint32_t h4 = seed; - - const uint32_t c1 = 0x239b961b; - const uint32_t c2 = 0xab0e9789; - const uint32_t c3 = 0x38b34ae5; - const uint32_t c4 = 0xa1e38b93; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); - - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; - - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; - - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; - - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; - - switch (len & 15) { - case 15: k4 ^= tail[14] 
<< 16; - case 14: k4 ^= tail[13] << 8; - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - case 12: k3 ^= tail[11] << 24; - case 11: k3 ^= tail[10] << 16; - case 10: k3 ^= tail[ 9] << 8; - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - case 8: k2 ^= tail[ 7] << 24; - case 7: k2 ^= tail[ 6] << 16; - case 6: k2 ^= tail[ 5] << 8; - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - case 4: k1 ^= tail[ 3] << 24; - case 3: k1 ^= tail[ 2] << 16; - case 2: k1 ^= tail[ 1] << 8; - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - h1 = hash_fmix_32(h1); - h2 = hash_fmix_32(h2); - h3 = hash_fmix_32(h3); - h4 = hash_fmix_32(h4); - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; -} - -UNUSED JEMALLOC_INLINE void -hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint64_t h1 = seed; - uint64_t h2 = seed; - - const uint64_t c1 = KQU(0x87c37b91114253d5); - const uint64_t c2 = KQU(0x4cf5ad432745937f); - - /* body */ - { - const uint64_t *blocks = (const uint64_t *) (data); - int i; - - for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); - - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; - - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; - - switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; - - h1 += h2; - h2 += h1; - - h1 = hash_fmix_64(h1); - h2 = hash_fmix_64(h2); - - h1 += h2; - h2 += h1; - - r_out[0] = h1; - r_out[1] = h2; -} - -/******************************************************************************/ -/* API. */ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ - - assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
*/ - -#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); -#else - { - uint64_t hashes[2]; - hash_x86_128(key, (int)len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; - } -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/huge.h b/sources/jemalloc/include-win/jemalloc/internal/huge.h deleted file mode 100755 index 22184d9b..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/huge.h +++ /dev/null @@ -1,35 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); -void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero); -bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t usize_min, size_t usize_max, bool zero); -void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, - size_t usize, size_t alignment, bool zero, tcache_t *tcache); -#ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(void *, size_t); -extern huge_dalloc_junk_t *huge_dalloc_junk; -#endif -void huge_dalloc(tsdn_t *tsdn, void *ptr); -arena_t *huge_aalloc(const void *ptr); -size_t huge_salloc(tsdn_t *tsdn, const void *ptr); -prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); -void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h deleted file mode 100755 index f8df7996..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h +++ /dev/null @@ -1,1294 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H - -#include "jemalloc_internal_defs.h" -#include "jemalloc/internal/jemalloc_internal_decls.h" - -#ifdef JEMALLOC_UTRACE -#include -#endif - -#define JEMALLOC_NO_DEMANGLE -#ifdef JEMALLOC_JET -# define JEMALLOC_N(n) jet_##n -# include "jemalloc/internal/public_namespace.h" -# define JEMALLOC_NO_RENAME -# include "../jemalloc.h" -# undef JEMALLOC_NO_RENAME -#else -# define JEMALLOC_N(n) je_##n -# include "../jemalloc.h" -#endif -#include "jemalloc/internal/private_namespace.h" - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool have_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; -static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; -static const bool config_prof = -#ifdef JEMALLOC_PROF 
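/*
 * The hash() entry point that closes hash.h above picks hash_x64_128() on
 * 64-bit little-endian targets (LG_SIZEOF_PTR == 3) and otherwise narrows
 * hash_x86_128()'s two 64-bit lanes into size_t. A self-contained sketch of
 * the narrowing path; hash128_stub stands in for the real 128-bit hash.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void hash128_stub(const void *key, int len, uint32_t seed,
    uint64_t r_out[2]) {
	(void)key;
	r_out[0] = (uint64_t)seed + (uint64_t)len;   /* not a real hash */
	r_out[1] = ~r_out[0];
}

static void hash_narrow(const void *key, size_t len, uint32_t seed,
    size_t r_hash[2]) {
	uint64_t hashes[2];
	hash128_stub(key, (int)len, seed, hashes);
	r_hash[0] = (size_t)hashes[0];   /* truncates where size_t is 32-bit */
	r_hash[1] = (size_t)hashes[1];
}

int main(void) {
	size_t h[2];
	hash_narrow("key", 3, 0x5eed, h);
	printf("%zx %zx\n", h[0], h[1]);
	return 0;
}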
- true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool maps_coalesce = -#ifdef JEMALLOC_MAPS_COALESCE - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_thp = -#ifdef JEMALLOC_THP - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef JEMALLOC_IVSALLOC - true -#else - false -#endif - ; -static const bool config_cache_oblivious = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - true -#else - false -#endif - ; - -#ifdef JEMALLOC_C11ATOMICS -#include -#endif - -#ifdef JEMALLOC_ATOMIC9 -#include -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include -#endif - -#ifdef JEMALLOC_ZONE -#include -#include -#include -#endif - -#include "jemalloc/internal/ph.h" -#ifndef __PGI -#define RB_COMPACT -#endif -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. - */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#include "jemalloc/internal/jemalloc_internal_macros.h" - -/* Page size index type. */ -typedef unsigned pszind_t; - -/* Size class index type. */ -typedef unsigned szind_t; - -/* - * Flags bits: - * - * a: arena - * t: tcache - * 0: unused - * z: zero - * n: alignment - * - * aaaaaaaa aaaatttt tttttttt 0znnnnnn - */ -#define MALLOCX_ARENA_MASK ((int)~0xfffff) -#define MALLOCX_ARENA_MAX 0xffe -#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) -#define MALLOCX_TCACHE_MAX 0xffd -#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) -/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ -#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ - (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) -#define MALLOCX_ALIGN_GET(flags) \ - (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) -#define MALLOCX_ZERO_GET(flags) \ - ((bool)(flags & MALLOCX_ZERO)) - -#define MALLOCX_TCACHE_GET(flags) \ - (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) -#define MALLOCX_ARENA_GET(flags) \ - (((unsigned)(((unsigned)flags) >> 20)) - 1) - -/* Smallest size class to support. 
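/*
 * The "Flags bits" layout documented above packs mallocx() arguments into
 * one int: lg(alignment) in bits 0..5, the zero flag at bit 6, the tcache id
 * (biased by 2) in bits 8..19, and the arena index (biased by 1) in bits
 * 20..31. A sketch of the encode/decode round-trip using the same shifts and
 * biases; mallocx_flags is an illustrative helper, not jemalloc API.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static int mallocx_flags(unsigned lg_align, bool zero, unsigned tcache,
    unsigned arena) {
	return (int)(lg_align | (zero ? 0x40u : 0u) |
	    ((tcache + 2u) << 8) | ((arena + 1u) << 20));
}

int main(void) {
	int flags = mallocx_flags(4, true, 7, 3);
	assert(((size_t)1 << (flags & 0x3f)) == 16);         /* alignment */
	assert((flags & 0x40) != 0);                         /* zero      */
	assert((unsigned)((flags & 0xfff00) >> 8) - 2 == 7); /* tcache    */
	assert(((unsigned)flags >> 20) - 1 == 3);            /* arena     */
	return 0;
}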
*/ -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __aarch64__ -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __or1k__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __riscv__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifdef __le32__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "Unknown minimum alignment for architecture; specify via " - "--with-lg-quantum" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. LG_PAGE is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define PAGE ((size_t)(1U << LG_PAGE)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the page base address for the page containing address a. */ -#define PAGE_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~PAGE_MASK)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & ((~(alignment)) + 1)) - -/* Declare a variable-length array. 
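/*
 * Every *_CEILING macro above is one branch-free idiom: for a power-of-two
 * granule g, (s + (g - 1)) & ~(g - 1) rounds s up to the next multiple of g
 * without a division. A minimal sketch checking it with the quantum (16),
 * cacheline (64), and page (4096) granules used above:
 */
#include <assert.h>
#include <stddef.h>

static size_t ceil_to(size_t s, size_t g) {
	assert(g != 0 && (g & (g - 1)) == 0);   /* g must be a power of two */
	return (s + (g - 1)) & ~(g - 1);
}

int main(void) {
	assert(ceil_to(17, 16) == 32);       /* QUANTUM_CEILING(17)   */
	assert(ceil_to(65, 64) == 128);      /* CACHELINE_CEILING(65) */
	assert(ceil_to(4096, 4096) == 4096); /* already a multiple    */
	return 0;
}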
*/ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include -# else -# include -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * (count)) -#else -# define VARIABLE_ARRAY(type, name, count) type name[(count)] -#endif - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#define JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/extent.h" -#define JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern const char *opt_junk; -extern bool opt_junk_alloc; -extern bool opt_junk_free; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_xmalloc; -extern bool opt_zero; -extern unsigned opt_narenas; - -extern bool in_valgrind; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Number of arenas used for automatic multiplexing of threads and arenas. */ -extern unsigned narenas_auto; - -/* - * Arenas that are used to service external requests. 
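/*
 * The include list above reappears once per JEMALLOC_H_* phase: that is the
 * multi-pass scheme the "circular dependencies" comment describes. Each
 * internal header emits only the section matching the currently defined
 * phase macro. A toy header (component.h, hypothetical) in the same style:
 */
#ifdef JEMALLOC_H_TYPES
typedef struct component_s component_t;     /* pass 1: opaque types */
#endif
#ifdef JEMALLOC_H_STRUCTS
struct component_s { int refs; };           /* pass 2: layouts */
#endif
#ifdef JEMALLOC_H_EXTERNS
void component_init(component_t *c);        /* pass 3: prototypes */
#endif
#ifdef JEMALLOC_H_INLINES
static inline int
component_refs(const component_t *c) { return c->refs; } /* pass 4 */
#endif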
Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - */ -extern arena_t **arenas; - -/* - * pind2sz_tab encodes the same information as could be computed by - * pind2sz_compute(). - */ -extern size_t const pind2sz_tab[NPSIZES]; -/* - * index2size_tab encodes the same information as could be computed (at - * unacceptable cost in some code paths) by index2size_compute(). - */ -extern size_t const index2size_tab[NSIZES]; -/* - * size2index_tab is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via size2index(). - */ -extern uint8_t const size2index_tab[]; - -arena_t *a0get(void); -void *a0malloc(size_t size); -void a0dalloc(void *ptr); -void *bootstrap_malloc(size_t size); -void *bootstrap_calloc(size_t num, size_t size); -void bootstrap_free(void *ptr); -unsigned narenas_total_get(void); -arena_t *arena_init(tsdn_t *tsdn, unsigned ind); -arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); -arena_t *arena_choose_hard(tsd_t *tsd, bool internal); -void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); -void thread_allocated_cleanup(tsd_t *tsd); -void thread_deallocated_cleanup(tsd_t *tsd); -void iarena_cleanup(tsd_t *tsd); -void arena_cleanup(tsd_t *tsd); -void arenas_tdata_cleanup(tsd_t *tsd); -void narenas_tdata_cleanup(tsd_t *tsd); -void arenas_tdata_bypass_cleanup(tsd_t *tsd); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include 
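/*
 * size2index_tab above stays small because lookup-range size classes are all
 * multiples of 2^LG_TINY_MIN, so one byte per 2^LG_TINY_MIN-sized step is
 * enough. A minimal sketch with LG_TINY_MIN = 3 and classes {8, 16, 32}:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* entry i covers request sizes (8*i, 8*(i+1)] */
static const uint8_t toy_tab[] = {0, 1, 2, 2};   /* 0 -> 8, 1 -> 16, 2 -> 32 */

static unsigned toy_size2index(size_t size) {
	return toy_tab[(size - 1) >> 3];
}

int main(void) {
	assert(toy_size2index(1) == 0);    /* rounds up to 8  */
	assert(toy_size2index(9) == 1);    /* rounds up to 16 */
	assert(toy_size2index(17) == 2);   /* rounds up to 32 */
	assert(toy_size2index(32) == 2);
	return 0;
}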
"jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -pszind_t psz2ind(size_t psz); -size_t pind2sz_compute(pszind_t pind); -size_t pind2sz_lookup(pszind_t pind); -size_t pind2sz(pszind_t pind); -size_t psz2u(size_t psz); -szind_t size2index_compute(size_t size); -szind_t size2index_lookup(size_t size); -szind_t size2index(size_t size); -size_t index2size_compute(szind_t index); -size_t index2size_lookup(szind_t index); -size_t index2size(szind_t index); -size_t s2u_compute(size_t size); -size_t s2u_lookup(size_t size); -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); -arena_t *arena_choose(tsd_t *tsd, arena_t *arena); -arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); -arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, - bool refresh_if_missing); -arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); -ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_INLINE pszind_t -psz2ind(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (NPSIZES); - { - pszind_t x = lg_floor((psz<<1)-1); - pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - - (LG_SIZE_CLASS_GROUP + LG_PAGE); - pszind_t grp = shift << LG_SIZE_CLASS_GROUP; - - pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - pszind_t ind = grp + mod; - return (ind); - } -} - -JEMALLOC_INLINE size_t -pind2sz_compute(pszind_t pind) -{ - - { - size_t grp = pind >> LG_SIZE_CLASS_GROUP; - size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_PAGE + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_PAGE-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t sz = grp_size + mod_size; - return (sz); - } -} - -JEMALLOC_INLINE size_t -pind2sz_lookup(pszind_t pind) -{ - size_t ret = (size_t)pind2sz_tab[pind]; - assert(ret == pind2sz_compute(pind)); - return (ret); -} - -JEMALLOC_INLINE size_t -pind2sz(pszind_t pind) -{ - - assert(pind < NPSIZES); - return (pind2sz_lookup(pind)); -} - -JEMALLOC_INLINE size_t -psz2u(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (0); - { - size_t x = lg_floor((psz<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (psz + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_INLINE szind_t -size2index_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (NSIZES); -#if (NTBINS != 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); - } -#endif - { - szind_t x = lg_floor((size<<1)-1); - szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : - x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); - szind_t grp = shift << LG_SIZE_CLASS_GROUP; - - szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - szind_t index = NTBINS + grp + mod; - return (index); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index_lookup(size_t size) -{ - - assert(size <= LOOKUP_MAXCLASS); - { - szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); - assert(ret == size2index_compute(size)); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (size2index_lookup(size)); - return (size2index_compute(size)); -} - -JEMALLOC_INLINE size_t -index2size_compute(szind_t index) -{ - -#if (NTBINS > 0) - if (index < NTBINS) - return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); -#endif - { - size_t reduced_index = index - NTBINS; - size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; - size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_QUANTUM + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_QUANTUM-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t usize = grp_size + mod_size; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size_lookup(szind_t index) -{ - size_t ret = (size_t)index2size_tab[index]; - assert(ret == index2size_compute(index)); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size(szind_t index) -{ - - assert(index < NSIZES); - return (index2size_lookup(index)); -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (0); -#if (NTBINS > 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : - (ZU(1) << lg_ceil)); - } -#endif - { - size_t x = lg_floor((size<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (size + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_lookup(size_t size) -{ - size_t ret = index2size_lookup(size2index_lookup(size)); - - assert(ret == s2u_compute(size)); - return (ret); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size. - */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (s2u_lookup(size)); - return (s2u_compute(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* Try for a small size class. */ - if (size <= SMALL_MAXCLASS && alignment < PAGE) { - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each - * small size class, every object is aligned at the smallest - * power of two that is non-zero in the base two representation - * of the size. 
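/*
 * Each *_lookup() above pairs a fast table read with an assert against its
 * slow *_compute() twin, so debug builds validate the table while NDEBUG
 * builds compile the check away. The bare shape of that pattern:
 */
#include <assert.h>
#include <stddef.h>

static size_t next_pow2_compute(size_t x) {  /* slow but obviously correct */
	size_t p = 1;
	while (p < x) p <<= 1;
	return p;
}

static const size_t next_pow2_tab[] = {1, 1, 2, 4, 4, 8, 8, 8, 8};

static size_t next_pow2_lookup(size_t x) {   /* fast path, x <= 8 */
	size_t ret = next_pow2_tab[x];
	assert(ret == next_pow2_compute(x));     /* cross-check in debug */
	return ret;
}

int main(void) {
	return next_pow2_lookup(5) == 8 ? 0 : 1;
}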
For example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = s2u(ALIGNMENT_CEILING(size, alignment)); - if (usize < LARGE_MINCLASS) - return (usize); - } - - /* Try for a large size class. */ - if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { - /* - * We can't achieve subpage alignment, so round up alignment - * to the minimum that can actually be supported. - */ - alignment = PAGE_CEILING(alignment); - - /* Make sure result is a large size class. */ - usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - */ - if (usize + large_pad + alignment - PAGE <= arena_maxrun) - return (usize); - } - - /* Huge size class. Beware of overflow. */ - - if (unlikely(alignment > HUGE_MAXCLASS)) - return (0); - - /* - * We can't achieve subchunk alignment, so round up alignment to the - * minimum that can actually be supported. - */ - alignment = CHUNK_CEILING(alignment); - - /* Make sure result is a huge size class. */ - if (size <= chunksize) - usize = chunksize; - else { - usize = s2u(size); - if (usize < size) { - /* size_t overflow. */ - return (0); - } - } - - /* - * Calculate the multi-chunk mapping that huge_palloc() would need in - * order to guarantee the alignment. - */ - if (usize + alignment - PAGE < usize) { - /* size_t overflow. */ - return (0); - } - return (usize); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); - if (unlikely(ret == NULL)) - ret = arena_choose_hard(tsd, internal); - - return (ret); -} - -JEMALLOC_INLINE arena_t * -arena_choose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, false)); -} - -JEMALLOC_INLINE arena_t * -arena_ichoose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, true)); -} - -JEMALLOC_INLINE arena_tdata_t * -arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) -{ - arena_tdata_t *tdata; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - - if (unlikely(arenas_tdata == NULL)) { - /* arenas_tdata hasn't been initialized yet. */ - return (arena_tdata_get_hard(tsd, ind)); - } - if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { - /* - * ind is invalid, cache is old (too small), or tdata to be - * initialized. - */ - return (refresh_if_missing ? 
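/*
 * The table above relies on the fact that a small class's guaranteed
 * alignment is the lowest set bit of its size, which is why sa2u() may
 * simply round the request up to a multiple of the alignment and defer to
 * s2u(). A sketch checking two of the rows numerically:
 */
#include <assert.h>
#include <stddef.h>

static size_t lowest_set_bit(size_t x) {
	return x & (~x + 1);   /* isolates the lowest set bit */
}

int main(void) {
	assert(lowest_set_bit(96) == 32);    /* 96  == 0b1100000  */
	assert(lowest_set_bit(192) == 64);   /* 192 == 0b11000000 */
	return 0;
}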
arena_tdata_get_hard(tsd, ind) : - NULL); - } - - tdata = &arenas_tdata[ind]; - if (likely(tdata != NULL) || !refresh_if_missing) - return (tdata); - return (arena_tdata_get_hard(tsd, ind)); -} - -JEMALLOC_INLINE arena_t * -arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) -{ - arena_t *ret; - - assert(ind <= MALLOCX_ARENA_MAX); - - ret = arenas[ind]; - if (unlikely(ret == NULL)) { - ret = atomic_read_p((void *)&arenas[ind]); - if (init_if_missing && unlikely(ret == NULL)) - ret = arena_init(tsdn, ind); - } - return (ret); -} - -JEMALLOC_INLINE ticker_t * -decay_ticker_get(tsd_t *tsd, unsigned ind) -{ - arena_tdata_t *tdata; - - tdata = arena_tdata_get(tsd, ind, true); - if (unlikely(tdata == NULL)) - return (NULL); - return (&tdata->decay_ticker); -} -#endif - -#include "jemalloc/internal/bitmap.h" -/* - * Include portions of arena.h interleaved with tcache.h in order to resolve - * circular dependencies. - */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -arena_t *iaalloc(const void *ptr); -size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); -void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); -void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, - bool slow_path); -void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena); -void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena); -void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); -size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(tsdn_t *tsdn, const void *ptr); -void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path); -void idalloc(tsd_t *tsd, void *ptr); -void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, - arena_t *arena); -void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); -void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero); -bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE arena_t * -iaalloc(const void *ptr) -{ - - assert(ptr != NULL); - - return (arena_aalloc(ptr)); -} - -/* - * Typical usage: - * tsdn_t *tsdn = [...] - * void *ptr = [...] - * size_t sz = isalloc(tsdn, ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. 
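/*
 * arena_get() above is a lazy-initialization fast path: a plain read first,
 * an atomic re-read only when NULL is seen, and creation as the last resort.
 * A sketch of the same shape restated with C11 atomics (jemalloc's
 * atomic_read_p() predates them); slot_t/slot_get are illustrative names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct { int id; } slot_t;

static _Atomic(slot_t *) slots[16];
static slot_t backing[16];

static slot_t *slot_init(unsigned ind) {     /* slow path */
	backing[ind].id = (int)ind;
	atomic_store_explicit(&slots[ind], &backing[ind],
	    memory_order_release);
	return &backing[ind];
}

static slot_t *slot_get(unsigned ind, bool init_if_missing) {
	slot_t *ret = atomic_load_explicit(&slots[ind], memory_order_relaxed);
	if (ret == NULL) {
		ret = atomic_load_explicit(&slots[ind],
		    memory_order_acquire);   /* re-read with ordering */
		if (init_if_missing && ret == NULL)
			ret = slot_init(ind);
	}
	return ret;
}

int main(void) {
	return slot_get(3, true)->id == 3 ? 0 : 1;
}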
*/ - assert(config_prof || !demote); - - return (arena_salloc(tsdn, ptr, demote)); -} - -JEMALLOC_ALWAYS_INLINE void * -iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, - bool is_metadata, arena_t *arena, bool slow_path) -{ - void *ret; - - assert(size != 0); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), - isalloc(tsdn, ret, config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) -{ - - return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), - false, NULL, slow_path)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, - config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - - return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) -{ - - return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, - tcache_get(tsd, true), false, NULL)); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - extent_node_t *node; - - /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - node = chunk_lookup(ptr, false); - if (node == NULL) - return (0); - /* Only arena chunks should be looked up via interior pointers. 
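/*
 * ivsalloc() above answers "does the allocator own this pointer, and how big
 * is the allocation?" by consulting the global chunk registry and returning
 * 0 for foreign pointers. A toy sketch of that contract with a flat array in
 * place of the real lookup structure; region_t/toy_ivsalloc are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct { uintptr_t base; size_t len; } region_t;

static region_t registry[8];   /* stand-in for the chunk lookup structure */

static size_t toy_ivsalloc(const void *ptr) {
	uintptr_t p = (uintptr_t)ptr;
	for (size_t i = 0; i < 8; i++) {
		if (registry[i].len != 0 && p >= registry[i].base &&
		    p - registry[i].base < registry[i].len)
			return registry[i].len;
	}
	return 0;   /* not ours */
}

int main(void) {
	int x;
	registry[0].base = (uintptr_t)&x;
	registry[0].len = sizeof(x);
	return toy_ivsalloc(&x) == sizeof(x) ? 0 : 1;
}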
*/ - assert(extent_node_addr_get(node) == ptr || - extent_node_achunk_get(node)); - - return (isalloc(tsdn, ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - szind_t binind = size2index(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(tsdn_t *tsdn, const void *ptr) -{ - size_t usize = isalloc(tsdn, ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path) -{ - - assert(ptr != NULL); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); - if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, - config_prof)); - } - - arena_dalloc(tsdn, ptr, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(tsd_t *tsd, void *ptr) -{ - - idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path) -{ - - arena_sdalloc(tsdn, ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) -{ - void *p; - size_t usize, copysize; - - usize = sa2u(size + extra, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); - if (p == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, - arena); - if (p == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(p, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache, true); - return (p); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero, tcache_t *tcache, arena_t *arena) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* - * Existing object alignment is inadequate; allocate new space - * and copy. 
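/*
 * iralloct_realign() above is the fallback when an existing object cannot
 * satisfy a new alignment in place: allocate a fresh aligned block, copy
 * only min(size, oldsize) bytes (callers have no contract on the "extra"
 * bytes), then free the old block. A portable sketch with C11
 * aligned_alloc(); realloc_aligned is an illustrative helper, and alignment
 * is assumed to be a power of two.
 */
#include <stdlib.h>
#include <string.h>

static void *realloc_aligned(void *ptr, size_t oldsize, size_t size,
    size_t alignment) {
	/* aligned_alloc() requires its size to be a multiple of alignment */
	size_t asize = (size + alignment - 1) & ~(alignment - 1);
	void *p = aligned_alloc(alignment, asize);
	if (p == NULL)
		return NULL;        /* like realloc: the old block survives */
	memcpy(p, ptr, size < oldsize ? size : oldsize);
	free(ptr);
	return p;
}

int main(void) {
	void *p = malloc(8);
	void *q = p ? realloc_aligned(p, 8, 24, 64) : NULL;
	free(q);
	return 0;
}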
- */ - return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, - zero, tcache, arena)); - } - - return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, - tcache)); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero) -{ - - return (iralloct(tsd, ptr, oldsize, size, alignment, zero, - tcache_get(tsd, true), NULL)); -} - -JEMALLOC_ALWAYS_INLINE bool -ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* Existing object alignment is inadequate. */ - return (true); - } - - return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); -} -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in deleted file mode 100755 index e3b499a8..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in +++ /dev/null @@ -1,1294 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H - -#include "jemalloc_internal_defs.h" -#include "jemalloc/internal/jemalloc_internal_decls.h" - -#ifdef JEMALLOC_UTRACE -#include -#endif - -#define JEMALLOC_NO_DEMANGLE -#ifdef JEMALLOC_JET -# define JEMALLOC_N(n) jet_##n -# include "jemalloc/internal/public_namespace.h" -# define JEMALLOC_NO_RENAME -# include "../jemalloc@install_suffix@.h" -# undef JEMALLOC_NO_RENAME -#else -# define JEMALLOC_N(n) @private_namespace@##n -# include "../jemalloc@install_suffix@.h" -#endif -#include "jemalloc/internal/private_namespace.h" - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool have_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; -static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; -static const bool config_prof = -#ifdef JEMALLOC_PROF - true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool maps_coalesce = -#ifdef JEMALLOC_MAPS_COALESCE - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_thp = -#ifdef JEMALLOC_THP - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef 
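/*
 * The config_* definitions above (repeated verbatim in the
 * jemalloc_internal.h.in template they are generated from) turn ./configure
 * results into compile-time const bools instead of #ifdef at every use site:
 * both arms of "if (config_stats)" always parse, and the dead arm folds
 * away. A minimal sketch of the idiom; MYLIB_STATS is a hypothetical
 * configure-time macro.
 */
#include <stdbool.h>
#include <stdio.h>

static const bool config_stats_sketch =
#ifdef MYLIB_STATS
    true
#else
    false
#endif
    ;

static unsigned long nallocs;

static void count_alloc(void) {
	if (config_stats_sketch)   /* folded away when MYLIB_STATS is unset */
		nallocs++;
}

int main(void) {
	count_alloc();
	printf("%lu\n", nallocs);
	return 0;
}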
JEMALLOC_IVSALLOC - true -#else - false -#endif - ; -static const bool config_cache_oblivious = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - true -#else - false -#endif - ; - -#ifdef JEMALLOC_C11ATOMICS -#include -#endif - -#ifdef JEMALLOC_ATOMIC9 -#include -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include -#endif - -#ifdef JEMALLOC_ZONE -#include -#include -#include -#endif - -#include "jemalloc/internal/ph.h" -#ifndef __PGI -#define RB_COMPACT -#endif -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. - */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#include "jemalloc/internal/jemalloc_internal_macros.h" - -/* Page size index type. */ -typedef unsigned pszind_t; - -/* Size class index type. */ -typedef unsigned szind_t; - -/* - * Flags bits: - * - * a: arena - * t: tcache - * 0: unused - * z: zero - * n: alignment - * - * aaaaaaaa aaaatttt tttttttt 0znnnnnn - */ -#define MALLOCX_ARENA_MASK ((int)~0xfffff) -#define MALLOCX_ARENA_MAX 0xffe -#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) -#define MALLOCX_TCACHE_MAX 0xffd -#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) -/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ -#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ - (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) -#define MALLOCX_ALIGN_GET(flags) \ - (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) -#define MALLOCX_ZERO_GET(flags) \ - ((bool)(flags & MALLOCX_ZERO)) - -#define MALLOCX_TCACHE_GET(flags) \ - (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) -#define MALLOCX_ARENA_GET(flags) \ - (((unsigned)(((unsigned)flags) >> 20)) - 1) - -/* Smallest size class to support. */ -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). 
- */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __aarch64__ -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __or1k__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __riscv__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifdef __le32__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "Unknown minimum alignment for architecture; specify via " - "--with-lg-quantum" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. LG_PAGE is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define PAGE ((size_t)(1U << LG_PAGE)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the page base address for the page containing address a. */ -#define PAGE_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~PAGE_MASK)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & ((~(alignment)) + 1)) - -/* Declare a variable-length array. 
*/ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include -# else -# include -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * (count)) -#else -# define VARIABLE_ARRAY(type, name, count) type name[(count)] -#endif - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#define JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_A -#include "jemalloc/internal/extent.h" -#define JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_STRUCTS_B -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern const char *opt_junk; -extern bool opt_junk_alloc; -extern bool opt_junk_free; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_xmalloc; -extern bool opt_zero; -extern unsigned opt_narenas; - -extern bool in_valgrind; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Number of arenas used for automatic multiplexing of threads and arenas. */ -extern unsigned narenas_auto; - -/* - * Arenas that are used to service external requests. 
Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - */ -extern arena_t **arenas; - -/* - * pind2sz_tab encodes the same information as could be computed by - * pind2sz_compute(). - */ -extern size_t const pind2sz_tab[NPSIZES]; -/* - * index2size_tab encodes the same information as could be computed (at - * unacceptable cost in some code paths) by index2size_compute(). - */ -extern size_t const index2size_tab[NSIZES]; -/* - * size2index_tab is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via size2index(). - */ -extern uint8_t const size2index_tab[]; - -arena_t *a0get(void); -void *a0malloc(size_t size); -void a0dalloc(void *ptr); -void *bootstrap_malloc(size_t size); -void *bootstrap_calloc(size_t num, size_t size); -void bootstrap_free(void *ptr); -unsigned narenas_total_get(void); -arena_t *arena_init(tsdn_t *tsdn, unsigned ind); -arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); -arena_t *arena_choose_hard(tsd_t *tsd, bool internal); -void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); -void thread_allocated_cleanup(tsd_t *tsd); -void thread_deallocated_cleanup(tsd_t *tsd); -void iarena_cleanup(tsd_t *tsd); -void arena_cleanup(tsd_t *tsd); -void arenas_tdata_cleanup(tsd_t *tsd); -void narenas_tdata_cleanup(tsd_t *tsd); -void arenas_tdata_bypass_cleanup(tsd_t *tsd); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" -#include "jemalloc/internal/tsd.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/valgrind.h" -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/spin.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ticker.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/witness.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/rtree.h" -#include 
"jemalloc/internal/pages.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -pszind_t psz2ind(size_t psz); -size_t pind2sz_compute(pszind_t pind); -size_t pind2sz_lookup(pszind_t pind); -size_t pind2sz(pszind_t pind); -size_t psz2u(size_t psz); -szind_t size2index_compute(size_t size); -szind_t size2index_lookup(size_t size); -szind_t size2index(size_t size); -size_t index2size_compute(szind_t index); -size_t index2size_lookup(szind_t index); -size_t index2size(szind_t index); -size_t s2u_compute(size_t size); -size_t s2u_lookup(size_t size); -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); -arena_t *arena_choose(tsd_t *tsd, arena_t *arena); -arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); -arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, - bool refresh_if_missing); -arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); -ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_INLINE pszind_t -psz2ind(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (NPSIZES); - { - pszind_t x = lg_floor((psz<<1)-1); - pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - - (LG_SIZE_CLASS_GROUP + LG_PAGE); - pszind_t grp = shift << LG_SIZE_CLASS_GROUP; - - pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - pszind_t ind = grp + mod; - return (ind); - } -} - -JEMALLOC_INLINE size_t -pind2sz_compute(pszind_t pind) -{ - - { - size_t grp = pind >> LG_SIZE_CLASS_GROUP; - size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_PAGE + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_PAGE-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t sz = grp_size + mod_size; - return (sz); - } -} - -JEMALLOC_INLINE size_t -pind2sz_lookup(pszind_t pind) -{ - size_t ret = (size_t)pind2sz_tab[pind]; - assert(ret == pind2sz_compute(pind)); - return (ret); -} - -JEMALLOC_INLINE size_t -pind2sz(pszind_t pind) -{ - - assert(pind < NPSIZES); - return (pind2sz_lookup(pind)); -} - -JEMALLOC_INLINE size_t -psz2u(size_t psz) -{ - - if (unlikely(psz > HUGE_MAXCLASS)) - return (0); - { - size_t x = lg_floor((psz<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? - LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (psz + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_INLINE szind_t -size2index_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (NSIZES); -#if (NTBINS != 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); - } -#endif - { - szind_t x = lg_floor((size<<1)-1); - szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : - x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); - szind_t grp = shift << LG_SIZE_CLASS_GROUP; - - szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - - size_t delta_inverse_mask = ZI(-1) << lg_delta; - szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); - - szind_t index = NTBINS + grp + mod; - return (index); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index_lookup(size_t size) -{ - - assert(size <= LOOKUP_MAXCLASS); - { - szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); - assert(ret == size2index_compute(size)); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE szind_t -size2index(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (size2index_lookup(size)); - return (size2index_compute(size)); -} - -JEMALLOC_INLINE size_t -index2size_compute(szind_t index) -{ - -#if (NTBINS > 0) - if (index < NTBINS) - return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); -#endif - { - size_t reduced_index = index - NTBINS; - size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; - size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - - 1); - - size_t grp_size_mask = ~((!!grp)-1); - size_t grp_size = ((ZU(1) << (LG_QUANTUM + - (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; - - size_t shift = (grp == 0) ? 1 : grp; - size_t lg_delta = shift + (LG_QUANTUM-1); - size_t mod_size = (mod+1) << lg_delta; - - size_t usize = grp_size + mod_size; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size_lookup(szind_t index) -{ - size_t ret = (size_t)index2size_tab[index]; - assert(ret == index2size_compute(index)); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -index2size(szind_t index) -{ - - assert(index < NSIZES); - return (index2size_lookup(index)); -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_compute(size_t size) -{ - - if (unlikely(size > HUGE_MAXCLASS)) - return (0); -#if (NTBINS > 0) - if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { - size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; - size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); - return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : - (ZU(1) << lg_ceil)); - } -#endif - { - size_t x = lg_floor((size<<1)-1); - size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) - ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; - size_t delta = ZU(1) << lg_delta; - size_t delta_mask = delta - 1; - size_t usize = (size + delta_mask) & ~delta_mask; - return (usize); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -s2u_lookup(size_t size) -{ - size_t ret = index2size_lookup(size2index_lookup(size)); - - assert(ret == s2u_compute(size)); - return (ret); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size. - */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - assert(size > 0); - if (likely(size <= LOOKUP_MAXCLASS)) - return (s2u_lookup(size)); - return (s2u_compute(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* Try for a small size class. */ - if (size <= SMALL_MAXCLASS && alignment < PAGE) { - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each - * small size class, every object is aligned at the smallest - * power of two that is non-zero in the base two representation - * of the size. 
For example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = s2u(ALIGNMENT_CEILING(size, alignment)); - if (usize < LARGE_MINCLASS) - return (usize); - } - - /* Try for a large size class. */ - if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { - /* - * We can't achieve subpage alignment, so round up alignment - * to the minimum that can actually be supported. - */ - alignment = PAGE_CEILING(alignment); - - /* Make sure result is a large size class. */ - usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - */ - if (usize + large_pad + alignment - PAGE <= arena_maxrun) - return (usize); - } - - /* Huge size class. Beware of overflow. */ - - if (unlikely(alignment > HUGE_MAXCLASS)) - return (0); - - /* - * We can't achieve subchunk alignment, so round up alignment to the - * minimum that can actually be supported. - */ - alignment = CHUNK_CEILING(alignment); - - /* Make sure result is a huge size class. */ - if (size <= chunksize) - usize = chunksize; - else { - usize = s2u(size); - if (usize < size) { - /* size_t overflow. */ - return (0); - } - } - - /* - * Calculate the multi-chunk mapping that huge_palloc() would need in - * order to guarantee the alignment. - */ - if (usize + alignment - PAGE < usize) { - /* size_t overflow. */ - return (0); - } - return (usize); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); - if (unlikely(ret == NULL)) - ret = arena_choose_hard(tsd, internal); - - return (ret); -} - -JEMALLOC_INLINE arena_t * -arena_choose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, false)); -} - -JEMALLOC_INLINE arena_t * -arena_ichoose(tsd_t *tsd, arena_t *arena) -{ - - return (arena_choose_impl(tsd, arena, true)); -} - -JEMALLOC_INLINE arena_tdata_t * -arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) -{ - arena_tdata_t *tdata; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - - if (unlikely(arenas_tdata == NULL)) { - /* arenas_tdata hasn't been initialized yet. */ - return (arena_tdata_get_hard(tsd, ind)); - } - if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { - /* - * ind is invalid, cache is old (too small), or tdata to be - * initialized. - */ - return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : - NULL); - } - - tdata = &arenas_tdata[ind]; - if (likely(tdata != NULL) || !refresh_if_missing) - return (tdata); - return (arena_tdata_get_hard(tsd, ind)); -} - -JEMALLOC_INLINE arena_t * -arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) -{ - arena_t *ret; - - assert(ind <= MALLOCX_ARENA_MAX); - - ret = arenas[ind]; - if (unlikely(ret == NULL)) { - ret = atomic_read_p((void *)&arenas[ind]); - if (init_if_missing && unlikely(ret == NULL)) - ret = arena_init(tsdn, ind); - } - return (ret); -} - -JEMALLOC_INLINE ticker_t * -decay_ticker_get(tsd_t *tsd, unsigned ind) -{ - arena_tdata_t *tdata; - - tdata = arena_tdata_get(tsd, ind, true); - if (unlikely(tdata == NULL)) - return (NULL); - return (&tdata->decay_ticker); -} -#endif - -#include "jemalloc/internal/bitmap.h" -/* - * Include portions of arena.h interleaved with tcache.h in order to resolve - * circular dependencies. - */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -arena_t *iaalloc(const void *ptr); -size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); -void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); -void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, - bool slow_path); -void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena); -void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena); -void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); -size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(tsdn_t *tsdn, const void *ptr); -void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path); -void idalloc(tsd_t *tsd, void *ptr); -void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, - bool slow_path); -void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, - arena_t *arena); -void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); -void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero); -bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE arena_t * -iaalloc(const void *ptr) -{ - - assert(ptr != NULL); - - return (arena_aalloc(ptr)); -} - -/* - * Typical usage: - * tsdn_t *tsdn = [...] - * void *ptr = [...] - * size_t sz = isalloc(tsdn, ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. 
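- *
- * With profiling enabled, a sampled small allocation may be promoted to
- * a large run (see arena_prof_promoted()); demote=true then reports the
- * size class originally requested rather than the promoted one, so for
- * any live allocation one would expect:
- *
- *   assert(isalloc(tsdn, ptr, true) <= isalloc(tsdn, ptr, false));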
*/ - assert(config_prof || !demote); - - return (arena_salloc(tsdn, ptr, demote)); -} - -JEMALLOC_ALWAYS_INLINE void * -iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, - bool is_metadata, arena_t *arena, bool slow_path) -{ - void *ret; - - assert(size != 0); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), - isalloc(tsdn, ret, config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) -{ - - return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), - false, NULL, slow_path)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - - ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, - config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - - return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) -{ - - return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, - tcache_get(tsd, true), false, NULL)); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) -{ - extent_node_t *node; - - /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - node = chunk_lookup(ptr, false); - if (node == NULL) - return (0); - /* Only arena chunks should be looked up via interior pointers. 
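- *
- * A huge allocation's extent node carries the allocation's own base
- * address, so when the looked-up node's address differs from ptr the
- * node must describe an arena chunk; the assertion below encodes that
- * invariant.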
*/ - assert(extent_node_addr_get(node) == ptr || - extent_node_achunk_get(node)); - - return (isalloc(tsdn, ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - szind_t binind = size2index(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(tsdn_t *tsdn, const void *ptr) -{ - size_t usize = isalloc(tsdn, ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, - bool slow_path) -{ - - assert(ptr != NULL); - assert(!is_metadata || tcache == NULL); - assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); - if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, - config_prof)); - } - - arena_dalloc(tsdn, ptr, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(tsd_t *tsd, void *ptr) -{ - - idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - bool slow_path) -{ - - arena_sdalloc(tsdn, ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) -{ - - if (slow_path && config_fill && unlikely(opt_quarantine)) - quarantine(tsd, ptr); - else - isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) -{ - void *p; - size_t usize, copysize; - - usize = sa2u(size + extra, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); - if (p == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, - arena); - if (p == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(p, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache, true); - return (p); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero, tcache_t *tcache, arena_t *arena) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* - * Existing object alignment is inadequate; allocate new space - * and copy. 
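- *
- * For example, a block placed for an 8-byte-aligned request generally
- * cannot satisfy a later alignment=64 request in place, so
- * iralloct_realign() allocates a fresh, suitably aligned region and
- * copies at most size bytes of the old contents.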
- */ - return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, - zero, tcache, arena)); - } - - return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, - tcache)); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, - bool zero) -{ - - return (iralloct(tsd, ptr, oldsize, size, alignment, zero, - tcache_get(tsd, true), NULL)); -} - -JEMALLOC_ALWAYS_INLINE bool -ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero) -{ - - assert(ptr != NULL); - assert(size != 0); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* Existing object alignment is inadequate. */ - return (true); - } - - return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); -} -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h deleted file mode 100755 index c907d910..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_DECLS_H -#define JEMALLOC_INTERNAL_DECLS_H - -#include -#ifdef _WIN32 -# include -# include "msvc_compat/windows_extra.h" - -#else -# include -# include -# if !defined(__pnacl__) && !defined(__native_client__) -# include -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# include -# endif -# include -# ifdef JEMALLOC_OS_UNFAIR_LOCK -# include -# endif -# ifdef JEMALLOC_GLIBC_MALLOC_HOOK -# include -# endif -# include -# include -# include -# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME -# include -# endif -#endif -#include - -#include -#ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX -#endif -#include -#include -#include -#include -#include -#include -#ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) -#endif -#include -#include -#include -#ifdef _MSC_VER -# include -typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -# ifdef JEMALLOC_HAS_RESTRICT -# define restrict __restrict -# endif -/* Disable warnings about deprecated system functions. */ -# pragma warning(disable: 4996) -#if _MSC_VER < 1800 -static int -isblank(int c) -{ - - return (c == '\t' || c == ' '); -} -#endif -#else -# include -#endif -#include - -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h deleted file mode 100755 index f3b1019c..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h +++ /dev/null @@ -1,316 +0,0 @@ -/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ -#ifndef JEMALLOC_INTERNAL_DEFS_H_ -#define JEMALLOC_INTERNAL_DEFS_H_ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -#define JEMALLOC_PREFIX "je_" -#define JEMALLOC_CPREFIX "JE_" - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. 
- * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#define JEMALLOC_PRIVATE_NAMESPACE je_ - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#define CPU_SPINWAIT _mm_pause() - -/* Defined if C11 atomics are available. */ -/* #undef JEMALLOC_C11ATOMICS */ - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -/* #undef JEMALLOC_ATOMIC9 */ - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -/* #undef JEMALLOC_OSATOMIC */ - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ - -/* - * Defined if __builtin_clz() and __builtin_clzl() are available. - */ -/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */ - -/* - * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. - */ -/* #undef JEMALLOC_OS_UNFAIR_LOCK */ - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -/* #undef JEMALLOC_OSSPIN */ - -/* Defined if syscall(2) is usable. */ -/* #undef JEMALLOC_USE_SYSCALL */ - -/* - * Defined if secure_getenv(3) is available. - */ -/* #undef JEMALLOC_HAVE_SECURE_GETENV */ - -/* - * Defined if issetugid(2) is available. - */ -/* #undef JEMALLOC_HAVE_ISSETUGID */ - -/* Defined if pthread_atfork(3) is available. */ -/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */ - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. - */ -/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. - */ -/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */ - -/* - * Defined if mach_absolute_time() is available. - */ -/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ -/* #undef JEMALLOC_THREADED_INIT */ - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -/* #undef JEMALLOC_MUTEX_INIT_CB */ - -/* Non-empty if the tls_model attribute is supported. 
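- *
- * On toolchains that support the attribute, configure typically expands
- * this to something like __attribute__((tls_model("initial-exec"))); it
- * is left empty here because MSVC has no equivalent attribute.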
*/ -#define JEMALLOC_TLS_MODEL - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#define JEMALLOC_CC_SILENCE - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -/* #undef JEMALLOC_CODE_COVERAGE */ - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -#define JEMALLOC_DEBUG - -/* JEMALLOC_STATS enables statistics calculation. */ -#define JEMALLOC_STATS - -/* JEMALLOC_PROF enables allocation profiling. */ -/* #undef JEMALLOC_PROF */ - -/* Use libunwind for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBUNWIND */ - -/* Use libgcc for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBGCC */ - -/* Use gcc intrinsics for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_GCC */ - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -/* #undef JEMALLOC_TCACHE */ - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -/* #undef JEMALLOC_DSS */ - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#define JEMALLOC_FILL - -/* Support utrace(2)-based tracing. */ -/* #undef JEMALLOC_UTRACE */ - -/* Support Valgrind. */ -/* #undef JEMALLOC_VALGRIND */ - -/* Support optional abort() on OOM. */ -/* #undef JEMALLOC_XMALLOC */ - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -/* #undef JEMALLOC_LAZY_LOCK */ - -/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ -#define LG_TINY_MIN 3 - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -/* #undef LG_QUANTUM */ - -/* One page is 2^LG_PAGE bytes. */ -#define LG_PAGE 12 - -/* - * If defined, adjacent virtual memory mappings with identical attributes - * automatically coalesce, and they fragment when changes are made to subranges. - * This is the normal order of things for mmap()/munmap(), but on Windows - * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. - * mappings do *not* coalesce/fragment. - */ -/* #undef JEMALLOC_MAPS_COALESCE */ - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -#define JEMALLOC_MUNMAP - -/* TLS is used to map arenas and magazine caches to threads. */ -/* #undef JEMALLOC_TLS */ - -/* - * Used to mark unreachable code to quiet "end of non-void" compiler warnings. - * Don't use this directly; instead use unreachable() from util.h - */ -#define JEMALLOC_INTERNAL_UNREACHABLE abort - -/* - * ffs*() functions to use for bitmapping. Don't use these directly; instead, - * use ffs_*() from util.h. - */ -#define JEMALLOC_INTERNAL_FFSLL ffsll -#define JEMALLOC_INTERNAL_FFSL ffsl -#define JEMALLOC_INTERNAL_FFS ffs - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -#define JEMALLOC_IVSALLOC - -/* - * If defined, explicitly attempt to more uniformly distribute large allocation - * pointer alignments across all cache indices. - */ -#define JEMALLOC_CACHE_OBLIVIOUS - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. 
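- *
- * On Darwin this enables registering a custom malloc zone so that
- * jemalloc can interpose on the system allocator; it has no analogue on
- * Windows, hence it is left undefined in this configuration.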
- */ -/* #undef JEMALLOC_ZONE */ - -/* - * Methods for determining whether the OS overcommits. - * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's - * /proc/sys/vm.overcommit_memory file. - * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. - */ -/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ -/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */ - -/* Defined if madvise(2) is available. */ -/* #undef JEMALLOC_HAVE_MADVISE */ - -/* - * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE - * arguments to madvise(2). - */ -/* #undef JEMALLOC_HAVE_MADVISE_HUGE */ - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_FREE) : This marks pages as being unused, such that they - * will be discarded rather than swapped out. - * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that - * new pages will be demand-zeroed if the - * address region is later touched. - */ -/* #undef JEMALLOC_PURGE_MADVISE_FREE */ -/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */ - -/* Defined if transparent huge page support is enabled. */ -/* #undef JEMALLOC_THP */ - -/* Define if operating system has alloca.h header. */ -/* #undef JEMALLOC_HAS_ALLOCA_H */ - -/* C99 restrict keyword supported. */ -/* #undef JEMALLOC_HAS_RESTRICT */ - -/* For use by hash code. */ -/* #undef JEMALLOC_BIG_ENDIAN */ - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#define LG_SIZEOF_INT 2 - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#define LG_SIZEOF_LONG 2 - -/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ -#define LG_SIZEOF_LONG_LONG 3 - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#define LG_SIZEOF_INTMAX_T 3 - -/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ -/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */ - -/* glibc memalign hook. */ -/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */ - -/* Adaptive mutex support in pthreads. */ -/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */ - -/* - * If defined, jemalloc symbols are not exported (doesn't work when - * JEMALLOC_PREFIX is not defined). - */ -/* #undef JEMALLOC_EXPORT */ - -/* config.malloc_conf options string. */ -#define JEMALLOC_CONFIG_MALLOC_CONF "" - -#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in deleted file mode 100755 index 7c88b0d7..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in +++ /dev/null @@ -1,315 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_DEFS_H_ -#define JEMALLOC_INTERNAL_DEFS_H_ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -#undef JEMALLOC_PREFIX -#undef JEMALLOC_CPREFIX - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#undef JEMALLOC_PRIVATE_NAMESPACE - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#undef CPU_SPINWAIT - -/* Defined if C11 atomics are available. */ -#undef JEMALLOC_C11ATOMICS - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. 
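- *
- * Together with JEMALLOC_C11ATOMICS and JEMALLOC_OSATOMIC above, this
- * selects the primitive that backs the atomic_*() wrappers (e.g.
- * atomic_add_uint64(), atomic_cas_p()); when none of them is defined,
- * the implementation presumably falls back to compiler builtins or, on
- * MSVC, the Interlocked* intrinsics.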
*/ -#undef JEMALLOC_ATOMIC9 - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -#undef JEMALLOC_OSATOMIC - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines). - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 - -/* - * Defined if __builtin_clz() and __builtin_clzl() are available. - */ -#undef JEMALLOC_HAVE_BUILTIN_CLZ - -/* - * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. - */ -#undef JEMALLOC_OS_UNFAIR_LOCK - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -#undef JEMALLOC_OSSPIN - -/* Defined if syscall(2) is usable. */ -#undef JEMALLOC_USE_SYSCALL - -/* - * Defined if secure_getenv(3) is available. - */ -#undef JEMALLOC_HAVE_SECURE_GETENV - -/* - * Defined if issetugid(2) is available. - */ -#undef JEMALLOC_HAVE_ISSETUGID - -/* Defined if pthread_atfork(3) is available. */ -#undef JEMALLOC_HAVE_PTHREAD_ATFORK - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. - */ -#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE - -/* - * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. - */ -#undef JEMALLOC_HAVE_CLOCK_MONOTONIC - -/* - * Defined if mach_absolute_time() is available. - */ -#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -#undef JEMALLOC_MALLOC_THREAD_CLEANUP - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ -#undef JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -#undef JEMALLOC_MUTEX_INIT_CB - -/* Non-empty if the tls_model attribute is supported. */ -#undef JEMALLOC_TLS_MODEL - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#undef JEMALLOC_CC_SILENCE - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -#undef JEMALLOC_CODE_COVERAGE - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -#undef JEMALLOC_DEBUG - -/* JEMALLOC_STATS enables statistics calculation. */ -#undef JEMALLOC_STATS - -/* JEMALLOC_PROF enables allocation profiling. */ -#undef JEMALLOC_PROF - -/* Use libunwind for profile backtracing if defined. */ -#undef JEMALLOC_PROF_LIBUNWIND - -/* Use libgcc for profile backtracing if defined. 
*/ -#undef JEMALLOC_PROF_LIBGCC - -/* Use gcc intrinsics for profile backtracing if defined. */ -#undef JEMALLOC_PROF_GCC - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#undef JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -#undef JEMALLOC_DSS - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#undef JEMALLOC_FILL - -/* Support utrace(2)-based tracing. */ -#undef JEMALLOC_UTRACE - -/* Support Valgrind. */ -#undef JEMALLOC_VALGRIND - -/* Support optional abort() on OOM. */ -#undef JEMALLOC_XMALLOC - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -#undef JEMALLOC_LAZY_LOCK - -/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ -#undef LG_TINY_MIN - -/* - * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -#undef LG_QUANTUM - -/* One page is 2^LG_PAGE bytes. */ -#undef LG_PAGE - -/* - * If defined, adjacent virtual memory mappings with identical attributes - * automatically coalesce, and they fragment when changes are made to subranges. - * This is the normal order of things for mmap()/munmap(), but on Windows - * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. - * mappings do *not* coalesce/fragment. - */ -#undef JEMALLOC_MAPS_COALESCE - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -#undef JEMALLOC_MUNMAP - -/* TLS is used to map arenas and magazine caches to threads. */ -#undef JEMALLOC_TLS - -/* - * Used to mark unreachable code to quiet "end of non-void" compiler warnings. - * Don't use this directly; instead use unreachable() from util.h - */ -#undef JEMALLOC_INTERNAL_UNREACHABLE - -/* - * ffs*() functions to use for bitmapping. Don't use these directly; instead, - * use ffs_*() from util.h. - */ -#undef JEMALLOC_INTERNAL_FFSLL -#undef JEMALLOC_INTERNAL_FFSL -#undef JEMALLOC_INTERNAL_FFS - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -#undef JEMALLOC_IVSALLOC - -/* - * If defined, explicitly attempt to more uniformly distribute large allocation - * pointer alignments across all cache indices. - */ -#undef JEMALLOC_CACHE_OBLIVIOUS - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -#undef JEMALLOC_ZONE - -/* - * Methods for determining whether the OS overcommits. - * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's - * /proc/sys/vm.overcommit_memory file. - * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. - */ -#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT -#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY - -/* Defined if madvise(2) is available. */ -#undef JEMALLOC_HAVE_MADVISE - -/* - * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE - * arguments to madvise(2). - */ -#undef JEMALLOC_HAVE_MADVISE_HUGE - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_FREE) : This marks pages as being unused, such that they - * will be discarded rather than swapped out. 
- * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that - * new pages will be demand-zeroed if the - * address region is later touched. - */ -#undef JEMALLOC_PURGE_MADVISE_FREE -#undef JEMALLOC_PURGE_MADVISE_DONTNEED - -/* Defined if transparent huge page support is enabled. */ -#undef JEMALLOC_THP - -/* Define if operating system has alloca.h header. */ -#undef JEMALLOC_HAS_ALLOCA_H - -/* C99 restrict keyword supported. */ -#undef JEMALLOC_HAS_RESTRICT - -/* For use by hash code. */ -#undef JEMALLOC_BIG_ENDIAN - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#undef LG_SIZEOF_INT - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#undef LG_SIZEOF_LONG - -/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ -#undef LG_SIZEOF_LONG_LONG - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#undef LG_SIZEOF_INTMAX_T - -/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ -#undef JEMALLOC_GLIBC_MALLOC_HOOK - -/* glibc memalign hook. */ -#undef JEMALLOC_GLIBC_MEMALIGN_HOOK - -/* Adaptive mutex support in pthreads. */ -#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP - -/* - * If defined, jemalloc symbols are not exported (doesn't work when - * JEMALLOC_PREFIX is not defined). - */ -#undef JEMALLOC_EXPORT - -/* config.malloc_conf options string. */ -#undef JEMALLOC_CONFIG_MALLOC_CONF - -#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h deleted file mode 100755 index a08ba772..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for - * functions that are static inline functions if inlining is enabled, and - * single-definition library-private functions if inlining is disabled. - * - * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in - * which case the denoted functions are always static, regardless of whether - * inlining is enabled. - */ -#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) - /* Disable inlining to make debugging/profiling easier. 
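- *
- * With JEMALLOC_ENABLE_INLINE left undefined, the headers emit plain
- * prototypes and each function gets a single non-inline definition,
- * compiled only where the matching translation-unit guard is defined,
- * following the pattern used throughout these headers:
- *
- *   #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
- *   JEMALLOC_INLINE size_t u2rz(size_t usize) { ... }
- *   #endif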
*/ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define JEMALLOC_INLINE_C static -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# define JEMALLOC_INLINE_C static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -#ifdef JEMALLOC_CC_SILENCE -# define UNUSED JEMALLOC_ATTR(unused) -#else -# define UNUSED -#endif - -#define ZU(z) ((size_t)z) -#define ZI(z) ((ssize_t)z) -#define QU(q) ((uint64_t)q) -#define QI(q) ((int64_t)q) - -#define KZU(z) ZU(z##ULL) -#define KZI(z) ZI(z##LL) -#define KQU(q) QU(q##ULL) -#define KQI(q) QI(q##LL) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#ifndef JEMALLOC_HAS_RESTRICT -# define restrict -#endif diff --git a/sources/jemalloc/include-win/jemalloc/internal/mb.h b/sources/jemalloc/include-win/jemalloc/internal/mb.h deleted file mode 100755 index e58da5c3..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/mb.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void mb_write(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) -#ifdef __i386__ -/* - * According to the Intel Architecture Software Developer's Manual, current - * processors execute instructions in order from the perspective of other - * processors in a multiprocessor system, but 1) Intel reserves the right to - * change that, and 2) the compiler's optimizer could re-order instructions if - * there weren't some form of barrier. Therefore, even if running on an - * architecture that does not need memory barriers (everything through at least - * i686), an "optimizer barrier" is necessary. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - -# if 0 - /* This is a true memory barrier. */ - asm volatile ("pusha;" - "xor %%eax,%%eax;" - "cpuid;" - "popa;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -# else - /* - * This is hopefully enough to keep the compiler from reordering - * instructions around this one. - */ - asm volatile ("nop;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -# endif -} -#elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("sfence" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__powerpc__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("eieio" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -#elif defined(__sparc__) && defined(__arch64__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("membar #StoreStore" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__tile__) -JEMALLOC_INLINE void -mb_write(void) -{ - - __sync_synchronize(); -} -#else -/* - * This is much slower than a simple memory barrier, but the semantics of mutex - * unlock make this work. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - malloc_mutex_t mtx; - - malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); - malloc_mutex_lock(TSDN_NULL, &mtx); - malloc_mutex_unlock(TSDN_NULL, &mtx); -} -#endif -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/mutex.h b/sources/jemalloc/include-win/jemalloc/internal/mutex.h deleted file mode 100755 index 2b4b1c31..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/mutex.h +++ /dev/null @@ -1,145 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct malloc_mutex_s malloc_mutex_t; - -#ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) -# define MALLOC_MUTEX_INITIALIZER \ - {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -#else -# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ - WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -# else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER \ - {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} -# endif -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct malloc_mutex_s { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - SRWLOCK lock; -# else - CRITICAL_SECTION lock; -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock lock; -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; -#else - pthread_mutex_t lock; -#endif - witness_t witness; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
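- *
- * Without lazy locking, isthreaded is hard-wired to true so every
- * malloc_mutex_lock()/unlock() takes the underlying lock
- * unconditionally; with JEMALLOC_LAZY_LOCK it remains false until a
- * second thread is launched.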
*/ -# define isthreaded true -#endif - -bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, - witness_rank_t rank); -void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); -bool malloc_mutex_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); -void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_not_owner(tsdn, &mutex->witness); - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - AcquireSRWLockExclusive(&mutex->lock); -# else - EnterCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock_lock(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif - } - witness_lock(tsdn, &mutex->witness); -} - -JEMALLOC_INLINE void -malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_unlock(tsdn, &mutex->witness); - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - ReleaseSRWLockExclusive(&mutex->lock); -# else - LeaveCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) - os_unfair_lock_unlock(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif - } -} - -JEMALLOC_INLINE void -malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_owner(tsdn, &mutex->witness); -} - -JEMALLOC_INLINE void -malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ - - witness_assert_not_owner(tsdn, &mutex->witness); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/nstime.h b/sources/jemalloc/include-win/jemalloc/internal/nstime.h deleted file mode 100755 index 93b27dc8..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/nstime.h +++ /dev/null @@ -1,48 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct nstime_s nstime_t; - -/* Maximum supported number of seconds (~584 years). 
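- *
- * nstime_t stores time as a single uint64_t nanosecond count (see
- * struct nstime_s below), so the ceiling is roughly:
- *
- *   2^64 ns ~= 1.8 * 10^19 ns ~= 1.8 * 10^10 s ~= 584 years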
*/ -#define NSTIME_SEC_MAX KQU(18446744072) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct nstime_s { - uint64_t ns; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void nstime_init(nstime_t *time, uint64_t ns); -void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); -uint64_t nstime_ns(const nstime_t *time); -uint64_t nstime_sec(const nstime_t *time); -uint64_t nstime_nsec(const nstime_t *time); -void nstime_copy(nstime_t *time, const nstime_t *source); -int nstime_compare(const nstime_t *a, const nstime_t *b); -void nstime_add(nstime_t *time, const nstime_t *addend); -void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); -void nstime_imultiply(nstime_t *time, uint64_t multiplier); -void nstime_idivide(nstime_t *time, uint64_t divisor); -uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); -#ifdef JEMALLOC_JET -typedef bool (nstime_monotonic_t)(void); -extern nstime_monotonic_t *nstime_monotonic; -typedef bool (nstime_update_t)(nstime_t *); -extern nstime_update_t *nstime_update; -#else -bool nstime_monotonic(void); -bool nstime_update(nstime_t *time); -#endif - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/pages.h b/sources/jemalloc/include-win/jemalloc/internal/pages.h deleted file mode 100755 index 4ae9f156..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/pages.h +++ /dev/null @@ -1,29 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *pages_map(void *addr, size_t size, bool *commit); -void pages_unmap(void *addr, size_t size); -void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, - size_t size, bool *commit); -bool pages_commit(void *addr, size_t size); -bool pages_decommit(void *addr, size_t size); -bool pages_purge(void *addr, size_t size); -bool pages_huge(void *addr, size_t size); -bool pages_nohuge(void *addr, size_t size); -void pages_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/ph.h b/sources/jemalloc/include-win/jemalloc/internal/ph.h deleted file mode 100755 index 4f91c333..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/ph.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * A Pairing Heap implementation. - * - * "The Pairing Heap: A New Form of Self-Adjusting Heap" - * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf - * - * With auxiliary twopass list, described in a follow on paper. 
- * - * "Pairing Heaps: Experiments and Analysis" - * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf - * - ******************************************************************************* - */ - -#ifndef PH_H_ -#define PH_H_ - -/* Node structure. */ -#define phn(a_type) \ -struct { \ - a_type *phn_prev; \ - a_type *phn_next; \ - a_type *phn_lchild; \ -} - -/* Root structure. */ -#define ph(a_type) \ -struct { \ - a_type *ph_root; \ -} - -/* Internal utility macros. */ -#define phn_lchild_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_lchild) -#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ - a_phn->a_field.phn_lchild = a_lchild; \ -} while (0) - -#define phn_next_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_next) -#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ - a_phn->a_field.phn_prev = a_prev; \ -} while (0) - -#define phn_prev_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_prev) -#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ - a_phn->a_field.phn_next = a_next; \ -} while (0) - -#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ - a_type *phn0child; \ - \ - assert(a_phn0 != NULL); \ - assert(a_phn1 != NULL); \ - assert(a_cmp(a_phn0, a_phn1) <= 0); \ - \ - phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ - phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ - phn_next_set(a_type, a_field, a_phn1, phn0child); \ - if (phn0child != NULL) \ - phn_prev_set(a_type, a_field, phn0child, a_phn1); \ - phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ -} while (0) - -#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ - if (a_phn0 == NULL) \ - r_phn = a_phn1; \ - else if (a_phn1 == NULL) \ - r_phn = a_phn0; \ - else if (a_cmp(a_phn0, a_phn1) < 0) { \ - phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ - a_cmp); \ - r_phn = a_phn0; \ - } else { \ - phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ - a_cmp); \ - r_phn = a_phn1; \ - } \ -} while (0) - -#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *head = NULL; \ - a_type *tail = NULL; \ - a_type *phn0 = a_phn; \ - a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ - \ - /* \ - * Multipass merge, wherein the first two elements of a FIFO \ - * are repeatedly merged, and each result is appended to the \ - * singly linked FIFO, until the FIFO contains only a single \ - * element. We start with a sibling list but no reference to \ - * its tail, so we do a single pass over the sibling list to \ - * populate the FIFO. 
\ - */ \ - if (phn1 != NULL) { \ - a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ - if (phnrest != NULL) \ - phn_prev_set(a_type, a_field, phnrest, NULL); \ - phn_prev_set(a_type, a_field, phn0, NULL); \ - phn_next_set(a_type, a_field, phn0, NULL); \ - phn_prev_set(a_type, a_field, phn1, NULL); \ - phn_next_set(a_type, a_field, phn1, NULL); \ - phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ - head = tail = phn0; \ - phn0 = phnrest; \ - while (phn0 != NULL) { \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - phnrest = phn_next_get(a_type, a_field, \ - phn1); \ - if (phnrest != NULL) { \ - phn_prev_set(a_type, a_field, \ - phnrest, NULL); \ - } \ - phn_prev_set(a_type, a_field, phn0, \ - NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - phn_prev_set(a_type, a_field, phn1, \ - NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = phnrest; \ - } else { \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = NULL; \ - } \ - } \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - while (true) { \ - head = phn_next_get(a_type, a_field, \ - phn1); \ - assert(phn_prev_get(a_type, a_field, \ - phn0) == NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - assert(phn_prev_get(a_type, a_field, \ - phn1) == NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - if (head == NULL) \ - break; \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, \ - phn0); \ - } \ - } \ - } \ - r_phn = phn0; \ -} while (0) - -#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ - a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ - if (phn != NULL) { \ - phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ - phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ - phn_prev_set(a_type, a_field, phn, NULL); \ - ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ - assert(phn_next_get(a_type, a_field, phn) == NULL); \ - phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ - a_ph->ph_root); \ - } \ -} while (0) - -#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ - if (lchild == NULL) \ - r_phn = NULL; \ - else { \ - ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ - r_phn); \ - } \ -} while (0) - -/* - * The ph_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to ph_gen(). - */ -#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ -a_attr void a_prefix##new(a_ph_type *ph); \ -a_attr bool a_prefix##empty(a_ph_type *ph); \ -a_attr a_type *a_prefix##first(a_ph_type *ph); \ -a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ -a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ -a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); - -/* - * The ph_gen() macro generates a type-specific pairing heap implementation, - * based on the above cpp macros. 
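- *
- * For instance, given a hypothetical node_t with an embedded phn(node_t)
- * field named "link" and a comparison function node_cmp(), a heap could
- * be instantiated as:
- *
- *   typedef ph(node_t) node_heap_t;
- *   ph_proto(static, node_heap_, node_heap_t, node_t)
- *   ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
- *
- * yielding node_heap_new(), node_heap_insert(), node_heap_remove_first(),
- * and so on.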
- */ -#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_ph_type *ph) \ -{ \ - \ - memset(ph, 0, sizeof(ph(a_type))); \ -} \ -a_attr bool \ -a_prefix##empty(a_ph_type *ph) \ -{ \ - \ - return (ph->ph_root == NULL); \ -} \ -a_attr a_type * \ -a_prefix##first(a_ph_type *ph) \ -{ \ - \ - if (ph->ph_root == NULL) \ - return (NULL); \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - return (ph->ph_root); \ -} \ -a_attr void \ -a_prefix##insert(a_ph_type *ph, a_type *phn) \ -{ \ - \ - memset(&phn->a_field, 0, sizeof(phn(a_type))); \ - \ - /* \ - * Treat the root as an aux list during insertion, and lazily \ - * merge during a_prefix##remove_first(). For elements that \ - * are inserted, then removed via a_prefix##remove() before the \ - * aux list is ever processed, this makes insert/remove \ - * constant-time, whereas eager merging would make insert \ - * O(log n). \ - */ \ - if (ph->ph_root == NULL) \ - ph->ph_root = phn; \ - else { \ - phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ - a_field, ph->ph_root)); \ - if (phn_next_get(a_type, a_field, ph->ph_root) != \ - NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, ph->ph_root), \ - phn); \ - } \ - phn_prev_set(a_type, a_field, phn, ph->ph_root); \ - phn_next_set(a_type, a_field, ph->ph_root, phn); \ - } \ -} \ -a_attr a_type * \ -a_prefix##remove_first(a_ph_type *ph) \ -{ \ - a_type *ret; \ - \ - if (ph->ph_root == NULL) \ - return (NULL); \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - \ - ret = ph->ph_root; \ - \ - ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ - ph->ph_root); \ - \ - return (ret); \ -} \ -a_attr void \ -a_prefix##remove(a_ph_type *ph, a_type *phn) \ -{ \ - a_type *replace, *parent; \ - \ - /* \ - * We can delete from aux list without merging it, but we need \ - * to merge if we are dealing with the root node. \ - */ \ - if (ph->ph_root == phn) { \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - if (ph->ph_root == phn) { \ - ph_merge_children(a_type, a_field, ph->ph_root, \ - a_cmp, ph->ph_root); \ - return; \ - } \ - } \ - \ - /* Get parent (if phn is leftmost child) before mutating. */ \ - if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ - if (phn_lchild_get(a_type, a_field, parent) != phn) \ - parent = NULL; \ - } \ - /* Find a possible replacement node, and link to parent. */ \ - ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ - /* Set next/prev for sibling linked list. 
*/ \ - if (replace != NULL) { \ - if (parent != NULL) { \ - phn_prev_set(a_type, a_field, replace, parent); \ - phn_lchild_set(a_type, a_field, parent, \ - replace); \ - } else { \ - phn_prev_set(a_type, a_field, replace, \ - phn_prev_get(a_type, a_field, phn)); \ - if (phn_prev_get(a_type, a_field, phn) != \ - NULL) { \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - replace); \ - } \ - } \ - phn_next_set(a_type, a_field, replace, \ - phn_next_get(a_type, a_field, phn)); \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - replace); \ - } \ - } else { \ - if (parent != NULL) { \ - a_type *next = phn_next_get(a_type, a_field, \ - phn); \ - phn_lchild_set(a_type, a_field, parent, next); \ - if (next != NULL) { \ - phn_prev_set(a_type, a_field, next, \ - parent); \ - } \ - } else { \ - assert(phn_prev_get(a_type, a_field, phn) != \ - NULL); \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - phn_next_get(a_type, a_field, phn)); \ - } \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - phn_prev_get(a_type, a_field, phn)); \ - } \ - } \ -} - -#endif /* PH_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h deleted file mode 100755 index 7ebeeba8..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h +++ /dev/null @@ -1,639 +0,0 @@ -#define a0dalloc JEMALLOC_N(a0dalloc) -#define a0get JEMALLOC_N(a0get) -#define a0malloc JEMALLOC_N(a0malloc) -#define arena_aalloc JEMALLOC_N(arena_aalloc) -#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) -#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) -#define arena_bin_index JEMALLOC_N(arena_bin_index) -#define arena_bin_info JEMALLOC_N(arena_bin_info) -#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) -#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) -#define arena_boot JEMALLOC_N(arena_boot) -#define arena_choose JEMALLOC_N(arena_choose) -#define arena_choose_hard JEMALLOC_N(arena_choose_hard) -#define arena_choose_impl JEMALLOC_N(arena_choose_impl) -#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) -#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) -#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) -#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) -#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) -#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) -#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) -#define arena_cleanup JEMALLOC_N(arena_cleanup) -#define arena_dalloc JEMALLOC_N(arena_dalloc) -#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) -#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) -#define arena_decay_tick JEMALLOC_N(arena_decay_tick) -#define arena_decay_ticks 
JEMALLOC_N(arena_decay_ticks) -#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) -#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) -#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) -#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set) -#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) -#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) -#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) -#define arena_get JEMALLOC_N(arena_get) -#define arena_ichoose JEMALLOC_N(arena_ichoose) -#define arena_init JEMALLOC_N(arena_init) -#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) -#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) -#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) -#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) -#define arena_malloc JEMALLOC_N(arena_malloc) -#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) -#define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) -#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) -#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) -#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) -#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) -#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) -#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) -#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) -#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) -#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) -#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) -#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) -#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) -#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) -#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) -#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) -#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) -#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) -#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) -#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) -#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_maxrun JEMALLOC_N(arena_maxrun) -#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) -#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) -#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) -#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) -#define arena_migrate JEMALLOC_N(arena_migrate) -#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) -#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) -#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) -#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) -#define arena_new JEMALLOC_N(arena_new) -#define arena_node_alloc JEMALLOC_N(arena_node_alloc) -#define arena_node_dalloc 
JEMALLOC_N(arena_node_dalloc) -#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) -#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) -#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) -#define arena_palloc JEMALLOC_N(arena_palloc) -#define arena_postfork_child JEMALLOC_N(arena_postfork_child) -#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork0 JEMALLOC_N(arena_prefork0) -#define arena_prefork1 JEMALLOC_N(arena_prefork1) -#define arena_prefork2 JEMALLOC_N(arena_prefork2) -#define arena_prefork3 JEMALLOC_N(arena_prefork3) -#define arena_prof_accum JEMALLOC_N(arena_prof_accum) -#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) -#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) -#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) -#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) -#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) -#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge JEMALLOC_N(arena_purge) -#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) -#define arena_ralloc JEMALLOC_N(arena_ralloc) -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) -#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -#define arena_reset JEMALLOC_N(arena_reset) -#define arena_run_regind JEMALLOC_N(arena_run_regind) -#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) -#define arena_salloc JEMALLOC_N(arena_salloc) -#define arena_sdalloc JEMALLOC_N(arena_sdalloc) -#define arena_stats_merge JEMALLOC_N(arena_stats_merge) -#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) -#define arena_tdata_get JEMALLOC_N(arena_tdata_get) -#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) -#define arenas JEMALLOC_N(arenas) -#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) -#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) -#define atomic_add_p JEMALLOC_N(atomic_add_p) -#define atomic_add_u JEMALLOC_N(atomic_add_u) -#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) -#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) -#define atomic_add_z JEMALLOC_N(atomic_add_z) -#define atomic_cas_p JEMALLOC_N(atomic_cas_p) -#define atomic_cas_u JEMALLOC_N(atomic_cas_u) -#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) -#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64) -#define atomic_cas_z JEMALLOC_N(atomic_cas_z) -#define atomic_sub_p JEMALLOC_N(atomic_sub_p) -#define atomic_sub_u JEMALLOC_N(atomic_sub_u) -#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) -#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) -#define atomic_sub_z JEMALLOC_N(atomic_sub_z) -#define atomic_write_p JEMALLOC_N(atomic_write_p) -#define atomic_write_u JEMALLOC_N(atomic_write_u) -#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) -#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) -#define atomic_write_z JEMALLOC_N(atomic_write_z) -#define base_alloc JEMALLOC_N(base_alloc) -#define base_boot JEMALLOC_N(base_boot) -#define base_postfork_child JEMALLOC_N(base_postfork_child) -#define base_postfork_parent JEMALLOC_N(base_postfork_parent) -#define base_prefork JEMALLOC_N(base_prefork) -#define base_stats_get JEMALLOC_N(base_stats_get) 
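
The ph.h removed at the top of this run of hunks deserves a note: ph_gen() stamps out a complete pairing-heap implementation per node type, and its insert path is deliberately lazy. Below is a minimal sketch of what the generated insert amounts to for a concrete type (node_t and node_heap_t are illustrative names standing in for the macro parameters; the child list is untouched because insertion only splices into the root's aux list, leaving the actual heap ordering to the deferred ph_merge_aux() inside first()/remove_first()):

/*
 * Sketch: roughly what ph_gen(static, node_heap_, node_heap_t, node_t,
 * ph_link, node_cmp) expands to for insert, with the accessor macros
 * inlined.  Insertion is O(1): the new node is spliced into the root's
 * aux (sibling) list and no comparisons happen here.
 */
typedef struct node_s node_t;
struct node_s {
    int      key;
    node_t  *phn_prev;
    node_t  *phn_next;
    node_t  *phn_lchild;
};

typedef struct { node_t *ph_root; } node_heap_t;

static void
node_heap_insert(node_heap_t *ph, node_t *phn)
{
    phn->phn_prev = phn->phn_next = phn->phn_lchild = NULL;
    if (ph->ph_root == NULL) {
        ph->ph_root = phn;                     /* empty heap: trivial */
    } else {
        /* Splice phn into the aux list right after the root. */
        phn->phn_next = ph->ph_root->phn_next;
        if (phn->phn_next != NULL)
            phn->phn_next->phn_prev = phn;
        phn->phn_prev = ph->ph_root;
        ph->ph_root->phn_next = phn;
    }
}

This is exactly what the deleted comment means by constant-time insert: eager merging would cost O(log n) per insertion, while the aux list defers that work until a removal actually needs the minimum. The subtle part is remove() in the same macro: phn_prev serves double duty as either a sibling link or the parent's back-pointer, so the code must capture the parent (checking phn against the parent's lchild) before ph_merge_children() mutates the links.
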
-#define bitmap_full JEMALLOC_N(bitmap_full) -#define bitmap_get JEMALLOC_N(bitmap_get) -#define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_init JEMALLOC_N(bitmap_init) -#define bitmap_set JEMALLOC_N(bitmap_set) -#define bitmap_sfu JEMALLOC_N(bitmap_sfu) -#define bitmap_size JEMALLOC_N(bitmap_size) -#define bitmap_unset JEMALLOC_N(bitmap_unset) -#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) -#define bootstrap_free JEMALLOC_N(bootstrap_free) -#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) -#define bt_init JEMALLOC_N(bt_init) -#define buferror JEMALLOC_N(buferror) -#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) -#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) -#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) -#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) -#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) -#define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) -#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) -#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) -#define chunk_deregister JEMALLOC_N(chunk_deregister) -#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) -#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) -#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) -#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) -#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) -#define chunk_in_dss JEMALLOC_N(chunk_in_dss) -#define chunk_lookup JEMALLOC_N(chunk_lookup) -#define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper) -#define chunk_register JEMALLOC_N(chunk_register) -#define chunks_rtree JEMALLOC_N(chunks_rtree) -#define chunksize JEMALLOC_N(chunksize) -#define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_count JEMALLOC_N(ckh_count) -#define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_iter JEMALLOC_N(ckh_iter) -#define ckh_new JEMALLOC_N(ckh_new) -#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) -#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_remove JEMALLOC_N(ckh_remove) -#define ckh_search JEMALLOC_N(ckh_search) -#define ckh_string_hash JEMALLOC_N(ckh_string_hash) -#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ctl_boot JEMALLOC_N(ctl_boot) -#define ctl_bymib JEMALLOC_N(ctl_bymib) -#define ctl_byname JEMALLOC_N(ctl_byname) -#define ctl_nametomib JEMALLOC_N(ctl_nametomib) -#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) -#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) -#define ctl_prefork JEMALLOC_N(ctl_prefork) -#define decay_ticker_get JEMALLOC_N(decay_ticker_get) -#define dss_prec_names JEMALLOC_N(dss_prec_names) -#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) -#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) -#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) -#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) -#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) -#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) -#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) -#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) -#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) -#define 
extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) -#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) -#define extent_node_init JEMALLOC_N(extent_node_init) -#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) -#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) -#define extent_node_size_get JEMALLOC_N(extent_node_size_get) -#define extent_node_size_set JEMALLOC_N(extent_node_size_set) -#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) -#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) -#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) -#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) -#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) -#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) -#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) -#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) -#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) -#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) -#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) -#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) -#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) -#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) -#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) -#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) -#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) -#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) -#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) -#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) -#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) -#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) -#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) -#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) -#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) -#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) -#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) -#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) -#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) -#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) -#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) -#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) -#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) -#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) -#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) -#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) -#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) -#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) -#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) -#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) -#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) -#define extent_tree_szsnad_reverse_iter_start 
JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) -#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search) -#define ffs_llu JEMALLOC_N(ffs_llu) -#define ffs_lu JEMALLOC_N(ffs_lu) -#define ffs_u JEMALLOC_N(ffs_u) -#define ffs_u32 JEMALLOC_N(ffs_u32) -#define ffs_u64 JEMALLOC_N(ffs_u64) -#define ffs_zu JEMALLOC_N(ffs_zu) -#define get_errno JEMALLOC_N(get_errno) -#define hash JEMALLOC_N(hash) -#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) -#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) -#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) -#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) -#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) -#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) -#define hash_x64_128 JEMALLOC_N(hash_x64_128) -#define hash_x86_128 JEMALLOC_N(hash_x86_128) -#define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_aalloc JEMALLOC_N(huge_aalloc) -#define huge_dalloc JEMALLOC_N(huge_dalloc) -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -#define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) -#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) -#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) -#define huge_ralloc JEMALLOC_N(huge_ralloc) -#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) -#define huge_salloc JEMALLOC_N(huge_salloc) -#define iaalloc JEMALLOC_N(iaalloc) -#define ialloc JEMALLOC_N(ialloc) -#define iallocztm JEMALLOC_N(iallocztm) -#define iarena_cleanup JEMALLOC_N(iarena_cleanup) -#define idalloc JEMALLOC_N(idalloc) -#define idalloctm JEMALLOC_N(idalloctm) -#define in_valgrind JEMALLOC_N(in_valgrind) -#define index2size JEMALLOC_N(index2size) -#define index2size_compute JEMALLOC_N(index2size_compute) -#define index2size_lookup JEMALLOC_N(index2size_lookup) -#define index2size_tab JEMALLOC_N(index2size_tab) -#define ipalloc JEMALLOC_N(ipalloc) -#define ipalloct JEMALLOC_N(ipalloct) -#define ipallocztm JEMALLOC_N(ipallocztm) -#define iqalloc JEMALLOC_N(iqalloc) -#define iralloc JEMALLOC_N(iralloc) -#define iralloct JEMALLOC_N(iralloct) -#define iralloct_realign JEMALLOC_N(iralloct_realign) -#define isalloc JEMALLOC_N(isalloc) -#define isdalloct JEMALLOC_N(isdalloct) -#define isqalloc JEMALLOC_N(isqalloc) -#define isthreaded JEMALLOC_N(isthreaded) -#define ivsalloc JEMALLOC_N(ivsalloc) -#define ixalloc JEMALLOC_N(ixalloc) -#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) -#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) -#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) -#define large_maxclass JEMALLOC_N(large_maxclass) -#define lg_floor JEMALLOC_N(lg_floor) -#define lg_prof_sample JEMALLOC_N(lg_prof_sample) -#define malloc_cprintf JEMALLOC_N(malloc_cprintf) -#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) -#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) -#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) -#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) -#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) -#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) -#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) -#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) -#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) -#define malloc_printf JEMALLOC_N(malloc_printf) -#define malloc_snprintf JEMALLOC_N(malloc_snprintf) -#define 
malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) -#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) -#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) -#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) -#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) -#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) -#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) -#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) -#define malloc_write JEMALLOC_N(malloc_write) -#define map_bias JEMALLOC_N(map_bias) -#define map_misc_offset JEMALLOC_N(map_misc_offset) -#define mb_write JEMALLOC_N(mb_write) -#define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) -#define narenas_total_get JEMALLOC_N(narenas_total_get) -#define ncpus JEMALLOC_N(ncpus) -#define nhbins JEMALLOC_N(nhbins) -#define nhclasses JEMALLOC_N(nhclasses) -#define nlclasses JEMALLOC_N(nlclasses) -#define nstime_add JEMALLOC_N(nstime_add) -#define nstime_compare JEMALLOC_N(nstime_compare) -#define nstime_copy JEMALLOC_N(nstime_copy) -#define nstime_divide JEMALLOC_N(nstime_divide) -#define nstime_idivide JEMALLOC_N(nstime_idivide) -#define nstime_imultiply JEMALLOC_N(nstime_imultiply) -#define nstime_init JEMALLOC_N(nstime_init) -#define nstime_init2 JEMALLOC_N(nstime_init2) -#define nstime_monotonic JEMALLOC_N(nstime_monotonic) -#define nstime_ns JEMALLOC_N(nstime_ns) -#define nstime_nsec JEMALLOC_N(nstime_nsec) -#define nstime_sec JEMALLOC_N(nstime_sec) -#define nstime_subtract JEMALLOC_N(nstime_subtract) -#define nstime_update JEMALLOC_N(nstime_update) -#define opt_abort JEMALLOC_N(opt_abort) -#define opt_decay_time JEMALLOC_N(opt_decay_time) -#define opt_dss JEMALLOC_N(opt_dss) -#define opt_junk JEMALLOC_N(opt_junk) -#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) -#define opt_junk_free JEMALLOC_N(opt_junk_free) -#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) -#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) -#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) -#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) -#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) -#define opt_narenas JEMALLOC_N(opt_narenas) -#define opt_prof JEMALLOC_N(opt_prof) -#define opt_prof_accum JEMALLOC_N(opt_prof_accum) -#define opt_prof_active JEMALLOC_N(opt_prof_active) -#define opt_prof_final JEMALLOC_N(opt_prof_final) -#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) -#define opt_prof_leak JEMALLOC_N(opt_prof_leak) -#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) -#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) -#define opt_purge JEMALLOC_N(opt_purge) -#define opt_quarantine JEMALLOC_N(opt_quarantine) -#define opt_redzone JEMALLOC_N(opt_redzone) -#define opt_stats_print JEMALLOC_N(opt_stats_print) -#define opt_tcache JEMALLOC_N(opt_tcache) -#define opt_thp JEMALLOC_N(opt_thp) -#define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_xmalloc JEMALLOC_N(opt_xmalloc) -#define opt_zero JEMALLOC_N(opt_zero) -#define p2rz JEMALLOC_N(p2rz) -#define pages_boot JEMALLOC_N(pages_boot) -#define pages_commit JEMALLOC_N(pages_commit) -#define pages_decommit JEMALLOC_N(pages_decommit) -#define pages_huge JEMALLOC_N(pages_huge) -#define pages_map JEMALLOC_N(pages_map) -#define pages_nohuge JEMALLOC_N(pages_nohuge) -#define pages_purge JEMALLOC_N(pages_purge) -#define pages_trim JEMALLOC_N(pages_trim) -#define pages_unmap JEMALLOC_N(pages_unmap) -#define 
pind2sz JEMALLOC_N(pind2sz) -#define pind2sz_compute JEMALLOC_N(pind2sz_compute) -#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) -#define pind2sz_tab JEMALLOC_N(pind2sz_tab) -#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) -#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) -#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) -#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) -#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) -#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) -#define prng_range_u32 JEMALLOC_N(prng_range_u32) -#define prng_range_u64 JEMALLOC_N(prng_range_u64) -#define prng_range_zu JEMALLOC_N(prng_range_zu) -#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) -#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) -#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu) -#define prof_active JEMALLOC_N(prof_active) -#define prof_active_get JEMALLOC_N(prof_active_get) -#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) -#define prof_active_set JEMALLOC_N(prof_active_set) -#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) -#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) -#define prof_backtrace JEMALLOC_N(prof_backtrace) -#define prof_boot0 JEMALLOC_N(prof_boot0) -#define prof_boot1 JEMALLOC_N(prof_boot1) -#define prof_boot2 JEMALLOC_N(prof_boot2) -#define prof_bt_count JEMALLOC_N(prof_bt_count) -#define prof_dump_header JEMALLOC_N(prof_dump_header) -#define prof_dump_open JEMALLOC_N(prof_dump_open) -#define prof_free JEMALLOC_N(prof_free) -#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) -#define prof_gdump JEMALLOC_N(prof_gdump) -#define prof_gdump_get JEMALLOC_N(prof_gdump_get) -#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) -#define prof_gdump_set JEMALLOC_N(prof_gdump_set) -#define prof_gdump_val JEMALLOC_N(prof_gdump_val) -#define prof_idump JEMALLOC_N(prof_idump) -#define prof_interval JEMALLOC_N(prof_interval) -#define prof_lookup JEMALLOC_N(prof_lookup) -#define prof_malloc JEMALLOC_N(prof_malloc) -#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) -#define prof_mdump JEMALLOC_N(prof_mdump) -#define prof_postfork_child JEMALLOC_N(prof_postfork_child) -#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork0 JEMALLOC_N(prof_prefork0) -#define prof_prefork1 JEMALLOC_N(prof_prefork1) -#define prof_realloc JEMALLOC_N(prof_realloc) -#define prof_reset JEMALLOC_N(prof_reset) -#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) -#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tctx_get JEMALLOC_N(prof_tctx_get) -#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) -#define prof_tctx_set JEMALLOC_N(prof_tctx_set) -#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) -#define prof_tdata_count JEMALLOC_N(prof_tdata_count) -#define prof_tdata_get JEMALLOC_N(prof_tdata_get) -#define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) -#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) -#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) -#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) -#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) -#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) -#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) -#define psz2ind JEMALLOC_N(psz2ind) -#define psz2u JEMALLOC_N(psz2u) -#define 
purge_mode_names JEMALLOC_N(purge_mode_names) -#define quarantine JEMALLOC_N(quarantine) -#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) -#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define rtree_child_read JEMALLOC_N(rtree_child_read) -#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) -#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) -#define rtree_delete JEMALLOC_N(rtree_delete) -#define rtree_get JEMALLOC_N(rtree_get) -#define rtree_new JEMALLOC_N(rtree_new) -#define rtree_node_valid JEMALLOC_N(rtree_node_valid) -#define rtree_set JEMALLOC_N(rtree_set) -#define rtree_start_level JEMALLOC_N(rtree_start_level) -#define rtree_subkey JEMALLOC_N(rtree_subkey) -#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) -#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) -#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) -#define rtree_val_read JEMALLOC_N(rtree_val_read) -#define rtree_val_write JEMALLOC_N(rtree_val_write) -#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) -#define run_quantize_floor JEMALLOC_N(run_quantize_floor) -#define s2u JEMALLOC_N(s2u) -#define s2u_compute JEMALLOC_N(s2u_compute) -#define s2u_lookup JEMALLOC_N(s2u_lookup) -#define sa2u JEMALLOC_N(sa2u) -#define set_errno JEMALLOC_N(set_errno) -#define size2index JEMALLOC_N(size2index) -#define size2index_compute JEMALLOC_N(size2index_compute) -#define size2index_lookup JEMALLOC_N(size2index_lookup) -#define size2index_tab JEMALLOC_N(size2index_tab) -#define spin_adaptive JEMALLOC_N(spin_adaptive) -#define spin_init JEMALLOC_N(spin_init) -#define stats_cactive JEMALLOC_N(stats_cactive) -#define stats_cactive_add JEMALLOC_N(stats_cactive_add) -#define stats_cactive_get JEMALLOC_N(stats_cactive_get) -#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_print JEMALLOC_N(stats_print) -#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) -#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) -#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) -#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) -#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) -#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) -#define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot JEMALLOC_N(tcache_boot) -#define tcache_cleanup JEMALLOC_N(tcache_cleanup) -#define tcache_create JEMALLOC_N(tcache_create) -#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) -#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) -#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_event JEMALLOC_N(tcache_event) -#define tcache_event_hard JEMALLOC_N(tcache_event_hard) -#define tcache_flush JEMALLOC_N(tcache_flush) -#define tcache_get JEMALLOC_N(tcache_get) -#define tcache_get_hard JEMALLOC_N(tcache_get_hard) -#define tcache_maxclass JEMALLOC_N(tcache_maxclass) -#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) -#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) -#define tcache_prefork JEMALLOC_N(tcache_prefork) -#define tcache_salloc JEMALLOC_N(tcache_salloc) -#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcaches JEMALLOC_N(tcaches) 
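
Every line of the private_namespace.h being deleted here follows one pattern: map an internal identifier onto JEMALLOC_N(identifier). As a hedged sketch of the mechanism (assuming the conventional je_ prefix; the actual prefix is chosen at jemalloc configure time):

/* Sketch of the mangling idiom; "je_" is the customary default prefix. */
#define JEMALLOC_N(n) je_##n

#define arena_boot JEMALLOC_N(arena_boot)   /* one line of private_namespace.h */

void arena_boot(void);   /* compiles and links as je_arena_boot(void) */

The point is symbol hygiene: with every internal function and global rewritten this way, a statically linked jemalloc cannot collide with identically named symbols in the host application or in other allocators linked into the same binary.
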
-#define tcaches_create JEMALLOC_N(tcaches_create) -#define tcaches_destroy JEMALLOC_N(tcaches_destroy) -#define tcaches_flush JEMALLOC_N(tcaches_flush) -#define tcaches_get JEMALLOC_N(tcaches_get) -#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) -#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) -#define ticker_copy JEMALLOC_N(ticker_copy) -#define ticker_init JEMALLOC_N(ticker_init) -#define ticker_read JEMALLOC_N(ticker_read) -#define ticker_tick JEMALLOC_N(ticker_tick) -#define ticker_ticks JEMALLOC_N(ticker_ticks) -#define tsd_arena_get JEMALLOC_N(tsd_arena_get) -#define tsd_arena_set JEMALLOC_N(tsd_arena_set) -#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) -#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) -#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) -#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) -#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) -#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) -#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) -#define tsd_boot JEMALLOC_N(tsd_boot) -#define tsd_boot0 JEMALLOC_N(tsd_boot0) -#define tsd_boot1 JEMALLOC_N(tsd_boot1) -#define tsd_booted JEMALLOC_N(tsd_booted) -#define tsd_booted_get JEMALLOC_N(tsd_booted_get) -#define tsd_cleanup JEMALLOC_N(tsd_cleanup) -#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) -#define tsd_fetch JEMALLOC_N(tsd_fetch) -#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) -#define tsd_get JEMALLOC_N(tsd_get) -#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) -#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) -#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) -#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) -#define tsd_initialized JEMALLOC_N(tsd_initialized) -#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) -#define tsd_init_finish JEMALLOC_N(tsd_init_finish) -#define tsd_init_head JEMALLOC_N(tsd_init_head) -#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) -#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) -#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) -#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) -#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) -#define tsd_nominal JEMALLOC_N(tsd_nominal) -#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) -#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) -#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) -#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) -#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) -#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) -#define tsd_set JEMALLOC_N(tsd_set) -#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get) -#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) -#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) -#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) -#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) -#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) -#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) -#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) -#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) -#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) -#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) -#define tsd_thread_deallocatedp_get 
JEMALLOC_N(tsd_thread_deallocatedp_get) -#define tsd_tls JEMALLOC_N(tsd_tls) -#define tsd_tsd JEMALLOC_N(tsd_tsd) -#define tsd_tsdn JEMALLOC_N(tsd_tsdn) -#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get) -#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) -#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) -#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) -#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) -#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) -#define tsdn_fetch JEMALLOC_N(tsdn_fetch) -#define tsdn_null JEMALLOC_N(tsdn_null) -#define tsdn_tsd JEMALLOC_N(tsdn_tsd) -#define u2rz JEMALLOC_N(u2rz) -#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) -#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) -#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) -#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) -#define witness_assert_depth JEMALLOC_N(witness_assert_depth) -#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) -#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) -#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) -#define witness_assert_owner JEMALLOC_N(witness_assert_owner) -#define witness_depth_error JEMALLOC_N(witness_depth_error) -#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) -#define witness_init JEMALLOC_N(witness_init) -#define witness_lock JEMALLOC_N(witness_lock) -#define witness_lock_error JEMALLOC_N(witness_lock_error) -#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) -#define witness_owner JEMALLOC_N(witness_owner) -#define witness_owner_error JEMALLOC_N(witness_owner_error) -#define witness_postfork_child JEMALLOC_N(witness_postfork_child) -#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) -#define witness_prefork JEMALLOC_N(witness_prefork) -#define witness_unlock JEMALLOC_N(witness_unlock) -#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) -#define zone_register JEMALLOC_N(zone_register) diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh deleted file mode 100755 index cd25eb30..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -for symbol in `cat $1` ; do - echo "#define ${symbol} JEMALLOC_N(${symbol})" -done diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt b/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt deleted file mode 100755 index 60b57e5a..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt +++ /dev/null @@ -1,639 +0,0 @@ -a0dalloc -a0get -a0malloc -arena_aalloc -arena_alloc_junk_small -arena_basic_stats_merge -arena_bin_index -arena_bin_info -arena_bitselm_get_const -arena_bitselm_get_mutable -arena_boot -arena_choose -arena_choose_hard -arena_choose_impl -arena_chunk_alloc_huge -arena_chunk_cache_maybe_insert -arena_chunk_cache_maybe_remove -arena_chunk_dalloc_huge -arena_chunk_ralloc_huge_expand -arena_chunk_ralloc_huge_shrink -arena_chunk_ralloc_huge_similar -arena_cleanup -arena_dalloc -arena_dalloc_bin -arena_dalloc_bin_junked_locked -arena_dalloc_junk_large -arena_dalloc_junk_small -arena_dalloc_large -arena_dalloc_large_junked_locked -arena_dalloc_small -arena_decay_tick -arena_decay_ticks -arena_decay_time_default_get 
-arena_decay_time_default_set -arena_decay_time_get -arena_decay_time_set -arena_dss_prec_get -arena_dss_prec_set -arena_extent_sn_next -arena_get -arena_ichoose -arena_init -arena_lg_dirty_mult_default_get -arena_lg_dirty_mult_default_set -arena_lg_dirty_mult_get -arena_lg_dirty_mult_set -arena_malloc -arena_malloc_hard -arena_malloc_large -arena_mapbits_allocated_get -arena_mapbits_binind_get -arena_mapbits_decommitted_get -arena_mapbits_dirty_get -arena_mapbits_get -arena_mapbits_internal_set -arena_mapbits_large_binind_set -arena_mapbits_large_get -arena_mapbits_large_set -arena_mapbits_large_size_get -arena_mapbits_size_decode -arena_mapbits_size_encode -arena_mapbits_small_runind_get -arena_mapbits_small_set -arena_mapbits_unallocated_set -arena_mapbits_unallocated_size_get -arena_mapbits_unallocated_size_set -arena_mapbits_unzeroed_get -arena_mapbitsp_get_const -arena_mapbitsp_get_mutable -arena_mapbitsp_read -arena_mapbitsp_write -arena_maxrun -arena_maybe_purge -arena_metadata_allocated_add -arena_metadata_allocated_get -arena_metadata_allocated_sub -arena_migrate -arena_miscelm_get_const -arena_miscelm_get_mutable -arena_miscelm_to_pageind -arena_miscelm_to_rpages -arena_new -arena_node_alloc -arena_node_dalloc -arena_nthreads_dec -arena_nthreads_get -arena_nthreads_inc -arena_palloc -arena_postfork_child -arena_postfork_parent -arena_prefork0 -arena_prefork1 -arena_prefork2 -arena_prefork3 -arena_prof_accum -arena_prof_accum_impl -arena_prof_accum_locked -arena_prof_promoted -arena_prof_tctx_get -arena_prof_tctx_reset -arena_prof_tctx_set -arena_ptr_small_binind_get -arena_purge -arena_quarantine_junk_small -arena_ralloc -arena_ralloc_junk_large -arena_ralloc_no_move -arena_rd_to_miscelm -arena_redzone_corruption -arena_reset -arena_run_regind -arena_run_to_miscelm -arena_salloc -arena_sdalloc -arena_stats_merge -arena_tcache_fill_small -arena_tdata_get -arena_tdata_get_hard -arenas -arenas_tdata_bypass_cleanup -arenas_tdata_cleanup -atomic_add_p -atomic_add_u -atomic_add_uint32 -atomic_add_uint64 -atomic_add_z -atomic_cas_p -atomic_cas_u -atomic_cas_uint32 -atomic_cas_uint64 -atomic_cas_z -atomic_sub_p -atomic_sub_u -atomic_sub_uint32 -atomic_sub_uint64 -atomic_sub_z -atomic_write_p -atomic_write_u -atomic_write_uint32 -atomic_write_uint64 -atomic_write_z -base_alloc -base_boot -base_postfork_child -base_postfork_parent -base_prefork -base_stats_get -bitmap_full -bitmap_get -bitmap_info_init -bitmap_init -bitmap_set -bitmap_sfu -bitmap_size -bitmap_unset -bootstrap_calloc -bootstrap_free -bootstrap_malloc -bt_init -buferror -chunk_alloc_base -chunk_alloc_cache -chunk_alloc_dss -chunk_alloc_mmap -chunk_alloc_wrapper -chunk_boot -chunk_dalloc_cache -chunk_dalloc_mmap -chunk_dalloc_wrapper -chunk_deregister -chunk_dss_boot -chunk_dss_mergeable -chunk_dss_prec_get -chunk_dss_prec_set -chunk_hooks_default -chunk_hooks_get -chunk_hooks_set -chunk_in_dss -chunk_lookup -chunk_npages -chunk_purge_wrapper -chunk_register -chunks_rtree -chunksize -chunksize_mask -ckh_count -ckh_delete -ckh_insert -ckh_iter -ckh_new -ckh_pointer_hash -ckh_pointer_keycomp -ckh_remove -ckh_search -ckh_string_hash -ckh_string_keycomp -ctl_boot -ctl_bymib -ctl_byname -ctl_nametomib -ctl_postfork_child -ctl_postfork_parent -ctl_prefork -decay_ticker_get -dss_prec_names -extent_node_achunk_get -extent_node_achunk_set -extent_node_addr_get -extent_node_addr_set -extent_node_arena_get -extent_node_arena_set -extent_node_committed_get -extent_node_committed_set -extent_node_dirty_insert 
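
The private_symbols.txt being removed in this hunk is the single source of truth for that namespace: one bare symbol per line, 639 in all, which the two one-line shell generators (private_namespace.sh above, and private_unnamespace.sh further down in this commit) expand into the matching #define and #undef headers. These pre-generated copies presumably live under include-win because MSVC builds have no configure step to run the scripts. Per input line, the generated pair looks like this (C view of the scripts' literal echo output, using tcache_boot as the example symbol):

/* Emitted by private_namespace.sh for the input line "tcache_boot": */
#define tcache_boot JEMALLOC_N(tcache_boot)

/* Emitted by private_unnamespace.sh for the same line: */
#undef tcache_boot
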
-extent_node_dirty_linkage_init -extent_node_dirty_remove -extent_node_init -extent_node_prof_tctx_get -extent_node_prof_tctx_set -extent_node_size_get -extent_node_size_set -extent_node_sn_get -extent_node_sn_set -extent_node_zeroed_get -extent_node_zeroed_set -extent_size_quantize_ceil -extent_size_quantize_floor -extent_tree_ad_destroy -extent_tree_ad_destroy_recurse -extent_tree_ad_empty -extent_tree_ad_first -extent_tree_ad_insert -extent_tree_ad_iter -extent_tree_ad_iter_recurse -extent_tree_ad_iter_start -extent_tree_ad_last -extent_tree_ad_new -extent_tree_ad_next -extent_tree_ad_nsearch -extent_tree_ad_prev -extent_tree_ad_psearch -extent_tree_ad_remove -extent_tree_ad_reverse_iter -extent_tree_ad_reverse_iter_recurse -extent_tree_ad_reverse_iter_start -extent_tree_ad_search -extent_tree_szsnad_destroy -extent_tree_szsnad_destroy_recurse -extent_tree_szsnad_empty -extent_tree_szsnad_first -extent_tree_szsnad_insert -extent_tree_szsnad_iter -extent_tree_szsnad_iter_recurse -extent_tree_szsnad_iter_start -extent_tree_szsnad_last -extent_tree_szsnad_new -extent_tree_szsnad_next -extent_tree_szsnad_nsearch -extent_tree_szsnad_prev -extent_tree_szsnad_psearch -extent_tree_szsnad_remove -extent_tree_szsnad_reverse_iter -extent_tree_szsnad_reverse_iter_recurse -extent_tree_szsnad_reverse_iter_start -extent_tree_szsnad_search -ffs_llu -ffs_lu -ffs_u -ffs_u32 -ffs_u64 -ffs_zu -get_errno -hash -hash_fmix_32 -hash_fmix_64 -hash_get_block_32 -hash_get_block_64 -hash_rotl_32 -hash_rotl_64 -hash_x64_128 -hash_x86_128 -hash_x86_32 -huge_aalloc -huge_dalloc -huge_dalloc_junk -huge_malloc -huge_palloc -huge_prof_tctx_get -huge_prof_tctx_reset -huge_prof_tctx_set -huge_ralloc -huge_ralloc_no_move -huge_salloc -iaalloc -ialloc -iallocztm -iarena_cleanup -idalloc -idalloctm -in_valgrind -index2size -index2size_compute -index2size_lookup -index2size_tab -ipalloc -ipalloct -ipallocztm -iqalloc -iralloc -iralloct -iralloct_realign -isalloc -isdalloct -isqalloc -isthreaded -ivsalloc -ixalloc -jemalloc_postfork_child -jemalloc_postfork_parent -jemalloc_prefork -large_maxclass -lg_floor -lg_prof_sample -malloc_cprintf -malloc_mutex_assert_not_owner -malloc_mutex_assert_owner -malloc_mutex_boot -malloc_mutex_init -malloc_mutex_lock -malloc_mutex_postfork_child -malloc_mutex_postfork_parent -malloc_mutex_prefork -malloc_mutex_unlock -malloc_printf -malloc_snprintf -malloc_strtoumax -malloc_tsd_boot0 -malloc_tsd_boot1 -malloc_tsd_cleanup_register -malloc_tsd_dalloc -malloc_tsd_malloc -malloc_tsd_no_cleanup -malloc_vcprintf -malloc_vsnprintf -malloc_write -map_bias -map_misc_offset -mb_write -narenas_auto -narenas_tdata_cleanup -narenas_total_get -ncpus -nhbins -nhclasses -nlclasses -nstime_add -nstime_compare -nstime_copy -nstime_divide -nstime_idivide -nstime_imultiply -nstime_init -nstime_init2 -nstime_monotonic -nstime_ns -nstime_nsec -nstime_sec -nstime_subtract -nstime_update -opt_abort -opt_decay_time -opt_dss -opt_junk -opt_junk_alloc -opt_junk_free -opt_lg_chunk -opt_lg_dirty_mult -opt_lg_prof_interval -opt_lg_prof_sample -opt_lg_tcache_max -opt_narenas -opt_prof -opt_prof_accum -opt_prof_active -opt_prof_final -opt_prof_gdump -opt_prof_leak -opt_prof_prefix -opt_prof_thread_active_init -opt_purge -opt_quarantine -opt_redzone -opt_stats_print -opt_tcache -opt_thp -opt_utrace -opt_xmalloc -opt_zero -p2rz -pages_boot -pages_commit -pages_decommit -pages_huge -pages_map -pages_nohuge -pages_purge -pages_trim -pages_unmap -pind2sz -pind2sz_compute -pind2sz_lookup -pind2sz_tab -pow2_ceil_u32 -pow2_ceil_u64 
-pow2_ceil_zu -prng_lg_range_u32 -prng_lg_range_u64 -prng_lg_range_zu -prng_range_u32 -prng_range_u64 -prng_range_zu -prng_state_next_u32 -prng_state_next_u64 -prng_state_next_zu -prof_active -prof_active_get -prof_active_get_unlocked -prof_active_set -prof_alloc_prep -prof_alloc_rollback -prof_backtrace -prof_boot0 -prof_boot1 -prof_boot2 -prof_bt_count -prof_dump_header -prof_dump_open -prof_free -prof_free_sampled_object -prof_gdump -prof_gdump_get -prof_gdump_get_unlocked -prof_gdump_set -prof_gdump_val -prof_idump -prof_interval -prof_lookup -prof_malloc -prof_malloc_sample_object -prof_mdump -prof_postfork_child -prof_postfork_parent -prof_prefork0 -prof_prefork1 -prof_realloc -prof_reset -prof_sample_accum_update -prof_sample_threshold_update -prof_tctx_get -prof_tctx_reset -prof_tctx_set -prof_tdata_cleanup -prof_tdata_count -prof_tdata_get -prof_tdata_init -prof_tdata_reinit -prof_thread_active_get -prof_thread_active_init_get -prof_thread_active_init_set -prof_thread_active_set -prof_thread_name_get -prof_thread_name_set -psz2ind -psz2u -purge_mode_names -quarantine -quarantine_alloc_hook -quarantine_alloc_hook_work -quarantine_cleanup -rtree_child_read -rtree_child_read_hard -rtree_child_tryread -rtree_delete -rtree_get -rtree_new -rtree_node_valid -rtree_set -rtree_start_level -rtree_subkey -rtree_subtree_read -rtree_subtree_read_hard -rtree_subtree_tryread -rtree_val_read -rtree_val_write -run_quantize_ceil -run_quantize_floor -s2u -s2u_compute -s2u_lookup -sa2u -set_errno -size2index -size2index_compute -size2index_lookup -size2index_tab -spin_adaptive -spin_init -stats_cactive -stats_cactive_add -stats_cactive_get -stats_cactive_sub -stats_print -tcache_alloc_easy -tcache_alloc_large -tcache_alloc_small -tcache_alloc_small_hard -tcache_arena_reassociate -tcache_bin_flush_large -tcache_bin_flush_small -tcache_bin_info -tcache_boot -tcache_cleanup -tcache_create -tcache_dalloc_large -tcache_dalloc_small -tcache_enabled_cleanup -tcache_enabled_get -tcache_enabled_set -tcache_event -tcache_event_hard -tcache_flush -tcache_get -tcache_get_hard -tcache_maxclass -tcache_postfork_child -tcache_postfork_parent -tcache_prefork -tcache_salloc -tcache_stats_merge -tcaches -tcaches_create -tcaches_destroy -tcaches_flush -tcaches_get -thread_allocated_cleanup -thread_deallocated_cleanup -ticker_copy -ticker_init -ticker_read -ticker_tick -ticker_ticks -tsd_arena_get -tsd_arena_set -tsd_arenap_get -tsd_arenas_tdata_bypass_get -tsd_arenas_tdata_bypass_set -tsd_arenas_tdata_bypassp_get -tsd_arenas_tdata_get -tsd_arenas_tdata_set -tsd_arenas_tdatap_get -tsd_boot -tsd_boot0 -tsd_boot1 -tsd_booted -tsd_booted_get -tsd_cleanup -tsd_cleanup_wrapper -tsd_fetch -tsd_fetch_impl -tsd_get -tsd_get_allocates -tsd_iarena_get -tsd_iarena_set -tsd_iarenap_get -tsd_initialized -tsd_init_check_recursion -tsd_init_finish -tsd_init_head -tsd_narenas_tdata_get -tsd_narenas_tdata_set -tsd_narenas_tdatap_get -tsd_wrapper_get -tsd_wrapper_set -tsd_nominal -tsd_prof_tdata_get -tsd_prof_tdata_set -tsd_prof_tdatap_get -tsd_quarantine_get -tsd_quarantine_set -tsd_quarantinep_get -tsd_set -tsd_tcache_enabled_get -tsd_tcache_enabled_set -tsd_tcache_enabledp_get -tsd_tcache_get -tsd_tcache_set -tsd_tcachep_get -tsd_thread_allocated_get -tsd_thread_allocated_set -tsd_thread_allocatedp_get -tsd_thread_deallocated_get -tsd_thread_deallocated_set -tsd_thread_deallocatedp_get -tsd_tls -tsd_tsd -tsd_tsdn -tsd_witness_fork_get -tsd_witness_fork_set -tsd_witness_forkp_get -tsd_witnesses_get -tsd_witnesses_set 
-tsd_witnessesp_get -tsdn_fetch -tsdn_null -tsdn_tsd -u2rz -valgrind_freelike_block -valgrind_make_mem_defined -valgrind_make_mem_noaccess -valgrind_make_mem_undefined -witness_assert_depth -witness_assert_depth_to_rank -witness_assert_lockless -witness_assert_not_owner -witness_assert_owner -witness_depth_error -witness_fork_cleanup -witness_init -witness_lock -witness_lock_error -witness_not_owner_error -witness_owner -witness_owner_error -witness_postfork_child -witness_postfork_parent -witness_prefork -witness_unlock -witnesses_cleanup -zone_register diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h deleted file mode 100755 index 27038c63..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h +++ /dev/null @@ -1,639 +0,0 @@ -#undef a0dalloc -#undef a0get -#undef a0malloc -#undef arena_aalloc -#undef arena_alloc_junk_small -#undef arena_basic_stats_merge -#undef arena_bin_index -#undef arena_bin_info -#undef arena_bitselm_get_const -#undef arena_bitselm_get_mutable -#undef arena_boot -#undef arena_choose -#undef arena_choose_hard -#undef arena_choose_impl -#undef arena_chunk_alloc_huge -#undef arena_chunk_cache_maybe_insert -#undef arena_chunk_cache_maybe_remove -#undef arena_chunk_dalloc_huge -#undef arena_chunk_ralloc_huge_expand -#undef arena_chunk_ralloc_huge_shrink -#undef arena_chunk_ralloc_huge_similar -#undef arena_cleanup -#undef arena_dalloc -#undef arena_dalloc_bin -#undef arena_dalloc_bin_junked_locked -#undef arena_dalloc_junk_large -#undef arena_dalloc_junk_small -#undef arena_dalloc_large -#undef arena_dalloc_large_junked_locked -#undef arena_dalloc_small -#undef arena_decay_tick -#undef arena_decay_ticks -#undef arena_decay_time_default_get -#undef arena_decay_time_default_set -#undef arena_decay_time_get -#undef arena_decay_time_set -#undef arena_dss_prec_get -#undef arena_dss_prec_set -#undef arena_extent_sn_next -#undef arena_get -#undef arena_ichoose -#undef arena_init -#undef arena_lg_dirty_mult_default_get -#undef arena_lg_dirty_mult_default_set -#undef arena_lg_dirty_mult_get -#undef arena_lg_dirty_mult_set -#undef arena_malloc -#undef arena_malloc_hard -#undef arena_malloc_large -#undef arena_mapbits_allocated_get -#undef arena_mapbits_binind_get -#undef arena_mapbits_decommitted_get -#undef arena_mapbits_dirty_get -#undef arena_mapbits_get -#undef arena_mapbits_internal_set -#undef arena_mapbits_large_binind_set -#undef arena_mapbits_large_get -#undef arena_mapbits_large_set -#undef arena_mapbits_large_size_get -#undef arena_mapbits_size_decode -#undef arena_mapbits_size_encode -#undef arena_mapbits_small_runind_get -#undef arena_mapbits_small_set -#undef arena_mapbits_unallocated_set -#undef arena_mapbits_unallocated_size_get -#undef arena_mapbits_unallocated_size_set -#undef arena_mapbits_unzeroed_get -#undef arena_mapbitsp_get_const -#undef arena_mapbitsp_get_mutable -#undef arena_mapbitsp_read -#undef arena_mapbitsp_write -#undef arena_maxrun -#undef arena_maybe_purge -#undef arena_metadata_allocated_add -#undef arena_metadata_allocated_get -#undef arena_metadata_allocated_sub -#undef arena_migrate -#undef arena_miscelm_get_const -#undef arena_miscelm_get_mutable -#undef arena_miscelm_to_pageind -#undef arena_miscelm_to_rpages -#undef arena_new -#undef arena_node_alloc -#undef arena_node_dalloc -#undef arena_nthreads_dec -#undef arena_nthreads_get -#undef arena_nthreads_inc -#undef arena_palloc -#undef arena_postfork_child 
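
private_unnamespace.h, whose deletion starts just above, is the exact inverse of the namespace header: the same 639 symbols, each #undef'd. A hedged sketch of the bracketing pattern this pair supports (the include paths are the real deleted files; the surrounding usage is illustrative, e.g. test or shim code that must see unmangled names after pulling in internal headers):

#include "jemalloc/internal/private_namespace.h"    /* arena_boot -> je_arena_boot */

void arena_boot(void);      /* this declaration is really je_arena_boot */

#include "jemalloc/internal/private_unnamespace.h"  /* #undef arena_boot, ... */

void arena_boot(void);      /* now an ordinary, unmangled identifier again */

Because the mangling is pure preprocessing, undoing it is just a matter of removing the macros; the #undef header lets a single translation unit mix jemalloc-internal declarations with application-level names that happen to match.
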
-#undef arena_postfork_parent -#undef arena_prefork0 -#undef arena_prefork1 -#undef arena_prefork2 -#undef arena_prefork3 -#undef arena_prof_accum -#undef arena_prof_accum_impl -#undef arena_prof_accum_locked -#undef arena_prof_promoted -#undef arena_prof_tctx_get -#undef arena_prof_tctx_reset -#undef arena_prof_tctx_set -#undef arena_ptr_small_binind_get -#undef arena_purge -#undef arena_quarantine_junk_small -#undef arena_ralloc -#undef arena_ralloc_junk_large -#undef arena_ralloc_no_move -#undef arena_rd_to_miscelm -#undef arena_redzone_corruption -#undef arena_reset -#undef arena_run_regind -#undef arena_run_to_miscelm -#undef arena_salloc -#undef arena_sdalloc -#undef arena_stats_merge -#undef arena_tcache_fill_small -#undef arena_tdata_get -#undef arena_tdata_get_hard -#undef arenas -#undef arenas_tdata_bypass_cleanup -#undef arenas_tdata_cleanup -#undef atomic_add_p -#undef atomic_add_u -#undef atomic_add_uint32 -#undef atomic_add_uint64 -#undef atomic_add_z -#undef atomic_cas_p -#undef atomic_cas_u -#undef atomic_cas_uint32 -#undef atomic_cas_uint64 -#undef atomic_cas_z -#undef atomic_sub_p -#undef atomic_sub_u -#undef atomic_sub_uint32 -#undef atomic_sub_uint64 -#undef atomic_sub_z -#undef atomic_write_p -#undef atomic_write_u -#undef atomic_write_uint32 -#undef atomic_write_uint64 -#undef atomic_write_z -#undef base_alloc -#undef base_boot -#undef base_postfork_child -#undef base_postfork_parent -#undef base_prefork -#undef base_stats_get -#undef bitmap_full -#undef bitmap_get -#undef bitmap_info_init -#undef bitmap_init -#undef bitmap_set -#undef bitmap_sfu -#undef bitmap_size -#undef bitmap_unset -#undef bootstrap_calloc -#undef bootstrap_free -#undef bootstrap_malloc -#undef bt_init -#undef buferror -#undef chunk_alloc_base -#undef chunk_alloc_cache -#undef chunk_alloc_dss -#undef chunk_alloc_mmap -#undef chunk_alloc_wrapper -#undef chunk_boot -#undef chunk_dalloc_cache -#undef chunk_dalloc_mmap -#undef chunk_dalloc_wrapper -#undef chunk_deregister -#undef chunk_dss_boot -#undef chunk_dss_mergeable -#undef chunk_dss_prec_get -#undef chunk_dss_prec_set -#undef chunk_hooks_default -#undef chunk_hooks_get -#undef chunk_hooks_set -#undef chunk_in_dss -#undef chunk_lookup -#undef chunk_npages -#undef chunk_purge_wrapper -#undef chunk_register -#undef chunks_rtree -#undef chunksize -#undef chunksize_mask -#undef ckh_count -#undef ckh_delete -#undef ckh_insert -#undef ckh_iter -#undef ckh_new -#undef ckh_pointer_hash -#undef ckh_pointer_keycomp -#undef ckh_remove -#undef ckh_search -#undef ckh_string_hash -#undef ckh_string_keycomp -#undef ctl_boot -#undef ctl_bymib -#undef ctl_byname -#undef ctl_nametomib -#undef ctl_postfork_child -#undef ctl_postfork_parent -#undef ctl_prefork -#undef decay_ticker_get -#undef dss_prec_names -#undef extent_node_achunk_get -#undef extent_node_achunk_set -#undef extent_node_addr_get -#undef extent_node_addr_set -#undef extent_node_arena_get -#undef extent_node_arena_set -#undef extent_node_committed_get -#undef extent_node_committed_set -#undef extent_node_dirty_insert -#undef extent_node_dirty_linkage_init -#undef extent_node_dirty_remove -#undef extent_node_init -#undef extent_node_prof_tctx_get -#undef extent_node_prof_tctx_set -#undef extent_node_size_get -#undef extent_node_size_set -#undef extent_node_sn_get -#undef extent_node_sn_set -#undef extent_node_zeroed_get -#undef extent_node_zeroed_set -#undef extent_size_quantize_ceil -#undef extent_size_quantize_floor -#undef extent_tree_ad_destroy -#undef extent_tree_ad_destroy_recurse -#undef 
extent_tree_ad_empty -#undef extent_tree_ad_first -#undef extent_tree_ad_insert -#undef extent_tree_ad_iter -#undef extent_tree_ad_iter_recurse -#undef extent_tree_ad_iter_start -#undef extent_tree_ad_last -#undef extent_tree_ad_new -#undef extent_tree_ad_next -#undef extent_tree_ad_nsearch -#undef extent_tree_ad_prev -#undef extent_tree_ad_psearch -#undef extent_tree_ad_remove -#undef extent_tree_ad_reverse_iter -#undef extent_tree_ad_reverse_iter_recurse -#undef extent_tree_ad_reverse_iter_start -#undef extent_tree_ad_search -#undef extent_tree_szsnad_destroy -#undef extent_tree_szsnad_destroy_recurse -#undef extent_tree_szsnad_empty -#undef extent_tree_szsnad_first -#undef extent_tree_szsnad_insert -#undef extent_tree_szsnad_iter -#undef extent_tree_szsnad_iter_recurse -#undef extent_tree_szsnad_iter_start -#undef extent_tree_szsnad_last -#undef extent_tree_szsnad_new -#undef extent_tree_szsnad_next -#undef extent_tree_szsnad_nsearch -#undef extent_tree_szsnad_prev -#undef extent_tree_szsnad_psearch -#undef extent_tree_szsnad_remove -#undef extent_tree_szsnad_reverse_iter -#undef extent_tree_szsnad_reverse_iter_recurse -#undef extent_tree_szsnad_reverse_iter_start -#undef extent_tree_szsnad_search -#undef ffs_llu -#undef ffs_lu -#undef ffs_u -#undef ffs_u32 -#undef ffs_u64 -#undef ffs_zu -#undef get_errno -#undef hash -#undef hash_fmix_32 -#undef hash_fmix_64 -#undef hash_get_block_32 -#undef hash_get_block_64 -#undef hash_rotl_32 -#undef hash_rotl_64 -#undef hash_x64_128 -#undef hash_x86_128 -#undef hash_x86_32 -#undef huge_aalloc -#undef huge_dalloc -#undef huge_dalloc_junk -#undef huge_malloc -#undef huge_palloc -#undef huge_prof_tctx_get -#undef huge_prof_tctx_reset -#undef huge_prof_tctx_set -#undef huge_ralloc -#undef huge_ralloc_no_move -#undef huge_salloc -#undef iaalloc -#undef ialloc -#undef iallocztm -#undef iarena_cleanup -#undef idalloc -#undef idalloctm -#undef in_valgrind -#undef index2size -#undef index2size_compute -#undef index2size_lookup -#undef index2size_tab -#undef ipalloc -#undef ipalloct -#undef ipallocztm -#undef iqalloc -#undef iralloc -#undef iralloct -#undef iralloct_realign -#undef isalloc -#undef isdalloct -#undef isqalloc -#undef isthreaded -#undef ivsalloc -#undef ixalloc -#undef jemalloc_postfork_child -#undef jemalloc_postfork_parent -#undef jemalloc_prefork -#undef large_maxclass -#undef lg_floor -#undef lg_prof_sample -#undef malloc_cprintf -#undef malloc_mutex_assert_not_owner -#undef malloc_mutex_assert_owner -#undef malloc_mutex_boot -#undef malloc_mutex_init -#undef malloc_mutex_lock -#undef malloc_mutex_postfork_child -#undef malloc_mutex_postfork_parent -#undef malloc_mutex_prefork -#undef malloc_mutex_unlock -#undef malloc_printf -#undef malloc_snprintf -#undef malloc_strtoumax -#undef malloc_tsd_boot0 -#undef malloc_tsd_boot1 -#undef malloc_tsd_cleanup_register -#undef malloc_tsd_dalloc -#undef malloc_tsd_malloc -#undef malloc_tsd_no_cleanup -#undef malloc_vcprintf -#undef malloc_vsnprintf -#undef malloc_write -#undef map_bias -#undef map_misc_offset -#undef mb_write -#undef narenas_auto -#undef narenas_tdata_cleanup -#undef narenas_total_get -#undef ncpus -#undef nhbins -#undef nhclasses -#undef nlclasses -#undef nstime_add -#undef nstime_compare -#undef nstime_copy -#undef nstime_divide -#undef nstime_idivide -#undef nstime_imultiply -#undef nstime_init -#undef nstime_init2 -#undef nstime_monotonic -#undef nstime_ns -#undef nstime_nsec -#undef nstime_sec -#undef nstime_subtract -#undef nstime_update -#undef opt_abort -#undef opt_decay_time 
-#undef opt_dss -#undef opt_junk -#undef opt_junk_alloc -#undef opt_junk_free -#undef opt_lg_chunk -#undef opt_lg_dirty_mult -#undef opt_lg_prof_interval -#undef opt_lg_prof_sample -#undef opt_lg_tcache_max -#undef opt_narenas -#undef opt_prof -#undef opt_prof_accum -#undef opt_prof_active -#undef opt_prof_final -#undef opt_prof_gdump -#undef opt_prof_leak -#undef opt_prof_prefix -#undef opt_prof_thread_active_init -#undef opt_purge -#undef opt_quarantine -#undef opt_redzone -#undef opt_stats_print -#undef opt_tcache -#undef opt_thp -#undef opt_utrace -#undef opt_xmalloc -#undef opt_zero -#undef p2rz -#undef pages_boot -#undef pages_commit -#undef pages_decommit -#undef pages_huge -#undef pages_map -#undef pages_nohuge -#undef pages_purge -#undef pages_trim -#undef pages_unmap -#undef pind2sz -#undef pind2sz_compute -#undef pind2sz_lookup -#undef pind2sz_tab -#undef pow2_ceil_u32 -#undef pow2_ceil_u64 -#undef pow2_ceil_zu -#undef prng_lg_range_u32 -#undef prng_lg_range_u64 -#undef prng_lg_range_zu -#undef prng_range_u32 -#undef prng_range_u64 -#undef prng_range_zu -#undef prng_state_next_u32 -#undef prng_state_next_u64 -#undef prng_state_next_zu -#undef prof_active -#undef prof_active_get -#undef prof_active_get_unlocked -#undef prof_active_set -#undef prof_alloc_prep -#undef prof_alloc_rollback -#undef prof_backtrace -#undef prof_boot0 -#undef prof_boot1 -#undef prof_boot2 -#undef prof_bt_count -#undef prof_dump_header -#undef prof_dump_open -#undef prof_free -#undef prof_free_sampled_object -#undef prof_gdump -#undef prof_gdump_get -#undef prof_gdump_get_unlocked -#undef prof_gdump_set -#undef prof_gdump_val -#undef prof_idump -#undef prof_interval -#undef prof_lookup -#undef prof_malloc -#undef prof_malloc_sample_object -#undef prof_mdump -#undef prof_postfork_child -#undef prof_postfork_parent -#undef prof_prefork0 -#undef prof_prefork1 -#undef prof_realloc -#undef prof_reset -#undef prof_sample_accum_update -#undef prof_sample_threshold_update -#undef prof_tctx_get -#undef prof_tctx_reset -#undef prof_tctx_set -#undef prof_tdata_cleanup -#undef prof_tdata_count -#undef prof_tdata_get -#undef prof_tdata_init -#undef prof_tdata_reinit -#undef prof_thread_active_get -#undef prof_thread_active_init_get -#undef prof_thread_active_init_set -#undef prof_thread_active_set -#undef prof_thread_name_get -#undef prof_thread_name_set -#undef psz2ind -#undef psz2u -#undef purge_mode_names -#undef quarantine -#undef quarantine_alloc_hook -#undef quarantine_alloc_hook_work -#undef quarantine_cleanup -#undef rtree_child_read -#undef rtree_child_read_hard -#undef rtree_child_tryread -#undef rtree_delete -#undef rtree_get -#undef rtree_new -#undef rtree_node_valid -#undef rtree_set -#undef rtree_start_level -#undef rtree_subkey -#undef rtree_subtree_read -#undef rtree_subtree_read_hard -#undef rtree_subtree_tryread -#undef rtree_val_read -#undef rtree_val_write -#undef run_quantize_ceil -#undef run_quantize_floor -#undef s2u -#undef s2u_compute -#undef s2u_lookup -#undef sa2u -#undef set_errno -#undef size2index -#undef size2index_compute -#undef size2index_lookup -#undef size2index_tab -#undef spin_adaptive -#undef spin_init -#undef stats_cactive -#undef stats_cactive_add -#undef stats_cactive_get -#undef stats_cactive_sub -#undef stats_print -#undef tcache_alloc_easy -#undef tcache_alloc_large -#undef tcache_alloc_small -#undef tcache_alloc_small_hard -#undef tcache_arena_reassociate -#undef tcache_bin_flush_large -#undef tcache_bin_flush_small -#undef tcache_bin_info -#undef tcache_boot -#undef 
tcache_cleanup -#undef tcache_create -#undef tcache_dalloc_large -#undef tcache_dalloc_small -#undef tcache_enabled_cleanup -#undef tcache_enabled_get -#undef tcache_enabled_set -#undef tcache_event -#undef tcache_event_hard -#undef tcache_flush -#undef tcache_get -#undef tcache_get_hard -#undef tcache_maxclass -#undef tcache_postfork_child -#undef tcache_postfork_parent -#undef tcache_prefork -#undef tcache_salloc -#undef tcache_stats_merge -#undef tcaches -#undef tcaches_create -#undef tcaches_destroy -#undef tcaches_flush -#undef tcaches_get -#undef thread_allocated_cleanup -#undef thread_deallocated_cleanup -#undef ticker_copy -#undef ticker_init -#undef ticker_read -#undef ticker_tick -#undef ticker_ticks -#undef tsd_arena_get -#undef tsd_arena_set -#undef tsd_arenap_get -#undef tsd_arenas_tdata_bypass_get -#undef tsd_arenas_tdata_bypass_set -#undef tsd_arenas_tdata_bypassp_get -#undef tsd_arenas_tdata_get -#undef tsd_arenas_tdata_set -#undef tsd_arenas_tdatap_get -#undef tsd_boot -#undef tsd_boot0 -#undef tsd_boot1 -#undef tsd_booted -#undef tsd_booted_get -#undef tsd_cleanup -#undef tsd_cleanup_wrapper -#undef tsd_fetch -#undef tsd_fetch_impl -#undef tsd_get -#undef tsd_get_allocates -#undef tsd_iarena_get -#undef tsd_iarena_set -#undef tsd_iarenap_get -#undef tsd_initialized -#undef tsd_init_check_recursion -#undef tsd_init_finish -#undef tsd_init_head -#undef tsd_narenas_tdata_get -#undef tsd_narenas_tdata_set -#undef tsd_narenas_tdatap_get -#undef tsd_wrapper_get -#undef tsd_wrapper_set -#undef tsd_nominal -#undef tsd_prof_tdata_get -#undef tsd_prof_tdata_set -#undef tsd_prof_tdatap_get -#undef tsd_quarantine_get -#undef tsd_quarantine_set -#undef tsd_quarantinep_get -#undef tsd_set -#undef tsd_tcache_enabled_get -#undef tsd_tcache_enabled_set -#undef tsd_tcache_enabledp_get -#undef tsd_tcache_get -#undef tsd_tcache_set -#undef tsd_tcachep_get -#undef tsd_thread_allocated_get -#undef tsd_thread_allocated_set -#undef tsd_thread_allocatedp_get -#undef tsd_thread_deallocated_get -#undef tsd_thread_deallocated_set -#undef tsd_thread_deallocatedp_get -#undef tsd_tls -#undef tsd_tsd -#undef tsd_tsdn -#undef tsd_witness_fork_get -#undef tsd_witness_fork_set -#undef tsd_witness_forkp_get -#undef tsd_witnesses_get -#undef tsd_witnesses_set -#undef tsd_witnessesp_get -#undef tsdn_fetch -#undef tsdn_null -#undef tsdn_tsd -#undef u2rz -#undef valgrind_freelike_block -#undef valgrind_make_mem_defined -#undef valgrind_make_mem_noaccess -#undef valgrind_make_mem_undefined -#undef witness_assert_depth -#undef witness_assert_depth_to_rank -#undef witness_assert_lockless -#undef witness_assert_not_owner -#undef witness_assert_owner -#undef witness_depth_error -#undef witness_fork_cleanup -#undef witness_init -#undef witness_lock -#undef witness_lock_error -#undef witness_not_owner_error -#undef witness_owner -#undef witness_owner_error -#undef witness_postfork_child -#undef witness_postfork_parent -#undef witness_prefork -#undef witness_unlock -#undef witnesses_cleanup -#undef zone_register diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh deleted file mode 100755 index 23fed8e8..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -for symbol in `cat $1` ; do - echo "#undef ${symbol}" -done diff --git a/sources/jemalloc/include-win/jemalloc/internal/prng.h b/sources/jemalloc/include-win/jemalloc/internal/prng.h deleted 
file mode 100755 index c2bda19c..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/prng.h +++ /dev/null @@ -1,207 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. - * - * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example, the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - */ - -#define PRNG_A_32 UINT32_C(1103515241) -#define PRNG_C_32 UINT32_C(12347) - -#define PRNG_A_64 UINT64_C(6364136223846793005) -#define PRNG_C_64 UINT64_C(1442695040888963407) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t prng_state_next_u32(uint32_t state); -uint64_t prng_state_next_u64(uint64_t state); -size_t prng_state_next_zu(size_t state); - -uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, - bool atomic); -uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); -size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); - -uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); -uint64_t prng_range_u64(uint64_t *state, uint64_t range); -size_t prng_range_zu(size_t *state, size_t range, bool atomic); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) -JEMALLOC_ALWAYS_INLINE uint32_t -prng_state_next_u32(uint32_t state) -{ - - return ((state * PRNG_A_32) + PRNG_C_32); -} - -JEMALLOC_ALWAYS_INLINE uint64_t -prng_state_next_u64(uint64_t state) -{ - - return ((state * PRNG_A_64) + PRNG_C_64); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_state_next_zu(size_t state) -{ - -#if LG_SIZEOF_PTR == 2 - return ((state * PRNG_A_32) + PRNG_C_32); -#elif LG_SIZEOF_PTR == 3 - return ((state * PRNG_A_64) + PRNG_C_64); -#else -#error Unsupported pointer size -#endif -} - -JEMALLOC_ALWAYS_INLINE uint32_t -prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) -{ - uint32_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= 32); - - if (atomic) { - uint32_t state0; - - do { - state0 = atomic_read_uint32(state); - state1 = prng_state_next_u32(state0); - } while (atomic_cas_uint32(state, state0, state1)); - } else { - state1 = prng_state_next_u32(*state); - *state = state1; - } - ret = state1 >> (32 - lg_range); - - return (ret); -} - -/* 64-bit atomic operations cannot be supported on all relevant platforms. 
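For illustration, the LCG scheme documented in the prng.h removal above can be exercised standalone. The two constants are copied from the deleted header; the demo_ names and the driver are hypothetical, not jemalloc code:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PRNG_A_64	UINT64_C(6364136223846793005)
#define DEMO_PRNG_C_64	UINT64_C(1442695040888963407)

/* One LCG step: state' = (a * state + c) mod 2^64, the modulus being
 * implicit in unsigned 64-bit wraparound. */
static uint64_t
demo_state_next_u64(uint64_t state)
{
	return ((state * DEMO_PRNG_A_64) + DEMO_PRNG_C_64);
}

/* Draw lg_range bits (1..64). When m is a power of two the low-order LCG
 * bits have short cycles, hence the shift that keeps only the upper bits,
 * matching the "prefer to use the upper bits" note above. */
static uint64_t
demo_lg_range_u64(uint64_t *state, unsigned lg_range)
{
	*state = demo_state_next_u64(*state);
	return (*state >> (64 - lg_range));
}

int
main(void)
{
	uint64_t s = 42;
	for (int i = 0; i < 4; i++)
		printf("%llu\n", (unsigned long long)demo_lg_range_u64(&s, 16));
	return (0);
}

The parenthesized-return style is kept only so the sketch reads like the surrounding code.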
*/ -JEMALLOC_ALWAYS_INLINE uint64_t -prng_lg_range_u64(uint64_t *state, unsigned lg_range) -{ - uint64_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= 64); - - state1 = prng_state_next_u64(*state); - *state = state1; - ret = state1 >> (64 - lg_range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) -{ - size_t ret, state1; - - assert(lg_range > 0); - assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); - - if (atomic) { - size_t state0; - - do { - state0 = atomic_read_z(state); - state1 = prng_state_next_zu(state0); - } while (atomic_cas_z(state, state0, state1)); - } else { - state1 = prng_state_next_zu(*state); - *state = state1; - } - ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE uint32_t -prng_range_u32(uint32_t *state, uint32_t range, bool atomic) -{ - uint32_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). */ - lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_u32(state, lg_range, atomic); - } while (ret >= range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE uint64_t -prng_range_u64(uint64_t *state, uint64_t range) -{ - uint64_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_u64(state, lg_range); - } while (ret >= range); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -prng_range_zu(size_t *state, size_t range, bool atomic) -{ - size_t ret; - unsigned lg_range; - - assert(range > 1); - - /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; - - /* Generate a result in [0..range) via repeated trial. */ - do { - ret = prng_lg_range_zu(state, lg_range, atomic); - } while (ret >= range); - - return (ret); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/prof.h b/sources/jemalloc/include-win/jemalloc/internal/prof.h deleted file mode 100755 index 8293b71e..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/prof.h +++ /dev/null @@ -1,547 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_tctx_s prof_tctx_t; -typedef struct prof_gctx_s prof_gctx_t; -typedef struct prof_tdata_s prof_tdata_t; - -/* Option defaults. */ -#ifdef JEMALLOC_PROF -# define PROF_PREFIX_DEFAULT "jeprof" -#else -# define PROF_PREFIX_DEFAULT "" -#endif -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 - -/* - * Hard limit on stack backtrace depth. The version of prof_backtrace() that - * is based on __builtin_return_address() necessarily has a hard-coded number - * of backtrace frame handlers, and should be kept in sync with this setting. - */ -#define PROF_BT_MAX 128 - -/* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 - -/* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 - -/* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 - -/* - * Number of mutexes shared among all gctx's. 
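The prng_range_* inlines in the prng.h removal just above all share one pattern worth spelling out: round the bound up to a power of two, draw that many bits, and reject draws that land outside the bound, which keeps the result exactly uniform. A minimal sketch under that assumption (demo_ names are hypothetical; pass any lg-bit generator, such as the one sketched earlier):

#include <assert.h>
#include <stdint.h>

/* Ceiling of lg(range), valid for 1 < range <= 2^63. */
static unsigned
demo_lg_ceil(uint64_t range)
{
	unsigned lg = 0;

	while (lg < 63 && (UINT64_C(1) << lg) < range)
		lg++;
	return (lg);
}

/* Unbiased draw in [0..range): rejected draws are simply retried, and
 * because 2^lg_range < 2 * range, each draw is accepted with probability
 * greater than one half, so the expected number of trials is below two. */
static uint64_t
demo_range_u64(uint64_t *state, uint64_t range,
    uint64_t (*lg_draw)(uint64_t *state, unsigned lg_range))
{
	uint64_t ret;
	unsigned lg_range;

	assert(range > 1);
	lg_range = demo_lg_ceil(range);
	do {
		ret = lg_draw(state, lg_range);
	} while (ret >= range);
	return (ret);
}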
No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NCTX_LOCKS 1024 - -/* - * Number of mutexes shared among all tdata's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NTDATA_LOCKS 256 - -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct prof_bt_s { - /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; -}; - -#ifdef JEMALLOC_PROF_LIBGCC -/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ -typedef struct { - prof_bt_t *bt; - unsigned max; -} prof_unwind_data_t; -#endif - -struct prof_cnt_s { - /* Profiling counters. */ - uint64_t curobjs; - uint64_t curbytes; - uint64_t accumobjs; - uint64_t accumbytes; -}; - -typedef enum { - prof_tctx_state_initializing, - prof_tctx_state_nominal, - prof_tctx_state_dumping, - prof_tctx_state_purgatory /* Dumper must finish destroying. */ -} prof_tctx_state_t; - -struct prof_tctx_s { - /* Thread data for thread that performed the allocation. */ - prof_tdata_t *tdata; - - /* - * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be - * defunct during teardown. - */ - uint64_t thr_uid; - uint64_t thr_discrim; - - /* Profiling counters, protected by tdata->lock. */ - prof_cnt_t cnts; - - /* Associated global context. */ - prof_gctx_t *gctx; - - /* - * UID that distinguishes multiple tctx's created by the same thread, - * but coexisting in gctx->tctxs. There are two ways that such - * coexistence can occur: - * - A dumper thread can cause a tctx to be retained in the purgatory - * state. - * - Although a single "producer" thread must create all tctx's which - * share the same thr_uid, multiple "consumers" can each concurrently - * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only - * gets called once each time cnts.cur{objs,bytes} drop to 0, but this - * threshold can be hit again before the first consumer finishes - * executing prof_tctx_destroy(). - */ - uint64_t tctx_uid; - - /* Linkage into gctx's tctxs. */ - rb_node(prof_tctx_t) tctx_link; - - /* - * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents - * sample vs destroy race. - */ - bool prepared; - - /* Current dump-related state, protected by gctx->lock. */ - prof_tctx_state_t state; - - /* - * Copy of cnts snapshotted during early dump phase, protected by - * dump_mtx. - */ - prof_cnt_t dump_cnts; -}; -typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; - -struct prof_gctx_s { - /* Protects nlimbo, cnt_summed, and tctxs. */ - malloc_mutex_t *lock; - - /* - * Number of threads that currently cause this gctx to be in a state of - * limbo due to one of: - * - Initializing this gctx. - * - Initializing per thread counters associated with this gctx. - * - Preparing to destroy this gctx. - * - Dumping a heap profile that includes this gctx. - * nlimbo must be 1 (single destroyer) in order to safely destroy the - * gctx. - */ - unsigned nlimbo; - - /* - * Tree of profile counters, one for each thread that has allocated in - * this context. 
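The PROF_TDATA_STATE_* macros near the top of this header encode lifecycle state in pointer values just above NULL, so a single word can distinguish "no tdata", "special shutdown state", and "real pointer" without extra storage. A standalone sketch of that sentinel-pointer trick (all demo names hypothetical):

#include <stdbool.h>
#include <stdint.h>

typedef struct demo_tdata_s demo_tdata_t;

/* Small non-NULL values that can never be real (aligned) heap pointers. */
#define DEMO_STATE_REINCARNATED	((demo_tdata_t *)(uintptr_t)1)
#define DEMO_STATE_PURGATORY	((demo_tdata_t *)(uintptr_t)2)
#define DEMO_STATE_MAX		DEMO_STATE_PURGATORY

/* Only pointers above the sentinel range may be dereferenced; this mirrors
 * the (uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX test that
 * prof_sample_accum_update() performs later in this header. */
static bool
demo_tdata_valid(const demo_tdata_t *tdata)
{
	return ((uintptr_t)tdata > (uintptr_t)DEMO_STATE_MAX);
}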
- */ - prof_tctx_tree_t tctxs; - - /* Linkage for tree of contexts to be dumped. */ - rb_node(prof_gctx_t) dump_link; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* Associated backtrace. */ - prof_bt_t bt; - - /* Backtrace vector, variable size, referred to by bt. */ - void *vec[1]; -}; -typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; - -struct prof_tdata_s { - malloc_mutex_t *lock; - - /* Monotonically increasing unique thread identifier. */ - uint64_t thr_uid; - - /* - * Monotonically increasing discriminator among tdata structures - * associated with the same thr_uid. - */ - uint64_t thr_discrim; - - /* Included in heap profile dumps if non-NULL. */ - char *thread_name; - - bool attached; - bool expired; - - rb_node(prof_tdata_t) tdata_link; - - /* - * Counter used to initialize prof_tctx_t's tctx_uid. No locking is - * necessary when incrementing this field, because only one thread ever - * does so. - */ - uint64_t tctx_uid_next; - - /* - * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks - * backtraces for which it has non-zero allocation/deallocation counters - * associated with thread-specific prof_tctx_t objects. Other threads - * may write to prof_tctx_t contents when freeing associated objects. - */ - ckh_t bt2tctx; - - /* Sampling state. */ - uint64_t prng_state; - uint64_t bytes_until_sample; - - /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; - - /* - * Set to true during an early dump phase for tdata's which are - * currently being dumped. New threads' tdata's have this initialized - * to false so that they aren't accidentally included in later dump - * phases. - */ - bool dumping; - - /* - * True if profiling is active for this tdata's thread - * (thread.prof.active mallctl). - */ - bool active; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* Backtrace vector, used for calls to prof_backtrace(). */ - void *vec[PROF_BT_MAX]; -}; -typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_prof; -extern bool opt_prof_active; -extern bool opt_prof_thread_active_init; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* Accessed via prof_active_[gs]et{_unlocked,}(). */ -extern bool prof_active; - -/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ -extern bool prof_gdump_val; - -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; - -/* - * Initialized as opt_lg_prof_sample, and potentially modified during profiling - * resets. 
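One consequence of the prof_interval comment above deserves a concrete model: each arena keeps its own byte accumulator and triggers a dump only when it individually crosses the threshold, so un-dumped allocation across narenas arenas can approach prof_interval * narenas bytes. A toy sketch of that per-arena accounting (demo names hypothetical, not the deleted implementation):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint64_t accumbytes;	/* Bytes allocated since this arena's last dump. */
} demo_arena_t;

/* Returns true when the caller should trigger an interval dump. Every
 * arena can independently sit just below `interval`, which is why the
 * aggregate worst-case staleness is about interval * narenas bytes. */
static bool
demo_arena_prof_accum(demo_arena_t *arena, uint64_t interval, uint64_t usize)
{
	arena->accumbytes += usize;
	if (arena->accumbytes >= interval) {
		arena->accumbytes -= interval;
		return (true);
	}
	return (false);
}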
- */ -extern size_t lg_prof_sample; - -void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); -void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt); -prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); -#ifdef JEMALLOC_JET -size_t prof_tdata_count(void); -size_t prof_bt_count(void); -const prof_cnt_t *prof_cnt_all(void); -typedef int (prof_dump_open_t)(bool, const char *); -extern prof_dump_open_t *prof_dump_open; -typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); -extern prof_dump_header_t *prof_dump_header; -#endif -void prof_idump(tsdn_t *tsdn); -bool prof_mdump(tsd_t *tsd, const char *filename); -void prof_gdump(tsdn_t *tsdn); -prof_tdata_t *prof_tdata_init(tsd_t *tsd); -prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); -void prof_reset(tsd_t *tsd, size_t lg_sample); -void prof_tdata_cleanup(tsd_t *tsd); -bool prof_active_get(tsdn_t *tsdn); -bool prof_active_set(tsdn_t *tsdn, bool active); -const char *prof_thread_name_get(tsd_t *tsd); -int prof_thread_name_set(tsd_t *tsd, const char *thread_name); -bool prof_thread_active_get(tsd_t *tsd); -bool prof_thread_active_set(tsd_t *tsd, bool active); -bool prof_thread_active_init_get(tsdn_t *tsdn); -bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); -bool prof_gdump_get(tsdn_t *tsdn); -bool prof_gdump_set(tsdn_t *tsdn, bool active); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(tsd_t *tsd); -void prof_prefork0(tsdn_t *tsdn); -void prof_prefork1(tsdn_t *tsdn); -void prof_postfork_parent(tsdn_t *tsdn); -void prof_postfork_child(tsdn_t *tsdn); -void prof_sample_threshold_update(prof_tdata_t *tdata); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool prof_active_get_unlocked(void); -bool prof_gdump_get_unlocked(void); -prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); -prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); -void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, - const void *old_ptr, prof_tctx_t *tctx); -bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, - prof_tdata_t **tdata_out); -prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, - bool update); -void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, - prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, - size_t old_usize, prof_tctx_t *old_tctx); -void prof_free(tsd_t *tsd, const void *ptr, size_t usize); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) -JEMALLOC_ALWAYS_INLINE bool -prof_active_get_unlocked(void) -{ - - /* - * Even if opt_prof is true, sampling can be temporarily disabled by - * setting prof_active to false. No locking is used when reading - * prof_active in the fast path, so there are no guarantees regarding - * how long it will take for all threads to notice state changes. 
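prof_active_get_unlocked() above reads a shared flag with no lock, trading immediacy for fast-path speed. The deleted code relies on plain word-sized reads; one way to state the same contract explicitly is a C11 relaxed atomic, as in this hedged sketch (demo names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool demo_prof_active;

/* Fast path: no lock, no fence. A reader may observe a stale value for an
 * unbounded time, exactly the caveat in the comment above, but a relaxed
 * atomic load can never tear. */
static inline bool
demo_prof_active_get_unlocked(void)
{
	return (atomic_load_explicit(&demo_prof_active, memory_order_relaxed));
}

static inline void
demo_prof_active_set(bool active)
{
	atomic_store_explicit(&demo_prof_active, active, memory_order_relaxed);
}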
- */ - return (prof_active); -} - -JEMALLOC_ALWAYS_INLINE bool -prof_gdump_get_unlocked(void) -{ - - /* - * No locking is used when reading prof_gdump_val in the fast path, so - * there are no guarantees regarding how long it will take for all - * threads to notice state changes. - */ - return (prof_gdump_val); -} - -JEMALLOC_ALWAYS_INLINE prof_tdata_t * -prof_tdata_get(tsd_t *tsd, bool create) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - tdata = tsd_prof_tdata_get(tsd); - if (create) { - if (unlikely(tdata == NULL)) { - if (tsd_nominal(tsd)) { - tdata = prof_tdata_init(tsd); - tsd_prof_tdata_set(tsd, tdata); - } - } else if (unlikely(tdata->expired)) { - tdata = prof_tdata_reinit(tsd, tdata); - tsd_prof_tdata_set(tsd, tdata); - } - assert(tdata == NULL || tdata->attached); - } - - return (tdata); -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_tctx_get(tsdn_t *tsdn, const void *ptr) -{ - - cassert(config_prof); - assert(ptr != NULL); - - return (arena_prof_tctx_get(tsdn, ptr)); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_set(tsdn, ptr, usize, tctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, - prof_tctx_t *old_tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); -} - -JEMALLOC_ALWAYS_INLINE bool -prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, - prof_tdata_t **tdata_out) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, true); - if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) - tdata = NULL; - - if (tdata_out != NULL) - *tdata_out = tdata; - - if (unlikely(tdata == NULL)) - return (true); - - if (likely(tdata->bytes_until_sample >= usize)) { - if (update) - tdata->bytes_until_sample -= usize; - return (true); - } else { - /* Compute new sample threshold. */ - if (update) - prof_sample_threshold_update(tdata); - return (!tdata->active); - } -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) -{ - prof_tctx_t *ret; - prof_tdata_t *tdata; - prof_bt_t bt; - - assert(usize == s2u(usize)); - - if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, - &tdata))) - ret = (prof_tctx_t *)(uintptr_t)1U; - else { - bt_init(&bt, tdata->vec); - prof_backtrace(&bt); - ret = prof_lookup(tsd, &bt); - } - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) -{ - - cassert(config_prof); - assert(ptr != NULL); - assert(usize == isalloc(tsdn, ptr, true)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - prof_malloc_sample_object(tsdn, ptr, usize, tctx); - else - prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); -} - -JEMALLOC_ALWAYS_INLINE void -prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, - bool prof_active, bool updated, const void *old_ptr, size_t old_usize, - prof_tctx_t *old_tctx) -{ - bool sampled, old_sampled; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); - - if (prof_active && !updated && ptr != NULL) { - assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); - if (prof_sample_accum_update(tsd, usize, true, NULL)) { - /* - * Don't sample. 
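prof_sample_accum_update() above is byte-countdown sampling: each allocation spends its size from a per-thread budget, and only budget-exhausting allocations pay for a backtrace. A stripped-down sketch of the countdown (a fixed re-arm threshold stands in for jemalloc's randomized one, which is drawn so the mean is 2^lg_prof_sample; demo names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uint64_t bytes_until_sample;
} demo_sampler_t;

/* Returns true when this allocation should be sampled. */
static bool
demo_sample_check(demo_sampler_t *s, size_t usize, uint64_t threshold)
{
	if (s->bytes_until_sample >= usize) {
		/* Common case: spend budget, skip the backtrace. */
		s->bytes_until_sample -= usize;
		return (false);
	}
	/* Budget exhausted: re-arm and sample this allocation. */
	s->bytes_until_sample = threshold;
	return (true);
}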
The usize passed to prof_alloc_prep() - * was larger than what actually got allocated, so a - * backtrace was captured for this allocation, even - * though its actual usize was insufficient to cross the - * sample threshold. - */ - prof_alloc_rollback(tsd, tctx, true); - tctx = (prof_tctx_t *)(uintptr_t)1U; - } - } - - sampled = ((uintptr_t)tctx > (uintptr_t)1U); - old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); - - if (unlikely(sampled)) - prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); - else - prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); - - if (unlikely(old_sampled)) - prof_free_sampled_object(tsd, old_usize, old_tctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_free(tsd_t *tsd, const void *ptr, size_t usize) -{ - prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); - - cassert(config_prof); - assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - prof_free_sampled_object(tsd, usize, tctx); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h deleted file mode 100755 index edc48198..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h +++ /dev/null @@ -1,20 +0,0 @@ -#define je_malloc_conf JEMALLOC_N(malloc_conf) -#define je_malloc_message JEMALLOC_N(malloc_message) -#define je_malloc JEMALLOC_N(malloc) -#define je_calloc JEMALLOC_N(calloc) -#define je_posix_memalign JEMALLOC_N(posix_memalign) -#define je_aligned_alloc JEMALLOC_N(aligned_alloc) -#define je_realloc JEMALLOC_N(realloc) -#define je_free JEMALLOC_N(free) -#define je_mallocx JEMALLOC_N(mallocx) -#define je_rallocx JEMALLOC_N(rallocx) -#define je_xallocx JEMALLOC_N(xallocx) -#define je_sallocx JEMALLOC_N(sallocx) -#define je_dallocx JEMALLOC_N(dallocx) -#define je_sdallocx JEMALLOC_N(sdallocx) -#define je_nallocx JEMALLOC_N(nallocx) -#define je_mallctl JEMALLOC_N(mallctl) -#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) -#define je_mallctlbymib JEMALLOC_N(mallctlbymib) -#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) -#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh deleted file mode 100755 index 362109f7..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -for nm in `cat $1` ; do - n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` - echo "#define je_${n} JEMALLOC_N(${n})" -done diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt b/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt deleted file mode 100755 index 9f6b1d2b..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt +++ /dev/null @@ -1,20 +0,0 @@ -malloc_conf:je_malloc_conf -malloc_message:je_malloc_message -malloc:je_malloc -calloc:je_calloc -posix_memalign:je_posix_memalign -aligned_alloc:je_aligned_alloc -realloc:je_realloc -free:je_free -mallocx:je_mallocx -rallocx:je_rallocx -xallocx:je_xallocx -sallocx:je_sallocx -dallocx:je_dallocx -sdallocx:je_sdallocx -nallocx:je_nallocx -mallctl:je_mallctl -mallctlnametomib:je_mallctlnametomib -mallctlbymib:je_mallctlbymib -malloc_stats_print:je_malloc_stats_print 
-malloc_usable_size:je_malloc_usable_size diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h deleted file mode 100755 index 4d56cedd..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h +++ /dev/null @@ -1,20 +0,0 @@ -#undef je_malloc_conf -#undef je_malloc_message -#undef je_malloc -#undef je_calloc -#undef je_posix_memalign -#undef je_aligned_alloc -#undef je_realloc -#undef je_free -#undef je_mallocx -#undef je_rallocx -#undef je_xallocx -#undef je_sallocx -#undef je_dallocx -#undef je_sdallocx -#undef je_nallocx -#undef je_mallctl -#undef je_mallctlnametomib -#undef je_mallctlbymib -#undef je_malloc_stats_print -#undef je_malloc_usable_size diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh deleted file mode 100755 index 4239d177..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -for nm in `cat $1` ; do - n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` - echo "#undef je_${n}" -done diff --git a/sources/jemalloc/include-win/jemalloc/internal/ql.h b/sources/jemalloc/include-win/jemalloc/internal/ql.h deleted file mode 100755 index 1834bb85..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/ql.h +++ /dev/null @@ -1,81 +0,0 @@ -/* List definitions. */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - -#define ql_head_initializer(a_head) {NULL} - -#define ql_elm(a_type) qr(a_type) - -/* List functions. */ -#define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - -#define ql_first(a_head) ((a_head)->qlh_first) - -#define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ - : NULL) - -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - -#define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - -#define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/sources/jemalloc/include-win/jemalloc/internal/qr.h b/sources/jemalloc/include-win/jemalloc/internal/qr.h deleted file mode 100755 index 0fbaec25..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/qr.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. */ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) - -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - -/* - * qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. 
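The ql_/qr_ macros being removed here implement intrusive circular doubly-linked lists: the linkage lives inside the element, so linking and unlinking never allocate. A self-contained usage sketch, with minimal copies of three of the ring macros from the header above (the demo types and values are hypothetical):

#include <stdio.h>

#define qr(a_type) struct { a_type *qre_next; a_type *qre_prev; }

#define qr_new(a_qr, a_field) do { \
	(a_qr)->a_field.qre_next = (a_qr); \
	(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
	(a_qr)->a_field.qre_next = (a_qrelm); \
	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
	(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
	for ((var) = (a_qr); (var) != NULL; \
	    (var) = (((var)->a_field.qre_next != (a_qr)) \
	    ? (var)->a_field.qre_next : NULL))

typedef struct demo_node_s demo_node_t;
struct demo_node_s {
	int value;
	qr(demo_node_t) link;	/* Intrusive linkage; no separate node alloc. */
};

int
main(void)
{
	demo_node_t a = {1}, b = {2}, c = {3}, *it;

	qr_new(&a, link);
	qr_before_insert(&a, &b, link);	/* Ring: a <-> b. */
	qr_before_insert(&a, &c, link);	/* Ring: a <-> b <-> c. */
	qr_foreach(it, &a, link)
		printf("%d\n", it->value);	/* Prints 1 2 3. */
	return (0);
}

Note that inserting before the ring's entry point appends at the tail, which is exactly how the deleted ql_tail_insert() is built on qr_before_insert().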
- */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) - -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) diff --git a/sources/jemalloc/include-win/jemalloc/internal/quarantine.h b/sources/jemalloc/include-win/jemalloc/internal/quarantine.h deleted file mode 100755 index ae607399..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/quarantine.h +++ /dev/null @@ -1,60 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct quarantine_obj_s quarantine_obj_t; -typedef struct quarantine_s quarantine_t; - -/* Default per thread quarantine size if valgrind is enabled. */ -#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct quarantine_obj_s { - void *ptr; - size_t usize; -}; - -struct quarantine_s { - size_t curbytes; - size_t curobjs; - size_t first; -#define LG_MAXOBJS_INIT 10 - size_t lg_maxobjs; - quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void quarantine_alloc_hook_work(tsd_t *tsd); -void quarantine(tsd_t *tsd, void *ptr); -void quarantine_cleanup(tsd_t *tsd); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void quarantine_alloc_hook(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) -JEMALLOC_ALWAYS_INLINE void -quarantine_alloc_hook(void) -{ - tsd_t *tsd; - - assert(config_fill && opt_quarantine); - - tsd = tsd_fetch(); - if (tsd_quarantine_get(tsd) == NULL) - quarantine_alloc_hook_work(tsd); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/rb.h b/sources/jemalloc/include-win/jemalloc/internal/rb.h deleted file mode 100755 index 3770342f..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/rb.h +++ /dev/null @@ -1,1003 +0,0 @@ -/*- - ******************************************************************************* - * - * cpp macro implementation of left-leaning 2-3 red-black trees. Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include <stdbool.h> - * #include <stdint.h> - * #define NDEBUG // (Optional, see assert(3).) - * #include <assert.h> - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include <rb.h> - * ...
- * - ******************************************************************************* - */ - -#ifndef RB_H_ -#define RB_H_ - -#ifdef RB_COMPACT -/* Node structure. */ -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ -#define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ -} - -/* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - /* Bookkeeping bit cannot be used by node pointer. */ \ - assert(((uintptr_t)(a_node) & 0x1) == 0); \ - rbtn_left_set(a_type, a_field, (a_node), NULL); \ - rbtn_right_set(a_type, a_field, (a_node), NULL); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) -#else -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), NULL); \ - rbtn_right_set(a_type, a_field, (a_node), NULL); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) -#endif - -/* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = NULL; \ -} while (0) - -/* Internal utility macros. 
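The RB_COMPACT accessors above rely on alignment: node pointers are at least 2-byte aligned, so bit 0 of rbn_right_red is free to carry the node's color. A standalone sketch of that pointer-tagging trick (demo names hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct demo_node_s demo_node_t;
struct demo_node_s {
	demo_node_t *left;
	uintptr_t right_red;	/* Right-child pointer with color in bit 0. */
};

static demo_node_t *
demo_right_get(const demo_node_t *n)
{
	return ((demo_node_t *)(n->right_red & ~(uintptr_t)1));
}

static void
demo_right_set(demo_node_t *n, demo_node_t *right)
{
	/* Alignment keeps bit 0 free, as rbt_node_new() asserts above. */
	assert(((uintptr_t)right & 1) == 0);
	n->right_red = (uintptr_t)right | (n->right_red & 1);
}

static bool
demo_red_get(const demo_node_t *n)
{
	return ((n->right_red & 1) != 0);
}

static void
demo_red_set(demo_node_t *n, bool red)
{
	n->right_red = (n->right_red & ~(uintptr_t)1) | (uintptr_t)red;
}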
*/ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != NULL) { \ - for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != NULL) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ - (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). - */ - -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ -a_attr bool \ -a_prefix##empty(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ -a_attr void \ -a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ - void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). - * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparison function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. 
- * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb_tree(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *tree); - * Description: Initialize a red-black tree structure. - * Args: - * tree: Pointer to an uninitialized red-black tree object. - * - * static bool - * ex_empty(ex_t *tree); - * Description: Determine whether tree is empty. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: True if tree is empty, false otherwise. - * - * static ex_node_t * - * ex_first(ex_t *tree); - * static ex_node_t * - * ex_last(ex_t *tree); - * Description: Get the first/last node in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: First/last node in tree, or NULL if tree is empty. - * - * static ex_node_t * - * ex_next(ex_t *tree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *tree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: A node in tree. - * Ret: node's successor/predecessor in tree, or NULL if node is - * last/first. - * - * static ex_node_t * - * ex_search(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or NULL if no match. - * - * static ex_node_t * - * ex_nsearch(ex_t *tree, const ex_node_t *key); - * static ex_node_t * - * ex_psearch(ex_t *tree, const ex_node_t *key); - * Description: Search for node that matches key. If no match is found, - * return what would be key's successor/predecessor, were - * key in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or if no match, hypothetical node's - * successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *tree, ex_node_t *node); - * Description: Insert node into tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node to be inserted into tree. - * - * static void - * ex_remove(ex_t *tree, ex_node_t *node); - * Description: Remove node from tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node in tree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over tree, starting at node. If - * tree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * tree : Pointer to an initialized red-black tree object. - * start: Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. 
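The ex_ API documented above maps directly onto code. A hedged usage sketch, assuming this rb.h is on the include path; the comparison function must break ties (here by address), because the generated insert path asserts a nonzero result for distinct nodes:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "rb.h"	/* The header removed by this diff. */

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int key;
	rb_node(ex_node_t) ex_link;
};
typedef rb_tree(ex_node_t) ex_t;

/* -1/0/1 comparison with an address tie-break so that equal keys can
 * coexist in one tree. */
static int
ex_cmp(const ex_node_t *a, const ex_node_t *b)
{
	if (a->key != b->key)
		return ((a->key < b->key) ? -1 : 1);
	if (a == b)
		return (0);
	return (((uintptr_t)a < (uintptr_t)b) ? -1 : 1);
}

rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

int
main(void)
{
	ex_t tree;
	ex_node_t n1 = {2}, n2 = {1}, n3 = {3};

	ex_new(&tree);
	ex_insert(&tree, &n1);
	ex_insert(&tree, &n2);
	ex_insert(&tree, &n3);
	for (ex_node_t *n = ex_first(&tree); n != NULL; n = ex_next(&tree, n))
		printf("%d\n", n->key);	/* Keys in sorted order: 1 2 3. */
	return (0);
}

Using `static` for a_attr matches the documented example; expect unused-function warnings for the generated functions a given translation unit does not call.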
This is useful for re-starting iteration after - * modifying tree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. - * - * static void - * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); - * Description: Iterate over the tree with post-order traversal, remove - * each node, and run the callback if non-null. This is - * used for destroying a tree without paying the cost to - * rebalance it. The tree must not be otherwise altered - * during traversal. - * Args: - * tree: Pointer to an initialized red-black tree object. - * cb : Callback function, which, if non-null, is called for each node - * during iteration. There is no way to stop iteration once it - * has begun. - * arg : Opaque pointer passed to cb(). - */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ -a_attr bool \ -a_prefix##empty(a_rbt_type *rbtree) { \ - return (rbtree->rbt_root == NULL); \ -} \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != NULL) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != NULL); \ - ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != NULL); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != NULL) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != NULL); \ - ret = NULL; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != NULL); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ - while (ret != NULL \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = NULL; \ - while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - return (ret); \ -} \ 
-a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = NULL; \ - while (tnode != NULL) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. */ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (left != NULL && rbtn_red_get(a_type, a_field, \ - left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != NULL; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. */ \ - pathp->cmp = 1; \ - nodep = pathp; \ - for (pathp++; pathp->node != NULL; \ - pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. 
*/ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. */\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. */\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != NULL) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ - assert(!rbtn_red_get(a_type, a_field, node)); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ - rbtree->rbt_root = NULL; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. */\ - pathp->node = NULL; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ - if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ - rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. */\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. 
*/\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ - rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ - if (leftrightleft != NULL && rbtn_red_get(a_type, \ - a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ - assert(leftright != NULL); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ - leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. 
*/ \ - rbtree->rbt_root = path->node; \ - assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return (NULL); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ - arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return (NULL); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ - (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = 
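
(Usage sketch for the generated iterators above, continuing the illustrative ex_* instantiation: the callback is invoked in key order, iteration stops as soon as it returns non-NULL, and that value is propagated back to the caller. A NULL start walks the whole tree; a non-NULL start begins at the first node comparing >= start for iter, or <= start for reverse_iter.)

    static ex_node_t *
    ex_sum_cb(ex_t *tree, ex_node_t *node, void *arg)
    {
        (void)tree;
        *(long *)arg += node->ex_key;
        return (NULL);          /* NULL: keep iterating. */
    }

    ex_t tree;
    long sum = 0;

    ex_new(&tree);
    /* ... ex_insert() some nodes ... */
    ex_iter(&tree, NULL, ex_sum_cb, &sum);          /* ascending pass */
    ex_reverse_iter(&tree, NULL, ex_sum_cb, &sum);  /* descending pass */
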
a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ - a_type *, void *), void *arg) { \ - if (node == NULL) { \ - return; \ - } \ - a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ - node), cb, arg); \ - rbtn_left_set(a_type, a_field, (node), NULL); \ - a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ - node), cb, arg); \ - rbtn_right_set(a_type, a_field, (node), NULL); \ - if (cb) { \ - cb(node, arg); \ - } \ -} \ -a_attr void \ -a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ - void *arg) { \ - a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ - rbtree->rbt_root = NULL; \ -} - -#endif /* RB_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/rtree.h b/sources/jemalloc/include-win/jemalloc/internal/rtree.h deleted file mode 100755 index 8d0c584d..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/rtree.h +++ /dev/null @@ -1,366 +0,0 @@ -/* - * This radix tree implementation is tailored to the singular purpose of - * associating metadata with chunks that are currently owned by jemalloc. - * - ******************************************************************************* - */ -#ifdef JEMALLOC_H_TYPES - -typedef struct rtree_node_elm_s rtree_node_elm_t; -typedef struct rtree_level_s rtree_level_t; -typedef struct rtree_s rtree_t; - -/* - * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the - * machine address width. - */ -#define LG_RTREE_BITS_PER_LEVEL 4 -#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) -/* Maximum rtree height. */ -#define RTREE_HEIGHT_MAX \ - ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) - -/* Used for two-stage lock-free node initialization. */ -#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) - -/* - * The node allocation callback function's argument is the number of contiguous - * rtree_node_elm_t structures to allocate, and the resulting memory must be - * zeroed. - */ -typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); -typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct rtree_node_elm_s { - union { - void *pun; - rtree_node_elm_t *child; - extent_node_t *val; - }; -}; - -struct rtree_level_s { - /* - * A non-NULL subtree points to a subtree rooted along the hypothetical - * path to the leaf node corresponding to key 0. Depending on what keys - * have been used to store to the tree, an arbitrary combination of - * subtree pointers may remain NULL. - * - * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. - * This results in a 3-level tree, and the leftmost leaf can be directly - * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding - * 0x00000000) can be accessed via subtrees[1], and the remainder of the - * tree can be accessed via subtrees[0]. - * - * levels[0] : [ | 0x0001******** | 0x0002******** | ...] - * - * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] - * - * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] - * - * This has practical implications on x64, which currently uses only the - * lower 47 bits of virtual address space in userland, thus leaving - * subtrees[0] unused and avoiding a level of tree traversal. 
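
(A standalone illustration of the subkey arithmetic this comment describes, assuming a 64-bit pointer width and four fixed 16-bit levels; the library actually sizes levels in rtree_new(), so the cumbits progression here is illustrative only:)

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t key = (uintptr_t)0x00007f1234567000ULL;
        unsigned level;

        for (level = 0; level < 4; level++) {
            unsigned cumbits = (level + 1) * 16;   /* key bits consumed */
            uintptr_t subkey = (key >> (64 - cumbits)) & 0xffff;
            /* Prints 0x0000, 0x7f12, 0x3456, 0x7000. */
            printf("level %u: subkey 0x%04lx\n", level,
                (unsigned long)subkey);
        }
        return (0);
    }
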
- */ - union { - void *subtree_pun; - rtree_node_elm_t *subtree; - }; - /* Number of key bits distinguished by this level. */ - unsigned bits; - /* - * Cumulative number of key bits distinguished by traversing to - * corresponding tree level. - */ - unsigned cumbits; -}; - -struct rtree_s { - rtree_node_alloc_t *alloc; - rtree_node_dalloc_t *dalloc; - unsigned height; - /* - * Precomputed table used to convert from the number of leading 0 key - * bits to which subtree level to start at. - */ - unsigned start_level[RTREE_HEIGHT_MAX]; - rtree_level_t levels[RTREE_HEIGHT_MAX]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, - unsigned level); -rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, - rtree_node_elm_t *elm, unsigned level); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); -uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); - -bool rtree_node_valid(rtree_node_elm_t *node); -rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, - bool dependent); -rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, - unsigned level, bool dependent); -extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, - bool dependent); -void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, - const extent_node_t *val); -rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, - bool dependent); -rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, - bool dependent); - -extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); -bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -JEMALLOC_ALWAYS_INLINE unsigned -rtree_start_level(rtree_t *rtree, uintptr_t key) -{ - unsigned start_level; - - if (unlikely(key == 0)) - return (rtree->height - 1); - - start_level = rtree->start_level[lg_floor(key) >> - LG_RTREE_BITS_PER_LEVEL]; - assert(start_level < rtree->height); - return (start_level); -} - -JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) -{ - - return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - rtree->levels[level].cumbits)) & ((ZU(1) << - rtree->levels[level].bits) - 1)); -} - -JEMALLOC_ALWAYS_INLINE bool -rtree_node_valid(rtree_node_elm_t *node) -{ - - return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) -{ - rtree_node_elm_t *child; - - /* Double-checked read (first read may be stale. 
*/ - child = elm->child; - if (!dependent && !rtree_node_valid(child)) - child = atomic_read_p(&elm->pun); - assert(!dependent || child != NULL); - return (child); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, - bool dependent) -{ - rtree_node_elm_t *child; - - child = rtree_child_tryread(elm, dependent); - if (!dependent && unlikely(!rtree_node_valid(child))) - child = rtree_child_read_hard(rtree, elm, level); - assert(!dependent || child != NULL); - return (child); -} - -JEMALLOC_ALWAYS_INLINE extent_node_t * -rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) -{ - - if (dependent) { - /* - * Reading a val on behalf of a pointer to a valid allocation is - * guaranteed to be a clean read even without synchronization, - * because the rtree update became visible in memory before the - * pointer came into existence. - */ - return (elm->val); - } else { - /* - * An arbitrary read, e.g. on behalf of ivsalloc(), may not be - * dependent on a previous rtree write, which means a stale read - * could result if synchronization were omitted here. - */ - return (atomic_read_p(&elm->pun)); - } -} - -JEMALLOC_INLINE void -rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) -{ - - atomic_write_p(&elm->pun, val); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) -{ - rtree_node_elm_t *subtree; - - /* Double-checked read (first read may be stale. */ - subtree = rtree->levels[level].subtree; - if (!dependent && unlikely(!rtree_node_valid(subtree))) - subtree = atomic_read_p(&rtree->levels[level].subtree_pun); - assert(!dependent || subtree != NULL); - return (subtree); -} - -JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * -rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) -{ - rtree_node_elm_t *subtree; - - subtree = rtree_subtree_tryread(rtree, level, dependent); - if (!dependent && unlikely(!rtree_node_valid(subtree))) - subtree = rtree_subtree_read_hard(rtree, level); - assert(!dependent || subtree != NULL); - return (subtree); -} - -JEMALLOC_ALWAYS_INLINE extent_node_t * -rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) -{ - uintptr_t subkey; - unsigned start_level; - rtree_node_elm_t *node; - - start_level = rtree_start_level(rtree, key); - - node = rtree_subtree_tryread(rtree, start_level, dependent); -#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) - switch (start_level + RTREE_GET_BIAS) { -#define RTREE_GET_SUBTREE(level) \ - case level: \ - assert(level < (RTREE_HEIGHT_MAX-1)); \ - if (!dependent && unlikely(!rtree_node_valid(node))) \ - return (NULL); \ - subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ - node = rtree_child_tryread(&node[subkey], dependent); \ - /* Fall through. */ -#define RTREE_GET_LEAF(level) \ - case level: \ - assert(level == (RTREE_HEIGHT_MAX-1)); \ - if (!dependent && unlikely(!rtree_node_valid(node))) \ - return (NULL); \ - subkey = rtree_subkey(rtree, key, level - \ - RTREE_GET_BIAS); \ - /* \ - * node is a leaf, so it contains values rather than \ - * child pointers. 
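
(For clarity, a loop-form equivalent of the unrolled lookup that the rtree_get() switch implements, using only helpers defined in this header and the same dependent-read semantics; the function name rtree_get_looped is hypothetical, and the real code unrolls so the level count is a compile-time constant:)

    JEMALLOC_ALWAYS_INLINE extent_node_t *
    rtree_get_looped(rtree_t *rtree, uintptr_t key, bool dependent)
    {
        unsigned level = rtree_start_level(rtree, key);
        rtree_node_elm_t *node = rtree_subtree_tryread(rtree, level,
            dependent);

        for (;; level++) {
            uintptr_t subkey;

            if (!dependent && unlikely(!rtree_node_valid(node)))
                return (NULL);
            subkey = rtree_subkey(rtree, key, level);
            if (level == rtree->height - 1) {
                /* Leaf: elements hold values, not child pointers. */
                return (rtree_val_read(rtree, &node[subkey], dependent));
            }
            node = rtree_child_tryread(&node[subkey], dependent);
        }
    }
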
\ - */ \ - return (rtree_val_read(rtree, &node[subkey], \ - dependent)); -#if RTREE_HEIGHT_MAX > 1 - RTREE_GET_SUBTREE(0) -#endif -#if RTREE_HEIGHT_MAX > 2 - RTREE_GET_SUBTREE(1) -#endif -#if RTREE_HEIGHT_MAX > 3 - RTREE_GET_SUBTREE(2) -#endif -#if RTREE_HEIGHT_MAX > 4 - RTREE_GET_SUBTREE(3) -#endif -#if RTREE_HEIGHT_MAX > 5 - RTREE_GET_SUBTREE(4) -#endif -#if RTREE_HEIGHT_MAX > 6 - RTREE_GET_SUBTREE(5) -#endif -#if RTREE_HEIGHT_MAX > 7 - RTREE_GET_SUBTREE(6) -#endif -#if RTREE_HEIGHT_MAX > 8 - RTREE_GET_SUBTREE(7) -#endif -#if RTREE_HEIGHT_MAX > 9 - RTREE_GET_SUBTREE(8) -#endif -#if RTREE_HEIGHT_MAX > 10 - RTREE_GET_SUBTREE(9) -#endif -#if RTREE_HEIGHT_MAX > 11 - RTREE_GET_SUBTREE(10) -#endif -#if RTREE_HEIGHT_MAX > 12 - RTREE_GET_SUBTREE(11) -#endif -#if RTREE_HEIGHT_MAX > 13 - RTREE_GET_SUBTREE(12) -#endif -#if RTREE_HEIGHT_MAX > 14 - RTREE_GET_SUBTREE(13) -#endif -#if RTREE_HEIGHT_MAX > 15 - RTREE_GET_SUBTREE(14) -#endif -#if RTREE_HEIGHT_MAX > 16 -# error Unsupported RTREE_HEIGHT_MAX -#endif - RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) -#undef RTREE_GET_SUBTREE -#undef RTREE_GET_LEAF - default: not_reached(); - } -#undef RTREE_GET_BIAS - not_reached(); -} - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) -{ - uintptr_t subkey; - unsigned i, start_level; - rtree_node_elm_t *node, *child; - - start_level = rtree_start_level(rtree, key); - - node = rtree_subtree_read(rtree, start_level, false); - if (node == NULL) - return (true); - for (i = start_level; /**/; i++, node = child) { - subkey = rtree_subkey(rtree, key, i); - if (i == rtree->height - 1) { - /* - * node is a leaf, so it contains values rather than - * child pointers. - */ - rtree_val_write(rtree, &node[subkey], val); - return (false); - } - assert(i + 1 < rtree->height); - child = rtree_child_read(rtree, &node[subkey], i, false); - if (child == NULL) - return (true); - } - not_reached(); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/size_classes.h b/sources/jemalloc/include-win/jemalloc/internal/size_classes.h deleted file mode 100755 index e4edc4bc..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/size_classes.h +++ /dev/null @@ -1,1428 +0,0 @@ -/* This file was automatically generated by size_classes.sh. */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to - * be defined prior to inclusion, and it in turn defines: - * - * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. - * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, - * bin, lg_delta_lookup) tuples. - * index: Size class index. - * lg_grp: Lg group base size (no deltas added). - * lg_delta: Lg delta to previous size class. - * ndelta: Delta multiplier. 
size == 1< 255) -# error "Too many small size classes" -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh b/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh deleted file mode 100755 index f6fbce4e..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/bin/sh -# -# Usage: size_classes.sh - -# The following limits are chosen such that they cover all supported platforms. - -# Pointer sizes. -lg_zarr="2 3" - -# Quanta. -lg_qarr=$1 - -# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. -lg_tmin=$2 - -# Maximum lookup size. -lg_kmax=12 - -# Page sizes. -lg_parr=`echo $3 | tr ',' ' '` - -# Size class group size (number of size classes for each size doubling). -lg_g=$4 - -pow2() { - e=$1 - pow2_result=1 - while [ ${e} -gt 0 ] ; do - pow2_result=$((${pow2_result} + ${pow2_result})) - e=$((${e} - 1)) - done -} - -lg() { - x=$1 - lg_result=0 - while [ ${x} -gt 1 ] ; do - lg_result=$((${lg_result} + 1)) - x=$((${x} / 2)) - done -} - -size_class() { - index=$1 - lg_grp=$2 - lg_delta=$3 - ndelta=$4 - lg_p=$5 - lg_kmax=$6 - - if [ ${lg_delta} -ge ${lg_p} ] ; then - psz="yes" - else - pow2 ${lg_p}; p=${pow2_result} - pow2 ${lg_grp}; grp=${pow2_result} - pow2 ${lg_delta}; delta=${pow2_result} - sz=$((${grp} + ${delta} * ${ndelta})) - npgs=$((${sz} / ${p})) - if [ ${sz} -eq $((${npgs} * ${p})) ] ; then - psz="yes" - else - psz="no" - fi - fi - - lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} - if [ ${pow2_result} -lt ${ndelta} ] ; then - rem="yes" - else - rem="no" - fi - - lg_size=${lg_grp} - if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then - lg_size=$((${lg_grp} + 1)) - else - lg_size=${lg_grp} - rem="yes" - fi - - if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then - bin="yes" - else - bin="no" - fi - if [ ${lg_size} -lt ${lg_kmax} \ - -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then - lg_delta_lookup=${lg_delta} - else - lg_delta_lookup="no" - fi - printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup} - # Defined upon return: - # - psz ("yes" or "no") - # - bin ("yes" or "no") - # - lg_delta_lookup (${lg_delta} or "no") -} - -sep_line() { - echo " \\" -} - -size_classes() { - lg_z=$1 - lg_q=$2 - lg_t=$3 - lg_p=$4 - lg_g=$5 - - pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} - pow2 ${lg_g}; g=${pow2_result} - - echo "#define SIZE_CLASSES \\" - echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\" - - ntbins=0 - nlbins=0 - lg_tiny_maxclass='"NA"' - nbins=0 - npsizes=0 - - # Tiny size classes. 
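
(The size encoded by each SC(index, lg_grp, lg_delta, ndelta, ...) tuple that size_class() prints is size = (1 << lg_grp) + ndelta * (1 << lg_delta). A worked check in C; sc_size is a hypothetical helper and the tuple values are taken from a typical 64-bit configuration:)

    #include <stddef.h>
    #include <stdio.h>

    static size_t
    sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta)
    {
        return (((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta));
    }

    int
    main(void)
    {
        /* lg_grp=6, lg_delta=4, ndelta=1..3 yields 80, 96, 112 bytes. */
        printf("%zu %zu %zu\n", sc_size(6, 4, 1), sc_size(6, 4, 2),
            sc_size(6, 4, 3));
        return (0);
    }
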
- ndelta=0 - index=0 - lg_grp=${lg_t} - lg_delta=${lg_grp} - while [ ${lg_grp} -lt ${lg_q} ] ; do - size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} - if [ ${lg_delta_lookup} != "no" ] ; then - nlbins=$((${index} + 1)) - fi - if [ ${psz} = "yes" ] ; then - npsizes=$((${npsizes} + 1)) - fi - if [ ${bin} != "no" ] ; then - nbins=$((${index} + 1)) - fi - ntbins=$((${ntbins} + 1)) - lg_tiny_maxclass=${lg_grp} # Final written value is correct. - index=$((${index} + 1)) - lg_delta=${lg_grp} - lg_grp=$((${lg_grp} + 1)) - done - - # First non-tiny group. - if [ ${ntbins} -gt 0 ] ; then - sep_line - # The first size class has an unusual encoding, because the size has to be - # split between grp and delta*ndelta. - lg_grp=$((${lg_grp} - 1)) - ndelta=1 - size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} - index=$((${index} + 1)) - lg_grp=$((${lg_grp} + 1)) - lg_delta=$((${lg_delta} + 1)) - if [ ${psz} = "yes" ] ; then - npsizes=$((${npsizes} + 1)) - fi - fi - while [ ${ndelta} -lt ${g} ] ; do - size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} - index=$((${index} + 1)) - ndelta=$((${ndelta} + 1)) - if [ ${psz} = "yes" ] ; then - npsizes=$((${npsizes} + 1)) - fi - done - - # All remaining groups. - lg_grp=$((${lg_grp} + ${lg_g})) - while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do - sep_line - ndelta=1 - if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then - ndelta_limit=$((${g} - 1)) - else - ndelta_limit=${g} - fi - while [ ${ndelta} -le ${ndelta_limit} ] ; do - size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} - if [ ${lg_delta_lookup} != "no" ] ; then - nlbins=$((${index} + 1)) - # Final written value is correct: - lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" - fi - if [ ${psz} = "yes" ] ; then - npsizes=$((${npsizes} + 1)) - fi - if [ ${bin} != "no" ] ; then - nbins=$((${index} + 1)) - # Final written value is correct: - small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" - if [ ${lg_g} -gt 0 ] ; then - lg_large_minclass=$((${lg_grp} + 1)) - else - lg_large_minclass=$((${lg_grp} + 2)) - fi - fi - # Final written value is correct: - huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" - index=$((${index} + 1)) - ndelta=$((${ndelta} + 1)) - done - lg_grp=$((${lg_grp} + 1)) - lg_delta=$((${lg_delta} + 1)) - done - echo - nsizes=${index} - - # Defined upon completion: - # - ntbins - # - nlbins - # - nbins - # - nsizes - # - npsizes - # - lg_tiny_maxclass - # - lookup_maxclass - # - small_maxclass - # - lg_large_minclass - # - huge_maxclass -} - -cat < 255) -# error "Too many small size classes" -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ -EOF diff --git a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h deleted file mode 100755 index c5333cca..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h +++ /dev/null @@ 
-1,246 +0,0 @@ -/* - * This file was generated by the following command: - * sh smoothstep.sh smoother 200 24 3 15 - */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * This header defines a precomputed table based on the smoothstep family of - * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 - * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so - * that floating point math can be avoided. - * - * 3 2 - * smoothstep(x) = -2x + 3x - * - * 5 4 3 - * smootherstep(x) = 6x - 15x + 10x - * - * 7 6 5 4 - * smootheststep(x) = -20x + 70x - 84x + 35x - */ - -#define SMOOTHSTEP_VARIANT "smoother" -#define SMOOTHSTEP_NSTEPS 200 -#define SMOOTHSTEP_BFP 24 -#define SMOOTHSTEP \ - /* STEP(step, h, x, y) */ \ - STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ - STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ - STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ - STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ - STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ - STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ - STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ - STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ - STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ - STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ - STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ - STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ - STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ - STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ - STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ - STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ - STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ - STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ - STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ - STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ - STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ - STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ - STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ - STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ - STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ - STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ - STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ - STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ - STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ - STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ - STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ - STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ - STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ - STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ - STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ - STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ - STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ - STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ - STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ - STEP( 
40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ - STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ - STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ - STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ - STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ - STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ - STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ - STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ - STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ - STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ - STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ - STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ - STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ - STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ - STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ - STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ - STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ - STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ - STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ - STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ - STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ - STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ - STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ - STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ - STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ - STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ - STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ - STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ - STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ - STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ - STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ - STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ - STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ - STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ - STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ - STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ - STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ - STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ - STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ - STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ - STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ - STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ - STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ - STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ - STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ - STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ - STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ - STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ - STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ - STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ - STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 
0.406873125000000) \ - STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ - STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ - STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ - STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ - STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ - STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ - STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ - STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ - STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ - STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ - STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ - STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ - STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ - STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ - STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ - STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ - STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ - STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ - STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ - STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ - STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ - STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ - STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ - STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ - STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ - STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ - STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ - STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ - STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ - STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ - STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ - STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ - STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ - STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ - STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ - STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ - STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ - STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ - STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ - STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ - STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ - STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ - STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ - STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ - STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ - STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ - STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ - STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ - STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ - STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 
0.836920000000000) \ - STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ - STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ - STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ - STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ - STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ - STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ - STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ - STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ - STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ - STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ - STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ - STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ - STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ - STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ - STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ - STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ - STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ - STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ - STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ - STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ - STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ - STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ - STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ - STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ - STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ - STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ - STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ - STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ - STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ - STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ - STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ - STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ - STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ - STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ - STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ - STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ - STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ - STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ - STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ - STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ - STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ - STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ - STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ - STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ - STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ - STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ - STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ - STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ - STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ - STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 
0.998841875000000) \ - STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ - STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ - STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ - STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ - STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ - STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ - STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ - STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ - STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ - STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh deleted file mode 100755 index 8124693f..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/sh -# -# Generate a discrete lookup table for a sigmoid function in the smoothstep -# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table -# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode -# the entries using a binary fixed point representation. -# -# Usage: smoothstep.sh -# -# is in {smooth, smoother, smoothest}. -# must be greater than zero. -# must be in [0..62]; reasonable values are roughly [10..30]. -# is x decimal precision. -# is y decimal precision. - -#set -x - -cmd="sh smoothstep.sh $*" -variant=$1 -nsteps=$2 -bfp=$3 -xprec=$4 -yprec=$5 - -case "${variant}" in - smooth) - ;; - smoother) - ;; - smoothest) - ;; - *) - echo "Unsupported variant" - exit 1 - ;; -esac - -smooth() { - step=$1 - y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` - h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` -} - -smoother() { - step=$1 - y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` - h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` -} - -smoothest() { - step=$1 - y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` - h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' 
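
(What these dc pipelines compute, restated in C: y is the chosen sigmoid evaluated at x = step/nsteps, and h is y in binary fixed point, i.e. y scaled by 2^bfp and truncated. A spot check against the generated table, where STEP(100, ...) at x = 0.5 carries h = 0x800000; smootherstep below is a standalone re-implementation:)

    #include <stdint.h>
    #include <stdio.h>

    static double
    smootherstep(double x)
    {
        /* 6x^5 - 15x^4 + 10x^3, in Horner form. */
        return (((6 * x - 15) * x + 10) * x * x * x);
    }

    int
    main(void)
    {
        double y = smootherstep(100.0 / 200.0);    /* step 100 of 200 */
        uint64_t h = (uint64_t)(y * (1 << 24));    /* SMOOTHSTEP_BFP == 24 */

        /* Prints 0.500000000000000 -> 0x0000000000800000. */
        printf("%.15f -> 0x%016llx\n", y, (unsigned long long)h);
        return (0);
    }
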
' ' | awk '{print $1}' ` -} - -cat <iteration = 0; -} - -JEMALLOC_INLINE void -spin_adaptive(spin_t *spin) -{ - volatile uint64_t i; - - for (i = 0; i < (KQU(1) << spin->iteration); i++) - CPU_SPINWAIT; - - if (spin->iteration < 63) - spin->iteration++; -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/stats.h b/sources/jemalloc/include-win/jemalloc/internal/stats.h deleted file mode 100755 index 04e7dae1..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/stats.h +++ /dev/null @@ -1,197 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct malloc_huge_stats_s malloc_huge_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; - -struct malloc_bin_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the bin. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to the size of this - * bin. This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of regions of this size class, including regions - * currently cached by tcache. - */ - size_t curregs; - - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. */ - uint64_t nflushes; - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -struct malloc_large_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to this size class. - * This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of runs of this size class, including runs currently - * cached by tcache. - */ - size_t curruns; -}; - -struct malloc_huge_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* Current number of (multi-)chunk allocations of this size class. */ - size_t curhchunks; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. 
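
(Usage sketch for the spin helpers removed above, with a hypothetical caller: each spin_adaptive() call busy-waits 2^iteration CPU_SPINWAIT iterations and then doubles the backoff, capping the exponent at 63. Types and helpers are those of spin.h; wait_until_ready is illustrative:)

    static void
    wait_until_ready(volatile bool *ready)
    {
        spin_t spin;

        spin_init(&spin);
        while (!*ready)
            spin_adaptive(&spin);   /* exponentially growing backoff */
    }
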
*/ - size_t mapped; - - /* - * Number of bytes currently retained as a side effect of munmap() being - * disabled/bypassed. Retained bytes are technically mapped (though - * always decommitted or purged), but they are excluded from the mapped - * statistic (above). - */ - size_t retained; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* - * Number of bytes currently mapped purely for metadata purposes, and - * number of bytes currently allocated for internal metadata. - */ - size_t metadata_mapped; - size_t metadata_allocated; /* Protected via atomic_*_z(). */ - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - size_t allocated_huge; - uint64_t nmalloc_huge; - uint64_t ndalloc_huge; - - /* One element for each large size class. */ - malloc_large_stats_t *lstats; - - /* One element for each huge size class. */ - malloc_huge_stats_t *hstats; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; - -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ - - return (atomic_read_z(&stats_cactive)); -} - -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ - - assert(size > 0); - assert((size & chunksize_mask) == 0); - - atomic_add_z(&stats_cactive, size); -} - -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - assert(size > 0); - assert((size & chunksize_mask) == 0); - - atomic_sub_z(&stats_cactive, size); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/tcache.h b/sources/jemalloc/include-win/jemalloc/internal/tcache.h deleted file mode 100755 index 5fe5ebfa..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/tcache.h +++ /dev/null @@ -1,472 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_info_s tcache_bin_info_t; -typedef struct tcache_bin_s tcache_bin_t; -typedef struct tcache_s tcache_t; -typedef struct tcaches_s tcaches_t; - -/* - * tcache pointers close to NULL are used to encode state information that is - * used for two purposes: preventing thread caching on a per thread basis and - * cleaning up during thread shutdown. - */ -#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) -#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) -#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) -#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY - -/* - * Absolute minimum number of cache slots for each small bin. - */ -#define TCACHE_NSLOTS_SMALL_MIN 20 - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. 
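
(The stats counters above surface through jemalloc's public mallctl namespace. A usage sketch with jemalloc 4.x control names, refreshing the epoch before reading a snapshot; print_allocated is a hypothetical caller, and the symbol prefix can vary by build configuration:)

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void
    print_allocated(void)
    {
        uint64_t epoch = 1;
        size_t allocated, sz = sizeof(allocated);

        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        printf("allocated: %zu bytes\n", allocated);
    }
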
This is an additional constraint beyond that imposed as: twice the - * number of regions per run for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. */ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -typedef enum { - tcache_enabled_false = 0, /* Enable cast to/from bool. */ - tcache_enabled_true = 1, - tcache_enabled_default = 2 -} tcache_enabled_t; - -/* - * Read-only information associated with each element of tcache_t's tbins array - * is stored separately, mainly to reduce memory usage. - */ -struct tcache_bin_info_s { - unsigned ncached_max; /* Upper limit on ncached. */ -}; - -struct tcache_bin_s { - tcache_bin_stats_t tstats; - int low_water; /* Min # cached since last GC. */ - unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ - unsigned ncached; /* # of cached objects. */ - /* - * To make use of adjacent cacheline prefetch, the items in the avail - * stack goes to higher address for newer allocations. avail points - * just above the available space, which means that - * avail[-ncached, ... -1] are available items and the lowest item will - * be allocated first. - */ - void **avail; /* Stack of available objects. */ -}; - -struct tcache_s { - ql_elm(tcache_t) link; /* Used for aggregating stats. */ - uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ - ticker_t gc_ticker; /* Drives incremental GC. */ - szind_t next_gc_bin; /* Next bin to GC. */ - tcache_bin_t tbins[1]; /* Dynamically sized. */ - /* - * The pointer stacks associated with tbins follow as a contiguous - * array. During tcache initialization, the avail pointer in each - * element of tbins is initialized to point to the proper offset within - * this array. - */ -}; - -/* Linkage for list of available (previously used) explicit tcache IDs. */ -struct tcaches_s { - union { - tcache_t *tcache; - tcaches_t *next; - }; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern tcache_bin_info_t *tcache_bin_info; - -/* - * Number of tcache bins. There are NBINS small-object bins, plus 0 or more - * large-object bins. - */ -extern unsigned nhbins; - -/* Maximum cached size class. */ -extern size_t tcache_maxclass; - -/* - * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and - * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are - * completely disjoint from this data structure. tcaches starts off as a sparse - * array, so it has no physical memory footprint until individual pages are - * touched. 
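
(The avail-stack layout described above, in code: with ncached == N the occupied slots are avail[-N] .. avail[-1], an allocation pops the lowest occupied slot, and a deallocation pushes back into it. This fragment is distilled from tcache_alloc_easy() and tcache_dalloc_small() later in this header; tbin, ret, and ptr come from their surrounding contexts:)

    /* Pop, as in tcache_alloc_easy(): */
    ret = *(tbin->avail - tbin->ncached);
    tbin->ncached--;

    /* Push, as in tcache_dalloc_small(): */
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;
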
This allows the entire array to be allocated the first time an - * explicit tcache is created without a disproportionate impact on memory usage. - */ -extern tcaches_t *tcaches; - -size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); -void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); -void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind, bool *tcache_success); -void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem); -void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache); -void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, - arena_t *oldarena, arena_t *newarena); -tcache_t *tcache_get_hard(tsd_t *tsd); -tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); -void tcache_cleanup(tsd_t *tsd); -void tcache_enabled_cleanup(tsd_t *tsd); -void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); -bool tcaches_create(tsd_t *tsd, unsigned *r_ind); -void tcaches_flush(tsd_t *tsd, unsigned ind); -void tcaches_destroy(tsd_t *tsd, unsigned ind); -bool tcache_boot(tsdn_t *tsdn); -void tcache_prefork(tsdn_t *tsdn); -void tcache_postfork_parent(tsdn_t *tsdn); -void tcache_postfork_child(tsdn_t *tsdn); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void tcache_event(tsd_t *tsd, tcache_t *tcache); -void tcache_flush(void); -bool tcache_enabled_get(void); -tcache_t *tcache_get(tsd_t *tsd, bool create); -void tcache_enabled_set(bool enabled); -void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); -void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - size_t size, szind_t ind, bool zero, bool slow_path); -void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - size_t size, szind_t ind, bool zero, bool slow_path); -void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, - szind_t binind, bool slow_path); -void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, - size_t size, bool slow_path); -tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) -JEMALLOC_INLINE void -tcache_flush(void) -{ - tsd_t *tsd; - - cassert(config_tcache); - - tsd = tsd_fetch(); - tcache_cleanup(tsd); -} - -JEMALLOC_INLINE bool -tcache_enabled_get(void) -{ - tsd_t *tsd; - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tsd = tsd_fetch(); - tcache_enabled = tsd_tcache_enabled_get(tsd); - if (tcache_enabled == tcache_enabled_default) { - tcache_enabled = (tcache_enabled_t)opt_tcache; - tsd_tcache_enabled_set(tsd, tcache_enabled); - } - - return ((bool)tcache_enabled); -} - -JEMALLOC_INLINE void -tcache_enabled_set(bool enabled) -{ - tsd_t *tsd; - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tsd = tsd_fetch(); - - tcache_enabled = (tcache_enabled_t)enabled; - tsd_tcache_enabled_set(tsd, tcache_enabled); - - if (!enabled) - tcache_cleanup(tsd); -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcache_get(tsd_t *tsd, bool create) -{ - tcache_t *tcache; - - if (!config_tcache) - return (NULL); - - tcache = tsd_tcache_get(tsd); - if (!create) - return (tcache); - if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { - tcache = tcache_get_hard(tsd); - tsd_tcache_set(tsd, tcache); - } - - return (tcache); -} - -JEMALLOC_ALWAYS_INLINE void 
-tcache_event(tsd_t *tsd, tcache_t *tcache) -{ - - if (TCACHE_GC_INCR == 0) - return; - - if (unlikely(ticker_tick(&tcache->gc_ticker))) - tcache_event_hard(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) -{ - void *ret; - - if (unlikely(tbin->ncached == 0)) { - tbin->low_water = -1; - *tcache_success = false; - return (NULL); - } - /* - * tcache_success (instead of ret) should be checked upon the return of - * this function. We avoid checking (ret == NULL) because there is - * never a null stored on the avail stack (which is unknown to the - * compiler), and eagerly checking ret would cause pipeline stall - * (waiting for the cacheline). - */ - *tcache_success = true; - ret = *(tbin->avail - tbin->ncached); - tbin->ncached--; - - if (unlikely((int)tbin->ncached < tbin->low_water)) - tbin->low_water = tbin->ncached; - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, - szind_t binind, bool zero, bool slow_path) -{ - void *ret; - tcache_bin_t *tbin; - bool tcache_success; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - assert(binind < NBINS); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin, &tcache_success); - assert(tcache_success == (ret != NULL)); - if (unlikely(!tcache_success)) { - bool tcache_hard_success; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, - tbin, binind, &tcache_hard_success); - if (tcache_hard_success == false) - return (NULL); - } - - assert(ret); - /* - * Only compute usize if required. The checks in the following if - * statement are all static. - */ - if (config_prof || (slow_path && config_fill) || unlikely(zero)) { - usize = index2size(binind); - assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); - } - - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } else { - if (slow_path && config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - memset(ret, 0, usize); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += usize; - tcache_event(tsd, tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, - szind_t binind, bool zero, bool slow_path) -{ - void *ret; - tcache_bin_t *tbin; - bool tcache_success; - - assert(binind < nhbins); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin, &tcache_success); - assert(tcache_success == (ret != NULL)); - if (unlikely(!tcache_success)) { - /* - * Only allocate one large object at a time, because it's quite - * expensive to create one and not use it. 
- */ - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); - if (ret == NULL) - return (NULL); - } else { - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - /* Only compute usize on demand */ - if (config_prof || (slow_path && config_fill) || - unlikely(zero)) { - usize = index2size(binind); - assert(usize <= tcache_maxclass); - } - - if (config_prof && usize == LARGE_MINCLASS) { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> - LG_PAGE); - arena_mapbits_large_binind_set(chunk, pageind, - BININD_INVALID); - } - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - memset(ret, JEMALLOC_ALLOC_JUNK, - usize); - } else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } else - memset(ret, 0, usize); - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += usize; - } - - tcache_event(tsd, tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, - bool slow_path) -{ - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); - - if (slow_path && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (unlikely(tbin->ncached == tbin_info->ncached_max)) { - tcache_bin_flush_small(tsd, tcache, tbin, binind, - (tbin_info->ncached_max >> 1)); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->ncached++; - *(tbin->avail - tbin->ncached) = ptr; - - tcache_event(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, - bool slow_path) -{ - szind_t binind; - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); - - binind = size2index(size); - - if (slow_path && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_large(ptr, size); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (unlikely(tbin->ncached == tbin_info->ncached_max)) { - tcache_bin_flush_large(tsd, tbin, binind, - (tbin_info->ncached_max >> 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->ncached++; - *(tbin->avail - tbin->ncached) = ptr; - - tcache_event(tsd, tcache); -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcaches_get(tsd_t *tsd, unsigned ind) -{ - tcaches_t *elm = &tcaches[ind]; - if (unlikely(elm->tcache == NULL)) { - elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, - NULL)); - } - return (elm->tcache); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ticker.h b/sources/jemalloc/include-win/jemalloc/internal/ticker.h deleted file mode 100755 index 4696e56d..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/ticker.h +++ /dev/null @@ -1,75 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ticker_s ticker_t; - -#endif /* JEMALLOC_H_TYPES */ 
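
ticker_t, declared just above and implemented inline in the sections that follow, is a plain countdown: ticker_ticks() subtracts and reports true (then rearms) once the count is exhausted, which is how tcache_event() turns per-allocation calls into periodic incremental GC. A usage sketch with a local reimplementation of the same semantics (toy_ names are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t tick, nticks; } toy_ticker_t;

/* Same logic as ticker_ticks(): fire and rearm when the countdown runs out. */
static int toy_ticks(toy_ticker_t *t, int32_t nticks) {
    if (t->tick < nticks) {
        t->tick = t->nticks;
        return 1;
    }
    t->tick -= nticks;
    return 0;
}

int main(void) {
    toy_ticker_t t = { 100, 100 }; /* fire roughly every 100 events */
    for (int ev = 1; ev <= 500; ev++) {
        if (toy_ticks(&t, 1))
            printf("incremental GC step at event %d\n", ev);
    }
    return 0;
}
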
-/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ticker_s { - int32_t tick; - int32_t nticks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void ticker_init(ticker_t *ticker, int32_t nticks); -void ticker_copy(ticker_t *ticker, const ticker_t *other); -int32_t ticker_read(const ticker_t *ticker); -bool ticker_ticks(ticker_t *ticker, int32_t nticks); -bool ticker_tick(ticker_t *ticker); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_)) -JEMALLOC_INLINE void -ticker_init(ticker_t *ticker, int32_t nticks) -{ - - ticker->tick = nticks; - ticker->nticks = nticks; -} - -JEMALLOC_INLINE void -ticker_copy(ticker_t *ticker, const ticker_t *other) -{ - - *ticker = *other; -} - -JEMALLOC_INLINE int32_t -ticker_read(const ticker_t *ticker) -{ - - return (ticker->tick); -} - -JEMALLOC_INLINE bool -ticker_ticks(ticker_t *ticker, int32_t nticks) -{ - - if (unlikely(ticker->tick < nticks)) { - ticker->tick = ticker->nticks; - return (true); - } - ticker->tick -= nticks; - return(false); -} - -JEMALLOC_INLINE bool -ticker_tick(ticker_t *ticker) -{ - - return (ticker_ticks(ticker, 1)); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/tsd.h b/sources/jemalloc/include-win/jemalloc/internal/tsd.h deleted file mode 100755 index 9f374335..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/tsd.h +++ /dev/null @@ -1,788 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum number of malloc_tsd users with cleanup functions. */ -#define MALLOC_TSD_CLEANUPS_MAX 2 - -typedef bool (*malloc_tsd_cleanup_t)(void); - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -typedef struct tsd_init_block_s tsd_init_block_t; -typedef struct tsd_init_head_s tsd_init_head_t; -#endif - -typedef struct tsd_s tsd_t; -typedef struct tsdn_s tsdn_t; - -#define TSDN_NULL ((tsdn_t *)0) - -typedef enum { - tsd_state_uninitialized, - tsd_state_nominal, - tsd_state_purgatory, - tsd_state_reincarnated -} tsd_state_t; - -/* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are five macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. 
Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_types(example_, example_t) - * malloc_tsd_protos(, example_, example_t) - * malloc_tsd_externs(example_, example_t) - * In example.c: - * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) - * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * bool example_tsd_booted_get(void) {...} - * example_t *example_tsd_get(bool init) {...} - * void example_tsd_set(example_t *val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast the - * function argument to (a_type *), then dereference the resulting pointer to - * access fields, e.g. - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = (example_t *)arg; - * - * example->x = 42; - * [...] - * if ([want the cleanup function to be called again]) - * example_tsd_set(example); - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. - */ - -/* malloc_tsd_types(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_types(a_name, a_type) -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_types(a_name, a_type) -#elif (defined(_WIN32)) -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#else -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#endif - -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##tsd_boot0(void); \ -a_attr void \ -a_name##tsd_boot1(void); \ -a_attr bool \ -a_name##tsd_boot(void); \ -a_attr bool \ -a_name##tsd_booted_get(void); \ -a_attr a_type * \ -a_name##tsd_get(bool init); \ -a_attr void \ -a_name##tsd_set(a_type *val); - -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern __thread bool a_name##tsd_initialized; \ -extern bool a_name##tsd_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern pthread_key_t a_name##tsd_tsd; \ -extern bool a_name##tsd_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##tsd_tsd; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##tsd_tsd; \ -extern tsd_init_head_t a_name##tsd_init_head; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#endif - -/* malloc_tsd_data(). 
*/ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##tsd_initialized = false; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##tsd_tsd; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr tsd_init_head_t a_name##tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#endif - -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##tsd_initialized) { \ - a_name##tsd_initialized = false; \ - a_cleanup(&a_name##tsd_tls); \ - } \ - return (a_name##tsd_initialized); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##tsd_cleanup_wrapper); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - \ - assert(a_name##tsd_booted); \ - return (&a_name##tsd_tls); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##tsd_booted); \ - a_name##tsd_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##tsd_initialized = true; \ -} -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ - 0) \ - return (true); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - \ - assert(a_name##tsd_booted); \ - return (&a_name##tsd_tls); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##tsd_booted); \ - a_name##tsd_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)(&a_name##tsd_tls))) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ -} -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_cleanup_wrapper(void) \ -{ \ - DWORD error = GetLastError(); \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - TlsGetValue(a_name##tsd_tsd); \ - SetLastError(error); \ - \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ -{ \ - \ - if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ -} \ -a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(bool init) \ -{ \ - DWORD error = GetLastError(); \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - TlsGetValue(a_name##tsd_tsd); \ - SetLastError(error); \ - \ - if (init && unlikely(wrapper == NULL)) { \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - wrapper->initialized = false; \ - wrapper->val = a_initializer; \ - } \ - a_name##tsd_wrapper_set(wrapper); \ - } \ - return (wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - a_name##tsd_tsd = TlsAlloc(); \ - if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##tsd_cleanup_wrapper); \ - } \ - a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - memcpy(wrapper, &a_name##tsd_boot_wrapper, \ - sizeof(a_name##tsd_wrapper_t)); \ - a_name##tsd_wrapper_set(wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - if (a_name##tsd_boot0()) \ - return (true); \ - a_name##tsd_boot1(); \ - return (false); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (true); \ -} \ -/* Get/set. 
*/ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(init); \ - if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ - return (NULL); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(true); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr void \ -a_name##tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr void \ -a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ -{ \ - \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ -} \ -a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(bool init) \ -{ \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - pthread_getspecific(a_name##tsd_tsd); \ - \ - if (init && unlikely(wrapper == NULL)) { \ - tsd_init_block_t block; \ - wrapper = (a_name##tsd_wrapper_t *) \ - tsd_init_check_recursion(&a_name##tsd_init_head, \ - &block); \ - if (wrapper) \ - return (wrapper); \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - block.data = (void *)wrapper; \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - wrapper->initialized = false; \ - wrapper->val = a_initializer; \ - } \ - a_name##tsd_wrapper_set(wrapper); \ - tsd_init_finish(&a_name##tsd_init_head, &block); \ - } \ - return (wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (pthread_key_create(&a_name##tsd_tsd, \ - a_name##tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - memcpy(wrapper, &a_name##tsd_boot_wrapper, \ - sizeof(a_name##tsd_wrapper_t)); \ - a_name##tsd_wrapper_set(wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - if (a_name##tsd_boot0()) \ - return (true); \ - a_name##tsd_boot1(); \ - return (false); \ -} \ -a_attr bool \ -a_name##tsd_booted_get(void) \ -{ \ - \ - return (a_name##tsd_booted); \ -} \ -a_attr bool \ -a_name##tsd_get_allocates(void) \ -{ \ - \ - return (true); \ -} \ -/* Get/set. 
*/ \ -a_attr a_type * \ -a_name##tsd_get(bool init) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(init); \ - if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ - return (NULL); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(true); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif - -#define MALLOC_TSD \ -/* O(name, type) */ \ - O(tcache, tcache_t *) \ - O(thread_allocated, uint64_t) \ - O(thread_deallocated, uint64_t) \ - O(prof_tdata, prof_tdata_t *) \ - O(iarena, arena_t *) \ - O(arena, arena_t *) \ - O(arenas_tdata, arena_tdata_t *) \ - O(narenas_tdata, unsigned) \ - O(arenas_tdata_bypass, bool) \ - O(tcache_enabled, tcache_enabled_t) \ - O(quarantine, quarantine_t *) \ - O(witnesses, witness_list_t) \ - O(witness_fork, bool) \ - -#define TSD_INITIALIZER { \ - tsd_state_uninitialized, \ - NULL, \ - 0, \ - 0, \ - NULL, \ - NULL, \ - NULL, \ - NULL, \ - 0, \ - false, \ - tcache_enabled_default, \ - NULL, \ - ql_head_initializer(witnesses), \ - false \ -} - -struct tsd_s { - tsd_state_t state; -#define O(n, t) \ - t n; -MALLOC_TSD -#undef O -}; - -/* - * Wrapper around tsd_t that makes it possible to avoid implicit conversion - * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be - * explicitly converted to tsd_t, which is non-nullable. 
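
The tsdn_t wrapper defined next is a type-system trick: because struct tsdn_s is a distinct type that merely contains a tsd_t, code cannot implicitly pass a possibly-NULL tsdn_t where a non-nullable tsd_t is expected; it must go through tsdn_tsd(), which asserts non-NULL. A hedged sketch of the same pattern with generic names (ctx/ctxn are hypothetical stand-ins):

#include <assert.h>
#include <stddef.h>

typedef struct { int data; } ctx_t;   /* non-nullable by convention */
typedef struct { ctx_t ctx; } ctxn_t; /* "nullable" wrapper type */

/* The only way to get a ctx_t out of a ctxn_t: every nullable-to-
 * non-nullable boundary becomes a deliberate, assert-checked call. */
static ctx_t *ctxn_ctx(ctxn_t *ctxn) {
    assert(ctxn != NULL);
    return &ctxn->ctx;
}

static void needs_ctx(ctx_t *ctx) { ctx->data++; }

int main(void) {
    ctxn_t n = {{0}};
    /* needs_ctx(&n) would be a compile error; conversion must be explicit: */
    needs_ctx(ctxn_ctx(&n));
    return 0;
}
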
- */ -struct tsdn_s { - tsd_t tsd; -}; - -static const tsd_t tsd_initializer = TSD_INITIALIZER; - -malloc_tsd_types(, tsd_t) - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *arg); -void malloc_tsd_cleanup_register(bool (*f)(void)); -tsd_t *malloc_tsd_boot0(void); -void malloc_tsd_boot1(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif -void tsd_cleanup(void *arg); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) - -tsd_t *tsd_fetch_impl(bool init); -tsd_t *tsd_fetch(void); -tsdn_t *tsd_tsdn(tsd_t *tsd); -bool tsd_nominal(tsd_t *tsd); -#define O(n, t) \ -t *tsd_##n##p_get(tsd_t *tsd); \ -t tsd_##n##_get(tsd_t *tsd); \ -void tsd_##n##_set(tsd_t *tsd, t n); -MALLOC_TSD -#undef O -tsdn_t *tsdn_fetch(void); -bool tsdn_null(const tsdn_t *tsdn); -tsd_t *tsdn_tsd(tsdn_t *tsdn); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) -malloc_tsd_externs(, tsd_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) - -JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch_impl(bool init) -{ - tsd_t *tsd = tsd_get(init); - - if (!init && tsd_get_allocates() && tsd == NULL) - return (NULL); - assert(tsd != NULL); - - if (unlikely(tsd->state != tsd_state_nominal)) { - if (tsd->state == tsd_state_uninitialized) { - tsd->state = tsd_state_nominal; - /* Trigger cleanup handler registration. 
*/ - tsd_set(tsd); - } else if (tsd->state == tsd_state_purgatory) { - tsd->state = tsd_state_reincarnated; - tsd_set(tsd); - } else - assert(tsd->state == tsd_state_reincarnated); - } - - return (tsd); -} - -JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch(void) -{ - - return (tsd_fetch_impl(true)); -} - -JEMALLOC_ALWAYS_INLINE tsdn_t * -tsd_tsdn(tsd_t *tsd) -{ - - return ((tsdn_t *)tsd); -} - -JEMALLOC_INLINE bool -tsd_nominal(tsd_t *tsd) -{ - - return (tsd->state == tsd_state_nominal); -} - -#define O(n, t) \ -JEMALLOC_ALWAYS_INLINE t * \ -tsd_##n##p_get(tsd_t *tsd) \ -{ \ - \ - return (&tsd->n); \ -} \ - \ -JEMALLOC_ALWAYS_INLINE t \ -tsd_##n##_get(tsd_t *tsd) \ -{ \ - \ - return (*tsd_##n##p_get(tsd)); \ -} \ - \ -JEMALLOC_ALWAYS_INLINE void \ -tsd_##n##_set(tsd_t *tsd, t n) \ -{ \ - \ - assert(tsd->state == tsd_state_nominal); \ - tsd->n = n; \ -} -MALLOC_TSD -#undef O - -JEMALLOC_ALWAYS_INLINE tsdn_t * -tsdn_fetch(void) -{ - - if (!tsd_booted_get()) - return (NULL); - - return (tsd_tsdn(tsd_fetch_impl(false))); -} - -JEMALLOC_ALWAYS_INLINE bool -tsdn_null(const tsdn_t *tsdn) -{ - - return (tsdn == NULL); -} - -JEMALLOC_ALWAYS_INLINE tsd_t * -tsdn_tsd(tsdn_t *tsdn) -{ - - assert(!tsdn_null(tsdn)); - - return (&tsdn->tsd); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/util.h b/sources/jemalloc/include-win/jemalloc/internal/util.h deleted file mode 100755 index 4b56d652..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/util.h +++ /dev/null @@ -1,342 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#ifdef _WIN32 -# ifdef _WIN64 -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "ll" -# else -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "" -# endif -# define FMTd32 "d" -# define FMTu32 "u" -# define FMTx32 "x" -# define FMTd64 FMT64_PREFIX "d" -# define FMTu64 FMT64_PREFIX "u" -# define FMTx64 FMT64_PREFIX "x" -# define FMTdPTR FMTPTR_PREFIX "d" -# define FMTuPTR FMTPTR_PREFIX "u" -# define FMTxPTR FMTPTR_PREFIX "x" -#else -# include -# define FMTd32 PRId32 -# define FMTu32 PRIu32 -# define FMTx32 PRIx32 -# define FMTd64 PRId64 -# define FMTu64 PRIu64 -# define FMTx64 PRIx64 -# define FMTdPTR PRIdPTR -# define FMTuPTR PRIuPTR -# define FMTxPTR PRIxPTR -#endif - -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 - -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 - -/* Junk fill patterns. */ -#ifndef JEMALLOC_ALLOC_JUNK -# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) -#endif -#ifndef JEMALLOC_FREE_JUNK -# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) -#endif - -/* - * Wrap a cpp argument that contains commas such that it isn't broken up into - * multiple arguments. - */ -#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ - -/* - * Silence compiler warnings due to uninitialized values. This is used - * wherever the compiler fails to recognize that the variable is never used - * uninitialized. 
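
JEMALLOC_CC_SILENCE_INIT, defined immediately below, is the knob behind declarations like "size_t usize JEMALLOC_CC_SILENCE_INIT(0);" seen in tcache_alloc_small above: compilers that emit spurious maybe-uninitialized warnings get a dummy initializer, everyone else pays nothing. A self-contained sketch of the pattern (the CC_SILENCE macro name here is hypothetical):

#include <stddef.h>

#ifdef CC_SILENCE
# define CC_SILENCE_INIT(v) = v /* dummy initializer to hush the warning */
#else
# define CC_SILENCE_INIT(v)     /* expands to nothing */
#endif

size_t demo(int need_usize) {
    size_t usize CC_SILENCE_INIT(0);
    if (need_usize)
        usize = 4096;
    /* usize is only read on paths where it was assigned. */
    return need_usize ? usize : 0;
}
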
- */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -#ifdef __GNUC__ -# define likely(x) __builtin_expect(!!(x), 1) -# define unlikely(x) __builtin_expect(!!(x), 0) -#else -# define likely(x) !!(x) -# define unlikely(x) !!(x) -#endif - -#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) -# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure -#endif - -#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() - -#include "jemalloc/internal/assert.h" - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if (unlikely(!(c))) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -size_t malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -size_t malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -unsigned ffs_llu(unsigned long long bitmap); -unsigned ffs_lu(unsigned long bitmap); -unsigned ffs_u(unsigned bitmap); -unsigned ffs_zu(size_t bitmap); -unsigned ffs_u64(uint64_t bitmap); -unsigned ffs_u32(uint32_t bitmap); -uint64_t pow2_ceil_u64(uint64_t x); -uint32_t pow2_ceil_u32(uint32_t x); -size_t pow2_ceil_zu(size_t x); -unsigned lg_floor(size_t x); -void set_errno(int errnum); -int get_errno(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) - -/* Sanity check. 
*/ -#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ - || !defined(JEMALLOC_INTERNAL_FFS) -# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure -#endif - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_llu(unsigned long long bitmap) -{ - - return (JEMALLOC_INTERNAL_FFSLL(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_lu(unsigned long bitmap) -{ - - return (JEMALLOC_INTERNAL_FFSL(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u(unsigned bitmap) -{ - - return (JEMALLOC_INTERNAL_FFS(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_zu(size_t bitmap) -{ - -#if LG_SIZEOF_PTR == LG_SIZEOF_INT - return (ffs_u(bitmap)); -#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG - return (ffs_lu(bitmap)); -#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG - return (ffs_llu(bitmap)); -#else -#error No implementation for size_t ffs() -#endif -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u64(uint64_t bitmap) -{ - -#if LG_SIZEOF_LONG == 3 - return (ffs_lu(bitmap)); -#elif LG_SIZEOF_LONG_LONG == 3 - return (ffs_llu(bitmap)); -#else -#error No implementation for 64-bit ffs() -#endif -} - -JEMALLOC_ALWAYS_INLINE unsigned -ffs_u32(uint32_t bitmap) -{ - -#if LG_SIZEOF_INT == 2 - return (ffs_u(bitmap)); -#else -#error No implementation for 32-bit ffs() -#endif - return (ffs_u(bitmap)); -} - -JEMALLOC_INLINE uint64_t -pow2_ceil_u64(uint64_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x |= x >> 32; - x++; - return (x); -} - -JEMALLOC_INLINE uint32_t -pow2_ceil_u32(uint32_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x++; - return (x); -} - -/* Compute the smallest power of 2 that is >= x. */ -JEMALLOC_INLINE size_t -pow2_ceil_zu(size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return (pow2_ceil_u64(x)); -#else - return (pow2_ceil_u32(x)); -#endif -} - -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - size_t ret; - - assert(x != 0); - - asm ("bsr %1, %0" - : "=r"(ret) // Outputs. - : "r"(x) // Inputs. - ); - assert(ret < UINT_MAX); - return ((unsigned)ret); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - unsigned long ret; - - assert(x != 0); - -#if (LG_SIZEOF_PTR == 3) - _BitScanReverse64(&ret, x); -#elif (LG_SIZEOF_PTR == 2) - _BitScanReverse(&ret, x); -#else -# error "Unsupported type size for lg_floor()" -#endif - assert(ret < UINT_MAX); - return ((unsigned)ret); -} -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - - assert(x != 0); - -#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); -#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); -#else -# error "Unsupported type size for lg_floor()" -#endif -} -#else -JEMALLOC_INLINE unsigned -lg_floor(size_t x) -{ - - assert(x != 0); - - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); -#if (LG_SIZEOF_PTR == 3) - x |= (x >> 32); -#endif - if (x == SIZE_T_MAX) - return ((8 << LG_SIZEOF_PTR) - 1); - x++; - return (ffs_zu(x) - 2); -} -#endif - -/* Set error code. */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ - -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - -/* Get last error code. 
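
Looking back at pow2_ceil_u32/u64 above: they use the classic smear-and-increment trick. After x--, each shift-or doubles the run of set bits below the leading one; once every lower bit is set, x++ carries into the next power of two, and the initial decrement makes exact powers of two map to themselves. A worked standalone copy of the 32-bit version:

#include <assert.h>
#include <stdint.h>

/* Smallest power of two >= x, for x >= 1; same bit trick as pow2_ceil_u32. */
static uint32_t ceil_pow2_32(uint32_t x) {
    x--;          /* 37 -> 36 = 0b100100 */
    x |= x >> 1;  /* 0b110110 */
    x |= x >> 2;  /* 0b111111 */
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16; /* all bits below the leading one are now set */
    return x + 1; /* 0b111111 + 1 = 64 */
}

int main(void) {
    assert(ceil_pow2_32(37) == 64);
    assert(ceil_pow2_32(64) == 64);
    assert(ceil_pow2_32(65) == 128);
    return 0;
}
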
*/ -JEMALLOC_INLINE int -get_errno(void) -{ - -#ifdef _WIN32 - return (GetLastError()); -#else - return (errno); -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/valgrind.h b/sources/jemalloc/include-win/jemalloc/internal/valgrind.h deleted file mode 100755 index 877a142b..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/valgrind.h +++ /dev/null @@ -1,128 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#ifdef JEMALLOC_VALGRIND -#include - -/* - * The size that is reported to Valgrind must be consistent through a chain of - * malloc..realloc..realloc calls. Request size isn't recorded anywhere in - * jemalloc, so it is critical that all callers of these macros provide usize - * rather than request size. As a result, buffer overflow detection is - * technically weakened for the standard API, though it is generally accepted - * practice to consider any extra bytes reported by malloc_usable_size() as - * usable space. - */ -#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ - if (unlikely(in_valgrind)) \ - valgrind_make_mem_noaccess(ptr, usize); \ -} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ - if (unlikely(in_valgrind)) \ - valgrind_make_mem_undefined(ptr, usize); \ -} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ - if (unlikely(in_valgrind)) \ - valgrind_make_mem_defined(ptr, usize); \ -} while (0) -/* - * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro - * calls must be embedded in macros rather than in functions so that when - * Valgrind reports errors, there are no extra stack frames in the backtraces. - */ -#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \ - if (unlikely(in_valgrind && cond)) { \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \ - zero); \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \ - (false) -#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \ - ((ptr) != (old_ptr)) -#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \ - (false) -#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \ - (ptr == NULL) -#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \ - (false) -#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \ - (old_ptr == NULL) -#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \ - old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \ - if (unlikely(in_valgrind)) { \ - size_t rzsize = p2rz(tsdn, ptr); \ - \ - if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \ - old_ptr)) { \ - VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ - usize, rzsize); \ - if (zero && old_usize < usize) { \ - valgrind_make_mem_defined( \ - (void *)((uintptr_t)ptr + \ - old_usize), usize - old_usize); \ - } \ - } else { \ - if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \ - old_ptr_null(old_ptr)) { \ - valgrind_freelike_block(old_ptr, \ - old_rzsize); \ - } \ - if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \ - ptr_null(ptr)) { \ - size_t copy_size = (old_usize < usize) \ - ? 
old_usize : usize; \ - size_t tail_size = usize - copy_size; \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ - rzsize, false); \ - if (copy_size > 0) { \ - valgrind_make_mem_defined(ptr, \ - copy_size); \ - } \ - if (zero && tail_size > 0) { \ - valgrind_make_mem_defined( \ - (void *)((uintptr_t)ptr + \ - copy_size), tail_size); \ - } \ - } \ - } \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ - if (unlikely(in_valgrind)) \ - valgrind_freelike_block(ptr, rzsize); \ -} while (0) -#else -#define RUNNING_ON_VALGRIND ((unsigned)0) -#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ - ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ - zero) do {} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_VALGRIND -void valgrind_make_mem_noaccess(void *ptr, size_t usize); -void valgrind_make_mem_undefined(void *ptr, size_t usize); -void valgrind_make_mem_defined(void *ptr, size_t usize); -void valgrind_freelike_block(void *ptr, size_t usize); -#endif - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/sources/jemalloc/include-win/jemalloc/internal/witness.h b/sources/jemalloc/include-win/jemalloc/internal/witness.h deleted file mode 100755 index 30d8c7e9..00000000 --- a/sources/jemalloc/include-win/jemalloc/internal/witness.h +++ /dev/null @@ -1,304 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct witness_s witness_t; -typedef unsigned witness_rank_t; -typedef ql_head(witness_t) witness_list_t; -typedef int witness_comp_t (const witness_t *, const witness_t *); - -/* - * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by - * the witness machinery. - */ -#define WITNESS_RANK_OMIT 0U - -#define WITNESS_RANK_MIN 1U - -#define WITNESS_RANK_INIT 1U -#define WITNESS_RANK_CTL 1U -#define WITNESS_RANK_TCACHES 2U -#define WITNESS_RANK_ARENAS 3U - -#define WITNESS_RANK_PROF_DUMP 4U -#define WITNESS_RANK_PROF_BT2GCTX 5U -#define WITNESS_RANK_PROF_TDATAS 6U -#define WITNESS_RANK_PROF_TDATA 7U -#define WITNESS_RANK_PROF_GCTX 8U - -/* - * Used as an argument to witness_assert_depth_to_rank() in order to validate - * depth excluding non-core locks with lower ranks. Since the rank argument to - * witness_assert_depth_to_rank() is inclusive rather than exclusive, this - * definition can have the same value as the minimally ranked core lock. 
- */ -#define WITNESS_RANK_CORE 9U - -#define WITNESS_RANK_ARENA 9U -#define WITNESS_RANK_ARENA_CHUNKS 10U -#define WITNESS_RANK_ARENA_NODE_CACHE 11U - -#define WITNESS_RANK_BASE 12U - -#define WITNESS_RANK_LEAF 0xffffffffU -#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF -#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF -#define WITNESS_RANK_DSS WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF - -#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}} - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct witness_s { - /* Name, used for printing lock order reversal messages. */ - const char *name; - - /* - * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses - * must be acquired in order of increasing rank. - */ - witness_rank_t rank; - - /* - * If two witnesses are of equal rank and they have the samp comp - * function pointer, it is called as a last attempt to differentiate - * between witnesses of equal rank. - */ - witness_comp_t *comp; - - /* Linkage for thread's currently owned locks. */ - ql_elm(witness_t) link; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void witness_init(witness_t *witness, const char *name, witness_rank_t rank, - witness_comp_t *comp); -#ifdef JEMALLOC_JET -typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); -extern witness_lock_error_t *witness_lock_error; -#else -void witness_lock_error(const witness_list_t *witnesses, - const witness_t *witness); -#endif -#ifdef JEMALLOC_JET -typedef void (witness_owner_error_t)(const witness_t *); -extern witness_owner_error_t *witness_owner_error; -#else -void witness_owner_error(const witness_t *witness); -#endif -#ifdef JEMALLOC_JET -typedef void (witness_not_owner_error_t)(const witness_t *); -extern witness_not_owner_error_t *witness_not_owner_error; -#else -void witness_not_owner_error(const witness_t *witness); -#endif -#ifdef JEMALLOC_JET -typedef void (witness_depth_error_t)(const witness_list_t *, - witness_rank_t rank_inclusive, unsigned depth); -extern witness_depth_error_t *witness_depth_error; -#else -void witness_depth_error(const witness_list_t *witnesses, - witness_rank_t rank_inclusive, unsigned depth); -#endif - -void witnesses_cleanup(tsd_t *tsd); -void witness_fork_cleanup(tsd_t *tsd); -void witness_prefork(tsd_t *tsd); -void witness_postfork_parent(tsd_t *tsd); -void witness_postfork_child(tsd_t *tsd); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool witness_owner(tsd_t *tsd, const witness_t *witness); -void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness); -void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness); -void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive, - unsigned depth); -void witness_assert_depth(tsdn_t *tsdn, unsigned depth); -void witness_assert_lockless(tsdn_t *tsdn); -void witness_lock(tsdn_t *tsdn, witness_t *witness); -void witness_unlock(tsdn_t *tsdn, witness_t *witness); -#endif - -#if 
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE bool -witness_owner(tsd_t *tsd, const witness_t *witness) -{ - witness_list_t *witnesses; - witness_t *w; - - cassert(config_debug); - - witnesses = tsd_witnessesp_get(tsd); - ql_foreach(w, witnesses, link) { - if (w == witness) - return (true); - } - - return (false); -} - -JEMALLOC_INLINE void -witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) -{ - tsd_t *tsd; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - if (witness_owner(tsd, witness)) - return; - witness_owner_error(witness); -} - -JEMALLOC_INLINE void -witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - witnesses = tsd_witnessesp_get(tsd); - ql_foreach(w, witnesses, link) { - if (w == witness) - witness_not_owner_error(witness); - } -} - -JEMALLOC_INLINE void -witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive, - unsigned depth) { - tsd_t *tsd; - unsigned d; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - - d = 0; - witnesses = tsd_witnessesp_get(tsd); - w = ql_last(witnesses, link); - if (w != NULL) { - ql_reverse_foreach(w, witnesses, link) { - if (w->rank < rank_inclusive) { - break; - } - d++; - } - } - if (d != depth) - witness_depth_error(witnesses, rank_inclusive, depth); -} - -JEMALLOC_INLINE void -witness_assert_depth(tsdn_t *tsdn, unsigned depth) { - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth); -} - -JEMALLOC_INLINE void -witness_assert_lockless(tsdn_t *tsdn) { - witness_assert_depth(tsdn, 0); -} - -JEMALLOC_INLINE void -witness_lock(tsdn_t *tsdn, witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - witness_assert_not_owner(tsdn, witness); - - witnesses = tsd_witnessesp_get(tsd); - w = ql_last(witnesses, link); - if (w == NULL) { - /* No other locks; do nothing. */ - } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { - /* Forking, and relaxed ranking satisfied. */ - } else if (w->rank > witness->rank) { - /* Not forking, rank order reversal. */ - witness_lock_error(witnesses, witness); - } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != - witness->comp || w->comp(w, witness) > 0)) { - /* - * Missing/incompatible comparison function, or comparison - * function indicates rank order reversal. - */ - witness_lock_error(witnesses, witness); - } - - ql_elm_new(witness, link); - ql_tail_insert(witnesses, witness, link); -} - -JEMALLOC_INLINE void -witness_unlock(tsdn_t *tsdn, witness_t *witness) -{ - tsd_t *tsd; - witness_list_t *witnesses; - - if (!config_debug) - return; - - if (tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - if (witness->rank == WITNESS_RANK_OMIT) - return; - - /* - * Check whether owner before removal, rather than relying on - * witness_assert_owner() to abort, so that unit tests can test this - * function's failure mode without causing undefined behavior. 
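
The witness machinery above amounts to a debug-only lock-order checker: each mutex carries a rank, each thread keeps the list of witnesses it currently owns, and acquiring a lock whose rank does not exceed the most recently acquired one is reported as a potential deadlock. A condensed sketch of that core invariant (single-threaded toy; no comparator, fork relaxation, or per-thread storage):

#include <stdio.h>

#define MAX_HELD 16

typedef struct { const char *name; unsigned rank; } toy_lock_t;

static const toy_lock_t *held[MAX_HELD]; /* currently owned, oldest first */
static int nheld;

static void toy_lock(const toy_lock_t *l) {
    if (nheld > 0 && held[nheld - 1]->rank >= l->rank) {
        fprintf(stderr, "lock order reversal: %s (rank %u) after %s (rank %u)\n",
            l->name, l->rank, held[nheld - 1]->name, held[nheld - 1]->rank);
    }
    if (nheld < MAX_HELD)
        held[nheld++] = l;
}

int main(void) {
    /* Ranks borrowed from the table above: ARENAS = 3U, ARENA = 9U. */
    toy_lock_t arenas = { "arenas", 3 }, arena = { "arena", 9 };
    toy_lock(&arenas);
    toy_lock(&arena);  /* fine: ranks strictly increase */
    toy_lock(&arenas); /* reported: rank 3 taken while holding rank 9 */
    return 0;
}
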
- */ - if (witness_owner(tsd, witness)) { - witnesses = tsd_witnessesp_get(tsd); - ql_remove(witnesses, witness, link); - } else - witness_assert_owner(tsdn, witness); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc.h b/sources/jemalloc/include-win/jemalloc/jemalloc.h deleted file mode 100755 index eaa16270..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc.h +++ /dev/null @@ -1,376 +0,0 @@ -#ifndef JEMALLOC_H_ -#define JEMALLOC_H_ -#ifdef __cplusplus -extern "C" { -#endif - -/* Defined if __attribute__((...)) syntax is supported. */ -/* #undef JEMALLOC_HAVE_ATTR */ - -/* Defined if alloc_size attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */ - -/* Defined if format(gnu_printf, ...) attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ - -/* Defined if format(printf, ...) attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */ - -/* - * Define overrides for non-standard allocator-related functions if they are - * present on the system. - */ -/* #undef JEMALLOC_OVERRIDE_MEMALIGN */ -/* #undef JEMALLOC_OVERRIDE_VALLOC */ - -/* - * At least Linux omits the "const" in: - * - * size_t malloc_usable_size(const void *ptr); - * - * Match the operating system's prototype. - */ -#define JEMALLOC_USABLE_SIZE_CONST const - -/* - * If defined, specify throw() for the public function prototypes when compiling - * with C++. The only justification for this is to match the prototypes that - * glibc defines. - */ -/* #undef JEMALLOC_USE_CXX_THROW */ - -#ifdef _MSC_VER -# ifdef _WIN64 -# define LG_SIZEOF_PTR_WIN 3 -# else -# define LG_SIZEOF_PTR_WIN 2 -# endif -#endif - -/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ -#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN - -/* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. - */ -#ifndef JEMALLOC_NO_RENAME -# define je_malloc_conf je_malloc_conf -# define je_malloc_message je_malloc_message -# define je_malloc je_malloc -# define je_calloc je_calloc -# define je_posix_memalign je_posix_memalign -# define je_aligned_alloc je_aligned_alloc -# define je_realloc je_realloc -# define je_free je_free -# define je_mallocx je_mallocx -# define je_rallocx je_rallocx -# define je_xallocx je_xallocx -# define je_sallocx je_sallocx -# define je_dallocx je_dallocx -# define je_sdallocx je_sdallocx -# define je_nallocx je_nallocx -# define je_mallctl je_mallctl -# define je_mallctlnametomib je_mallctlnametomib -# define je_mallctlbymib je_mallctlbymib -# define je_malloc_stats_print je_malloc_stats_print -# define je_malloc_usable_size je_malloc_usable_size -#endif - -#include -#include -#include -#include -#include - -#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000" -#define JEMALLOC_VERSION_MAJOR 0 -#define JEMALLOC_VERSION_MINOR 0 -#define JEMALLOC_VERSION_BUGFIX 0 -#define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000" - -# define MALLOCX_LG_ALIGN(la) ((int)(la)) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) -# else -# define MALLOCX_ALIGN(a) \ - ((int)(((size_t)(a) < (size_t)INT_MAX) ? 
ffs((int)(a))-1 : \ - ffs((int)(((size_t)(a))>>32))+31)) -# endif -# define MALLOCX_ZERO ((int)0x40) -/* - * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 - * encodes MALLOCX_TCACHE_NONE. - */ -# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) -# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) -/* - * Bias arena index bits so that 0 encodes "use an automatically chosen arena". - */ -# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) - -#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) -# define JEMALLOC_CXX_THROW throw() -#else -# define JEMALLOC_CXX_THROW -#endif - -#if _MSC_VER -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# ifndef JEMALLOC_EXPORT -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT -# else -# define JEMALLOC_EXPORT -# endif -# endif -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE __declspec(noinline) -# ifdef __cplusplus -# define JEMALLOC_NOTHROW __declspec(nothrow) -# else -# define JEMALLOC_NOTHROW -# endif -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) -# if _MSC_VER >= 1900 && !defined(__EDG__) -# define JEMALLOC_ALLOCATOR __declspec(allocator) -# else -# define JEMALLOC_ALLOCATOR -# endif -#elif defined(JEMALLOC_HAVE_ATTR) -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE -# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) -# else -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# endif -# ifndef JEMALLOC_EXPORT -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# endif -# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) -# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) -# else -# define JEMALLOC_FORMAT_PRINTF(s, i) -# endif -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# define JEMALLOC_EXPORT -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE -# define JEMALLOC_NOTHROW -# define JEMALLOC_SECTION(s) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#endif - -/* - * The je_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
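
Since application code is expected to call these je_-prefixed entry points directly (absent mangling), here is a hedged usage sketch of the *allocx functions declared below, composing the MALLOCX_* flag macros defined above (assumes linking against a jemalloc 4.x build):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
    void *p = je_mallocx(4096,
        MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
    if (p == NULL)
        return 1;
    printf("usable size: %zu\n", je_sallocx(p, 0));
    je_dallocx(p, MALLOCX_TCACHE_NONE);
    return 0;
}
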
- */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( - void (*write_cb)(void *, const char *), void *je_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif - -/* - * void * - * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - * bool *commit, unsigned arena_ind); - */ -typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); - -/* - * bool - * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); - */ -typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); - -/* - * bool - * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); - -/* 
- * bool - * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); - -/* - * bool - * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); - -typedef struct { - chunk_alloc_t *alloc; - chunk_dalloc_t *dalloc; - chunk_commit_t *commit; - chunk_decommit_t *decommit; - chunk_purge_t *purge; - chunk_split_t *split; - chunk_merge_t *merge; -} chunk_hooks_t; - -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. - */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign -# define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx -# define dallocx je_dallocx -# define sdallocx je_sdallocx -# define nallocx je_nallocx -# define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib -# define mallctlbymib je_mallctlbymib -# define malloc_stats_print je_malloc_stats_print -# define malloc_usable_size je_malloc_usable_size -#endif - -/* - * The je_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign -# undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx -# undef je_dallocx -# undef je_sdallocx -# undef je_nallocx -# undef je_mallctl -# undef je_mallctlnametomib -# undef je_mallctlbymib -# undef je_malloc_stats_print -# undef je_malloc_usable_size -#endif - -#ifdef __cplusplus -} -#endif -#endif /* JEMALLOC_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h b/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h deleted file mode 100755 index f6a0102c..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h +++ /dev/null @@ -1,46 +0,0 @@ -/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ -/* Defined if __attribute__((...)) syntax is supported. 
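The chunk_hooks_t table above is jemalloc 4.x's public interface for taking over chunk management per arena; applications read back and install a hook table through the arena.<i>.chunk_hooks mallctl. A minimal sketch of wrapping the default allocation hook with logging — assuming the je_-prefixed build declared here, arena 0, and a <jemalloc/jemalloc.h> include path; the function names and error handling are illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static chunk_hooks_t default_hooks;

/* Matches chunk_alloc_t; delegates to the saved default hook and logs. */
static void *
log_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret = default_hooks.alloc(new_addr, size, alignment, zero,
	    commit, arena_ind);

	fprintf(stderr, "chunk alloc %zu -> %p (arena %u)\n", size, ret,
	    arena_ind);
	return (ret);
}

int
install_logging_chunk_hooks(void)
{
	size_t len = sizeof(chunk_hooks_t);
	chunk_hooks_t hooks;

	/* Read the current hooks so the wrapper can delegate to them. */
	if (je_mallctl("arena.0.chunk_hooks", &default_hooks, &len, NULL, 0))
		return (1);
	hooks = default_hooks;
	hooks.alloc = log_chunk_alloc;
	/* Write the modified table back. */
	return (je_mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
	    sizeof(hooks)));
}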
*/ -/* #undef JEMALLOC_HAVE_ATTR */ - -/* Defined if alloc_size attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */ - -/* Defined if format(gnu_printf, ...) attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ - -/* Defined if format(printf, ...) attribute is supported. */ -/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */ - -/* - * Define overrides for non-standard allocator-related functions if they are - * present on the system. - */ -/* #undef JEMALLOC_OVERRIDE_MEMALIGN */ -/* #undef JEMALLOC_OVERRIDE_VALLOC */ - -/* - * At least Linux omits the "const" in: - * - * size_t malloc_usable_size(const void *ptr); - * - * Match the operating system's prototype. - */ -#define JEMALLOC_USABLE_SIZE_CONST const - -/* - * If defined, specify throw() for the public function prototypes when compiling - * with C++. The only justification for this is to match the prototypes that - * glibc defines. - */ -/* #undef JEMALLOC_USE_CXX_THROW */ - -#ifdef _MSC_VER -# ifdef _WIN64 -# define LG_SIZEOF_PTR_WIN 3 -# else -# define LG_SIZEOF_PTR_WIN 2 -# endif -#endif - -/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ -#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h b/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h deleted file mode 100755 index fdc318ef..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h +++ /dev/null @@ -1,103 +0,0 @@ -#include <stdlib.h> -#include <stdbool.h> -#include <stdint.h> -#include <limits.h> -#include <strings.h> - -#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000" -#define JEMALLOC_VERSION_MAJOR 0 -#define JEMALLOC_VERSION_MINOR 0 -#define JEMALLOC_VERSION_BUGFIX 0 -#define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000" - -# define MALLOCX_LG_ALIGN(la) ((int)(la)) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) -# else -# define MALLOCX_ALIGN(a) \ - ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ - ffs((int)(((size_t)(a))>>32))+31)) -# endif -# define MALLOCX_ZERO ((int)0x40) -/* - * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 - * encodes MALLOCX_TCACHE_NONE. - */ -# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) -# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) -/* - * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
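The MALLOCX_* encodings above pack alignment, zero-initialization, tcache selection, and arena selection into the single int flags argument of the *allocx functions. A small usage sketch, again assuming the je_-prefixed build and a <jemalloc/jemalloc.h> include path:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* 64-byte-aligned, zero-filled allocation. */
	void *p = je_mallocx(1000, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

	if (p == NULL)
		return (1);
	/* nallocx reports the size class that would back this request. */
	printf("backing size class: %zu\n",
	    je_nallocx(1000, MALLOCX_ALIGN(64)));
	/* Sized deallocation; the flags must match the allocation. */
	je_sdallocx(p, 1000, MALLOCX_ALIGN(64));
	return (0);
}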
- */ -# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) - -#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) -# define JEMALLOC_CXX_THROW throw() -#else -# define JEMALLOC_CXX_THROW -#endif - -#if _MSC_VER -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# ifndef JEMALLOC_EXPORT -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# endif -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE __declspec(noinline) -# ifdef __cplusplus -# define JEMALLOC_NOTHROW __declspec(nothrow) -# else -# define JEMALLOC_NOTHROW -# endif -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) -# if _MSC_VER >= 1900 && !defined(__EDG__) -# define JEMALLOC_ALLOCATOR __declspec(allocator) -# else -# define JEMALLOC_ALLOCATOR -# endif -#elif defined(JEMALLOC_HAVE_ATTR) -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE -# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) -# else -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# endif -# ifndef JEMALLOC_EXPORT -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# endif -# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) -# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) -# else -# define JEMALLOC_FORMAT_PRINTF(s, i) -# endif -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# define JEMALLOC_EXPORT -# define JEMALLOC_FORMAT_PRINTF(s, i) -# define JEMALLOC_NOINLINE -# define JEMALLOC_NOTHROW -# define JEMALLOC_SECTION(s) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h deleted file mode 100755 index f6b02d80..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
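The mangle header whose explanation ends here restores the standard names: defining JEMALLOC_MANGLE before inclusion maps the unprefixed allocator API onto the je_* symbols, so a je_-prefixed build can still replace the system allocator wholesale. For example:

/* With JEMALLOC_MANGLE defined before inclusion, the plain names below
 * expand to their je_-prefixed counterparts instead of libc's allocator. */
#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	void *p = malloc(128);		/* je_malloc(128) after mangling */

	printf("usable: %zu\n", malloc_usable_size(p));
	free(p);			/* je_free(p) */
	return (0);
}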
- */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign -# define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx -# define dallocx je_dallocx -# define sdallocx je_sdallocx -# define nallocx je_nallocx -# define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib -# define mallctlbymib je_mallctlbymib -# define malloc_stats_print je_malloc_stats_print -# define malloc_usable_size je_malloc_usable_size -#endif - -/* - * The je_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. - */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign -# undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx -# undef je_dallocx -# undef je_sdallocx -# undef je_nallocx -# undef je_mallctl -# undef je_mallctlnametomib -# undef je_mallctlbymib -# undef je_malloc_stats_print -# undef je_malloc_usable_size -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h deleted file mode 100755 index a89087ac..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. - */ -#ifdef JEMALLOC_MANGLE -# ifndef JEMALLOC_NO_DEMANGLE -# define JEMALLOC_NO_DEMANGLE -# endif -# define malloc_conf jet_malloc_conf -# define malloc_message jet_malloc_message -# define malloc jet_malloc -# define calloc jet_calloc -# define posix_memalign jet_posix_memalign -# define aligned_alloc jet_aligned_alloc -# define realloc jet_realloc -# define free jet_free -# define mallocx jet_mallocx -# define rallocx jet_rallocx -# define xallocx jet_xallocx -# define sallocx jet_sallocx -# define dallocx jet_dallocx -# define sdallocx jet_sdallocx -# define nallocx jet_nallocx -# define mallctl jet_mallctl -# define mallctlnametomib jet_mallctlnametomib -# define mallctlbymib jet_mallctlbymib -# define malloc_stats_print jet_malloc_stats_print -# define malloc_usable_size jet_malloc_usable_size -#endif - -/* - * The jet_* macros can be used as stable alternative names for the - * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily - * meant for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. 
- */ -#ifndef JEMALLOC_NO_DEMANGLE -# undef jet_malloc_conf -# undef jet_malloc_message -# undef jet_malloc -# undef jet_calloc -# undef jet_posix_memalign -# undef jet_aligned_alloc -# undef jet_realloc -# undef jet_free -# undef jet_mallocx -# undef jet_rallocx -# undef jet_xallocx -# undef jet_sallocx -# undef jet_dallocx -# undef jet_sdallocx -# undef jet_nallocx -# undef jet_mallctl -# undef jet_mallctlnametomib -# undef jet_mallctlbymib -# undef jet_malloc_stats_print -# undef jet_malloc_usable_size -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h b/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h deleted file mode 100755 index ff025e30..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * The je_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). - */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( - void (*write_cb)(void *, const char *), void *je_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) 
JEMALLOC_CXX_THROW; - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h b/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h deleted file mode 100755 index f71efef0..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * The jet_ prefix on the following public symbol declarations is an artifact - * of namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h). - */ -extern JEMALLOC_EXPORT const char *jet_malloc_conf; -extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_malloc(size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment, - size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) - JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr) - JEMALLOC_CXX_THROW; - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags) - JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size, - int flags) JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size, - size_t extra, int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr, - int flags) JEMALLOC_ATTR(pure); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size, - int flags); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags) - JEMALLOC_ATTR(pure); - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name, - size_t *mibp, size_t *miblenp); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib, - size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print( - void (*write_cb)(void *, const char *), void *jet_cbopaque, - const char *opts); -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; - -#ifdef 
JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size) - JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW - JEMALLOC_ATTR(malloc); -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h b/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h deleted file mode 100755 index 6c915bde..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. - */ -#ifndef JEMALLOC_NO_RENAME -# define je_malloc_conf je_malloc_conf -# define je_malloc_message je_malloc_message -# define je_malloc je_malloc -# define je_calloc je_calloc -# define je_posix_memalign je_posix_memalign -# define je_aligned_alloc je_aligned_alloc -# define je_realloc je_realloc -# define je_free je_free -# define je_mallocx je_mallocx -# define je_rallocx je_rallocx -# define je_xallocx je_xallocx -# define je_sallocx je_sallocx -# define je_dallocx je_dallocx -# define je_sdallocx je_sdallocx -# define je_nallocx je_nallocx -# define je_mallctl je_mallctl -# define je_mallctlnametomib je_mallctlnametomib -# define je_mallctlbymib je_mallctlbymib -# define je_malloc_stats_print je_malloc_stats_print -# define je_malloc_usable_size je_malloc_usable_size -#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h b/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h deleted file mode 100755 index fa7b350a..00000000 --- a/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * void * - * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - * bool *commit, unsigned arena_ind); - */ -typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); - -/* - * bool - * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); - */ -typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); - -/* - * bool - * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); - -/* - * bool - * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); - -typedef struct { - chunk_alloc_t *alloc; - chunk_dalloc_t *dalloc; - chunk_commit_t *commit; - chunk_decommit_t *decommit; - chunk_purge_t *purge; - chunk_split_t *split; - chunk_merge_t *merge; -} chunk_hooks_t; diff --git 
a/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h b/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h deleted file mode 100755 index d92160eb..00000000 --- a/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef stdbool_h -#define stdbool_h - -#include <wtypes.h> - -/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ -/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ -/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as - * a built-in type. */ -#ifndef __clang__ -typedef BOOL _Bool; -#endif - -#define bool _Bool -#define true 1 -#define false 0 - -#define __bool_true_false_are_defined 1 - -#endif /* stdbool_h */ diff --git a/sources/jemalloc/include-win/msvc_compat/C99/stdint.h b/sources/jemalloc/include-win/msvc_compat/C99/stdint.h deleted file mode 100755 index d02608a5..00000000 --- a/sources/jemalloc/include-win/msvc_compat/C99/stdint.h +++ /dev/null @@ -1,247 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include <limits.h> - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include <wchar.h> -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t.
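The two shims above backfill C99 <stdbool.h> and <stdint.h> for pre-C99 MSVC. A quick compile-time check of the type widths the stdint shim promises, written as C89-style negative-array assertions since old MSVC lacks _Static_assert (the typedef names are illustrative):

#include <stdint.h>

typedef char assert_int32_is_4_bytes[(sizeof(int32_t) == 4) ? 1 : -1];
typedef char assert_int64_is_8_bytes[(sizeof(int64_t) == 8) ? 1 : -1];
typedef char assert_intptr_holds_void_ptr[
    (sizeof(intptr_t) == sizeof(void *)) ? 1 : -1];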
-#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# 
define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -#define INTMAX_C INT64_C -#define UINTMAX_C UINT64_C - -#endif // __STDC_CONSTANT_MACROS ] - - -#endif // _MSC_STDINT_H_ ] diff --git a/sources/jemalloc/include-win/msvc_compat/strings.h b/sources/jemalloc/include-win/msvc_compat/strings.h deleted file mode 100755 index a3ee2506..00000000 --- a/sources/jemalloc/include-win/msvc_compat/strings.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef strings_h -#define strings_h - -/* MSVC doesn't define ffs/ffsl.
This dummy strings.h header is provided - * for both */ -#ifdef _MSC_VER -# include <intrin.h> -# pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ - unsigned long i; - - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); -} - -static __forceinline int ffs(int x) -{ - - return (ffsl(x)); -} - -# ifdef _M_X64 -# pragma intrinsic(_BitScanForward64) -# endif - -static __forceinline int ffsll(unsigned __int64 x) -{ - unsigned long i; -#ifdef _M_X64 - if (_BitScanForward64(&i, x)) - return (i + 1); - return (0); -#else -// Fallback for 32-bit build where 64-bit version not available -// assuming little endian - union { - unsigned __int64 ll; - unsigned long l[2]; - } s; - - s.ll = x; - - if (_BitScanForward(&i, s.l[0])) - return (i + 1); - else if(_BitScanForward(&i, s.l[1])) - return (i + 33); - return (0); -#endif -} - -#else -# define ffsll(x) __builtin_ffsll(x) -# define ffsl(x) __builtin_ffsl(x) -# define ffs(x) __builtin_ffs(x) -#endif - -#endif /* strings_h */ diff --git a/sources/jemalloc/include-win/msvc_compat/windows_extra.h b/sources/jemalloc/include-win/msvc_compat/windows_extra.h deleted file mode 100755 index 3008faa3..00000000 --- a/sources/jemalloc/include-win/msvc_compat/windows_extra.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H -#define MSVC_COMPAT_WINDOWS_EXTRA_H - -#include <errno.h> - -#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/sources/jemalloc/src/je_arena.c b/sources/jemalloc/src/je_arena.c deleted file mode 100755 index a9dff0b0..00000000 --- a/sources/jemalloc/src/je_arena.c +++ /dev/null @@ -1,3950 +0,0 @@ -#define JEMALLOC_ARENA_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -bool opt_thp = true; -static bool thp_initially_huge; -purge_mode_t opt_purge = PURGE_DEFAULT; -const char *purge_mode_names[] = { - "ratio", - "decay", - "N/A" -}; -ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -static ssize_t lg_dirty_mult_default; -ssize_t opt_decay_time = DECAY_TIME_DEFAULT; -static ssize_t decay_time_default; - -arena_bin_info_t arena_bin_info[NBINS]; - -size_t map_bias; -size_t map_misc_offset; -size_t arena_maxrun; /* Max run size for arenas. */ -size_t large_maxclass; /* Max large size class. */ -unsigned nlclasses; /* Number of large size classes. */ -unsigned nhclasses; /* Number of huge size classes. */ - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition.
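The strings.h shim above implements the ffs family on top of MSVC's _BitScanForward intrinsics and forwards to the GCC builtins elsewhere; either way the contract is a 1-based index of the least-significant set bit, with zero mapped to zero:

#include <assert.h>
#include <strings.h>	/* the shim under MSVC, the POSIX header elsewhere */

int
main(void)
{
	assert(ffs(0) == 0);	/* no bit set */
	assert(ffs(1) == 1);	/* bit 0 -> index 1 */
	assert(ffs(8) == 4);	/* 0x8: lowest set bit is bit 3 */
	assert(ffs(12) == 3);	/* 0xc: bits 2 and 3 set; lowest wins */
	return (0);
}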
- */ - -static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk); -static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, - size_t ndirty_limit); -static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, - bool dirty, bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_run_t *run, - arena_bin_t *bin); - -/******************************************************************************/ - -JEMALLOC_INLINE_C size_t -arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - pageind = arena_miscelm_to_pageind(miscelm); - mapbits = arena_mapbits_get(chunk, pageind); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_INLINE_C const extent_node_t * -arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - return (&chunk->node); -} - -JEMALLOC_INLINE_C int -arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b) -{ - size_t a_sn, b_sn; - - assert(a != NULL); - assert(b != NULL); - - a_sn = extent_node_sn_get(arena_miscelm_extent_get(a)); - b_sn = extent_node_sn_get(arena_miscelm_extent_get(b)); - - return ((a_sn > b_sn) - (a_sn < b_sn)); -} - -JEMALLOC_INLINE_C int -arena_ad_comp(const arena_chunk_map_misc_t *a, - const arena_chunk_map_misc_t *b) -{ - uintptr_t a_miscelm = (uintptr_t)a; - uintptr_t b_miscelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); -} - -JEMALLOC_INLINE_C int -arena_snad_comp(const arena_chunk_map_misc_t *a, - const arena_chunk_map_misc_t *b) -{ - int ret; - - assert(a != NULL); - assert(b != NULL); - - ret = arena_sn_comp(a, b); - if (ret != 0) - return (ret); - - ret = arena_ad_comp(a, b); - return (ret); -} - -/* Generate pairing heap functions. */ -ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, - ph_link, arena_snad_comp) - -#ifdef JEMALLOC_JET -#undef run_quantize_floor -#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor) -#endif -static size_t -run_quantize_floor(size_t size) -{ - size_t ret; - pszind_t pind; - - assert(size > 0); - assert(size <= HUGE_MAXCLASS); - assert((size & PAGE_MASK) == 0); - - assert(size != 0); - assert(size == PAGE_CEILING(size)); - - pind = psz2ind(size - large_pad + 1); - if (pind == 0) { - /* - * Avoid underflow. This short-circuit would also do the right - * thing for all sizes in the range for which there are - * PAGE-spaced size classes, but it's simplest to just handle - * the one case that would cause erroneous results. 
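run_quantize_floor() above and run_quantize_ceil() just below snap run sizes onto the size-class lattice via psz2ind()/pind2sz(). The same floor/ceil idea over an explicit sorted table — the class values here are invented for illustration, not jemalloc's real spacing:

/* Illustrative floor/ceil quantization against a sorted size-class table.
 * Assumes size >= classes[0]; real jemalloc derives classes from
 * psz2ind()/pind2sz(). */
#include <assert.h>
#include <stddef.h>

static const size_t classes[] = {4096, 8192, 12288, 16384, 24576, 32768};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

static size_t
quantize_floor(size_t size)
{
	size_t i, ret = classes[0];

	for (i = 0; i < NCLASSES && classes[i] <= size; i++)
		ret = classes[i];	/* largest class <= size */
	return (ret);
}

static size_t
quantize_ceil(size_t size)
{
	size_t i;

	for (i = 0; i < NCLASSES; i++) {
		if (classes[i] >= size)
			return (classes[i]);	/* smallest class >= size */
	}
	return (0);	/* size exceeds the largest class */
}

int
main(void)
{
	assert(quantize_floor(13000) == 12288);
	assert(quantize_ceil(13000) == 16384);
	assert(quantize_floor(8192) == quantize_ceil(8192));	/* exact hit */
	return (0);
}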
- */ - return (size); - } - ret = pind2sz(pind - 1) + large_pad; - assert(ret <= size); - return (ret); -} -#ifdef JEMALLOC_JET -#undef run_quantize_floor -#define run_quantize_floor JEMALLOC_N(run_quantize_floor) -run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor); -#endif - -#ifdef JEMALLOC_JET -#undef run_quantize_ceil -#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil) -#endif -static size_t -run_quantize_ceil(size_t size) -{ - size_t ret; - - assert(size > 0); - assert(size <= HUGE_MAXCLASS); - assert((size & PAGE_MASK) == 0); - - ret = run_quantize_floor(size); - if (ret < size) { - /* - * Skip a quantization that may have an adequately large run, - * because under-sized runs may be mixed in. This only happens - * when an unusual size is requested, i.e. for aligned - * allocation, and is just one of several places where linear - * search would potentially find sufficiently aligned available - * memory somewhere lower. - */ - ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; - } - return (ret); -} -#ifdef JEMALLOC_JET -#undef run_quantize_ceil -#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) -run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); -#endif - -static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( - arena_miscelm_get_const(chunk, pageind)))); - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert((npages << LG_PAGE) < chunksize); - assert(pind2sz(pind) <= chunksize); - arena_run_heap_insert(&arena->runs_avail[pind], - arena_miscelm_get_mutable(chunk, pageind)); -} - -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( - arena_miscelm_get_const(chunk, pageind)))); - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert((npages << LG_PAGE) < chunksize); - assert(pind2sz(pind) <= chunksize); - arena_run_heap_remove(&arena->runs_avail[pind], - arena_miscelm_get_mutable(chunk, pageind)); -} - -static void -arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, - pageind); - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); - - qr_new(&miscelm->rd, rd_link); - qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); - arena->ndirty += npages; -} - -static void -arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, - pageind); - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); - - qr_remove(&miscelm->rd, rd_link); - assert(arena->ndirty >= npages); - arena->ndirty -= npages; -} - -static size_t -arena_chunk_dirty_npages(const extent_node_t *node) -{ - - return (extent_node_size_get(node) >> LG_PAGE); -} - -void -arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) -{ - - if (cache) { - 
extent_node_dirty_linkage_init(node); - extent_node_dirty_insert(node, &arena->runs_dirty, - &arena->chunks_cache); - arena->ndirty += arena_chunk_dirty_npages(node); - } -} - -void -arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) -{ - - if (dirty) { - extent_node_dirty_remove(node); - assert(arena->ndirty >= arena_chunk_dirty_npages(node)); - arena->ndirty -= arena_chunk_dirty_npages(node); - } -} - -JEMALLOC_INLINE_C void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ - void *ret; - size_t regind; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(run->nfree > 0); - assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); - - regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - return (ret); -} - -JEMALLOC_INLINE_C void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size_t regind = arena_run_regind(run, bin_info, ptr); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - - ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= - (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. 
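arena_run_reg_alloc()/arena_run_reg_dalloc() above carve a run into equal-sized regions and track occupancy in a bitmap: allocation takes the first unset bit, deallocation recovers the region index from the pointer and asserts against interior pointers and double frees. A single-threaded miniature of that scheme — not jemalloc's actual bitmap code, and REG_SIZE/NREGS are invented for the example:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define REG_SIZE 64u
#define NREGS    64u

static uint64_t used;	/* bit i set => region i is allocated */
static unsigned char run[NREGS * REG_SIZE];

static void *
reg_alloc(void)
{
	unsigned i;

	for (i = 0; i < NREGS; i++) {
		if (!(used & ((uint64_t)1 << i))) {	/* first unset bit */
			used |= (uint64_t)1 << i;
			return (run + (size_t)i * REG_SIZE);
		}
	}
	return (NULL);	/* run is full */
}

static void
reg_dalloc(void *ptr)
{
	size_t off = (size_t)((unsigned char *)ptr - run);

	assert(off % REG_SIZE == 0);		/* interior pointer */
	assert(used & ((uint64_t)1 << (off / REG_SIZE)));	/* double free */
	used &= ~((uint64_t)1 << (off / REG_SIZE));
}

int
main(void)
{
	void *a = reg_alloc(), *b = reg_alloc();

	assert(a != NULL && b != NULL && a != b);
	reg_dalloc(a);
	assert(reg_alloc() == a);	/* lowest free region is handed out */
	reg_dalloc(b);
	return (0);
}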
*/ - assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); - run->nfree++; -} - -JEMALLOC_INLINE_C void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} - -JEMALLOC_INLINE_C void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind - << LG_PAGE)), PAGE); -} - -JEMALLOC_INLINE_C void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); -} - -static void -arena_nactive_add(arena_t *arena, size_t add_pages) -{ - - if (config_stats) { - size_t cactive_add = CHUNK_CEILING((arena->nactive + - add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - LG_PAGE); - if (cactive_add != 0) - stats_cactive_add(cactive_add); - } - arena->nactive += add_pages; -} - -static void -arena_nactive_sub(arena_t *arena, size_t sub_pages) -{ - - if (config_stats) { - size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) - - CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE); - if (cactive_sub != 0) - stats_cactive_sub(cactive_sub); - } - arena->nactive -= sub_pages; -} - -static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t flag_decommitted, size_t need_pages) -{ - size_t total_pages, rem_pages; - - assert(flag_dirty == 0 || flag_decommitted == 0); - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages); - if (flag_dirty != 0) - arena_run_dirty_remove(arena, chunk, run_ind, total_pages); - arena_nactive_add(arena, need_pages); - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - size_t flags = flag_dirty | flag_decommitted; - size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : - 0; - - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & - flag_unzeroed_mask)); - arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & - flag_unzeroed_mask)); - if (flag_dirty != 0) { - arena_run_dirty_insert(arena, chunk, run_ind+need_pages, - rem_pages); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); - } -} - -static bool -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages; - size_t flag_unzeroed_mask; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); - - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - } - - if (zero) { - if (flag_decommitted != 0) { - /* The run is untouched, and therefore zeroed. */ - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)((uintptr_t)chunk + (run_ind << LG_PAGE)), - (need_pages << LG_PAGE)); - } else if (flag_dirty != 0) { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); - } else { - /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). - */ - size_t i; - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } - } - } - } else { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } - - /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element). - */ - flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
- CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1))); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); - return (false); -} - -static bool -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, true, zero)); -} - -static bool -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, false, zero)); -} - -static bool -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - szind_t binind) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; - - assert(binind != BININD_INVALID); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); - - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - - for (i = 0; i < need_pages; i++) { - size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, - run_ind+i); - arena_mapbits_small_set(chunk, run_ind+i, i, binind, - flag_unzeroed); - if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - return (false); -} - -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); -} - -static bool -arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero, - bool *gdump) -{ - - /* - * The extent node notion of "committed" doesn't directly apply to - * arena chunks. Arbitrarily mark them as committed. The commit state - * of runs is tracked individually, and upon chunk deallocation the - * entire chunk is in a consistent commit state. - */ - extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true); - extent_node_achunk_set(&chunk->node, true); - return (chunk_register(chunk, &chunk->node, gdump)); -} - -static arena_chunk_t * -arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) -{ - arena_chunk_t *chunk; - size_t sn; - - malloc_mutex_unlock(tsdn, &arena->lock); - /* prof_gdump() requirement. 
*/ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); - - chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, - NULL, chunksize, chunksize, &sn, zero, commit); - if (chunk != NULL && !*commit) { - /* Commit header. */ - if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << - LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, - (void *)chunk, chunksize, sn, *zero, *commit); - chunk = NULL; - } - } - if (chunk != NULL) { - bool gdump; - if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) { - if (!*commit) { - /* Undo commit of header. */ - chunk_hooks->decommit(chunk, chunksize, 0, - map_bias << LG_PAGE, arena->ind); - } - chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, - (void *)chunk, chunksize, sn, *zero, *commit); - chunk = NULL; - } - if (config_prof && opt_prof && gdump) - prof_gdump(tsdn); - } - - malloc_mutex_lock(tsdn, &arena->lock); - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, - bool *commit) -{ - arena_chunk_t *chunk; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t sn; - - /* prof_gdump() requirement. */ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1); - malloc_mutex_assert_owner(tsdn, &arena->lock); - - chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, - chunksize, &sn, zero, commit, true); - if (chunk != NULL) { - bool gdump; - if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) { - chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, - chunksize, sn, true); - return (NULL); - } - if (config_prof && opt_prof && gdump) { - malloc_mutex_unlock(tsdn, &arena->lock); - prof_gdump(tsdn); - malloc_mutex_lock(tsdn, &arena->lock); - } - } - if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(tsdn, arena, - &chunk_hooks, zero, commit); - } - - if (config_stats && chunk != NULL) { - arena->stats.mapped += chunksize; - arena->stats.metadata_mapped += (map_bias << LG_PAGE); - } - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero, commit; - size_t flag_unzeroed, flag_decommitted, i; - - assert(arena->spare == NULL); - - zero = false; - commit = false; - chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); - if (chunk == NULL) - return (NULL); - - if (config_thp && opt_thp) { - chunk->hugepage = thp_initially_huge; - } - - /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed - * or decommitted chunk. - */ - flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; - flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, - flag_unzeroed | flag_decommitted); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. 
- */ - if (!zero) { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_bitselm_get_const(chunk, map_bias+1), - (size_t)((uintptr_t)arena_bitselm_get_const(chunk, - chunk_npages-1) - - (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_internal_set(chunk, i, flag_unzeroed); - } else { - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)arena_bitselm_get_const(chunk, map_bias+1), - (size_t)((uintptr_t)arena_bitselm_get_const(chunk, - chunk_npages-1) - - (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - flag_unzeroed); - } - } - } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, - flag_unzeroed); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) -{ - arena_chunk_t *chunk; - - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(tsdn, arena); - if (chunk == NULL) - return (NULL); - } - - ql_elm_new(&chunk->node, ql_link); - ql_tail_insert(&arena->achunks, &chunk->node, ql_link); - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); - - return (chunk); -} - -static void -arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) -{ - size_t sn; - UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false); - bool committed; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - - chunk_deregister(chunk, &chunk->node); - - sn = extent_node_sn_get(&chunk->node); - if (config_thp && opt_thp) { - hugepage = chunk->hugepage; - } - committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); - if (!committed) { - /* - * Decommit the header. Mark the chunk as decommitted even if - * header decommit fails, since treating a partially committed - * chunk as committed has a high potential for causing later - * access of decommitted memory. - */ - chunk_hooks = chunk_hooks_get(tsdn, arena); - chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, - arena->ind); - } - if (config_thp && opt_thp && hugepage != thp_initially_huge) { - /* - * Convert chunk back to initial THP state, so that all - * subsequent chunk allocations start out in a consistent state. 
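pages_huge()/pages_nohuge(), called just below, flip a chunk's transparent-huge-page eligibility so every chunk re-enters the allocator in the state opt.thp selected. On Linux these calls reduce to madvise(2) advice flags; a sketch under that assumption, with illustrative names and error handling trimmed:

#include <stddef.h>
#include <sys/mman.h>

static int
pages_huge_sketch(void *addr, size_t size)
{
#ifdef MADV_HUGEPAGE
	return (madvise(addr, size, MADV_HUGEPAGE));
#else
	(void)addr; (void)size;
	return (-1);	/* no THP madvise support on this kernel/libc */
#endif
}

static int
pages_nohuge_sketch(void *addr, size_t size)
{
#ifdef MADV_NOHUGEPAGE
	return (madvise(addr, size, MADV_NOHUGEPAGE));
#else
	(void)addr; (void)size;
	return (-1);
#endif
}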
- */ - if (thp_initially_huge) { - pages_huge(chunk, chunksize); - } else { - pages_nohuge(chunk, chunksize); - } - } - - chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, - sn, committed); - - if (config_stats) { - arena->stats.mapped -= chunksize; - arena->stats.metadata_mapped -= (map_bias << LG_PAGE); - } -} - -static void -arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) -{ - - assert(arena->spare != spare); - - if (arena_mapbits_dirty_get(spare, map_bias) != 0) { - arena_run_dirty_remove(arena, spare, map_bias, - chunk_npages-map_bias); - } - - arena_chunk_discard(tsdn, arena, spare); -} - -static void -arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) -{ - arena_chunk_t *spare; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - assert(arena_mapbits_decommitted_get(chunk, map_bias) == - arena_mapbits_decommitted_get(chunk, chunk_npages-1)); - - /* Remove run from runs_avail, so that the arena does not use it. */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); - - ql_remove(&arena->achunks, &chunk->node, ql_link); - spare = arena->spare; - arena->spare = chunk; - if (spare != NULL) - arena_spare_discard(tsdn, arena, spare); -} - -static void -arena_huge_malloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.nmalloc_huge++; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].nmalloc++; - arena->stats.hstats[index].curhchunks++; -} - -static void -arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.nmalloc_huge--; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].nmalloc--; - arena->stats.hstats[index].curhchunks--; -} - -static void -arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.ndalloc_huge++; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].ndalloc++; - arena->stats.hstats[index].curhchunks--; -} - -static void -arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.ndalloc_huge++; - arena->stats.hstats[index].ndalloc--; -} - -static void -arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.ndalloc_huge--; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].ndalloc--; - arena->stats.hstats[index].curhchunks++; -} - -static void -arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) -{ - - arena_huge_dalloc_stats_update(arena, oldsize); - arena_huge_malloc_stats_update(arena, usize); -} - -static void -arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, - size_t usize) -{ - - arena_huge_dalloc_stats_update_undo(arena, oldsize); - arena_huge_malloc_stats_update_undo(arena, usize); -} - 
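The *_stats_update/*_undo pairs above exist because the huge-allocation paths that follow update statistics optimistically before dropping the arena lock, then roll the update back if the chunk allocation ultimately fails. The pattern in miniature, with locking elided and malloc() standing in for the real chunk allocation; names are illustrative:

#include <stddef.h>
#include <stdlib.h>

static size_t nmalloc_huge;
static size_t allocated_huge;

static void *
alloc_huge(size_t usize)
{
	void *ret;

	/* Optimistically account for the allocation... */
	nmalloc_huge++;
	allocated_huge += usize;

	ret = malloc(usize);	/* stand-in for chunk_alloc_wrapper() */
	if (ret == NULL) {
		/* ...and undo the accounting when it fails. */
		nmalloc_huge--;
		allocated_huge -= usize;
	}
	return (ret);
}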
-extent_node_t * -arena_node_alloc(tsdn_t *tsdn, arena_t *arena) -{ - extent_node_t *node; - - malloc_mutex_lock(tsdn, &arena->node_cache_mtx); - node = ql_last(&arena->node_cache, ql_link); - if (node == NULL) { - malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); - return (base_alloc(tsdn, sizeof(extent_node_t))); - } - ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); - return (node); -} - -void -arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) -{ - - malloc_mutex_lock(tsdn, &arena->node_cache_mtx); - ql_elm_new(node, ql_link); - ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); -} - -static void * -arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn, - bool *zero, size_t csize) -{ - void *ret; - bool commit = true; - - ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, - alignment, sn, zero, &commit); - if (ret == NULL) { - /* Revert optimistic stats updates. */ - malloc_mutex_lock(tsdn, &arena->lock); - if (config_stats) { - arena_huge_malloc_stats_update_undo(arena, usize); - arena->stats.mapped -= usize; - } - arena_nactive_sub(arena, usize >> LG_PAGE); - malloc_mutex_unlock(tsdn, &arena->lock); - } - - return (ret); -} - -void * -arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, size_t *sn, bool *zero) -{ - void *ret; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize = CHUNK_CEILING(usize); - bool commit = true; - - malloc_mutex_lock(tsdn, &arena->lock); - - /* Optimistically update stats. */ - if (config_stats) { - arena_huge_malloc_stats_update(arena, usize); - arena->stats.mapped += usize; - } - arena_nactive_add(arena, usize >> LG_PAGE); - - ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, - alignment, sn, zero, &commit, true); - malloc_mutex_unlock(tsdn, &arena->lock); - if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, - usize, alignment, sn, zero, csize); - } - - return (ret); -} - -void -arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, - size_t sn) -{ - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize; - - csize = CHUNK_CEILING(usize); - malloc_mutex_lock(tsdn, &arena->lock); - if (config_stats) { - arena_huge_dalloc_stats_update(arena, usize); - arena->stats.mapped -= usize; - } - arena_nactive_sub(arena, usize >> LG_PAGE); - - chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true); - malloc_mutex_unlock(tsdn, &arena->lock); -} - -void -arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t oldsize, size_t usize) -{ - - assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); - assert(oldsize != usize); - - malloc_mutex_lock(tsdn, &arena->lock); - if (config_stats) - arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (oldsize < usize) - arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); - else - arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); - malloc_mutex_unlock(tsdn, &arena->lock); -} - -void -arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t oldsize, size_t usize, size_t sn) -{ - size_t udiff = oldsize - usize; - size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - - malloc_mutex_lock(tsdn, &arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - 
if (cdiff != 0) - arena->stats.mapped -= cdiff; - } - arena_nactive_sub(arena, udiff >> LG_PAGE); - - if (cdiff != 0) { - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - void *nchunk = (void *)((uintptr_t)chunk + - CHUNK_CEILING(usize)); - - chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, - sn, true); - } - malloc_mutex_unlock(tsdn, &arena->lock); -} - -static bool -arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, - size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff) -{ - bool err; - bool commit = true; - - err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, - chunksize, sn, zero, &commit) == NULL); - if (err) { - /* Revert optimistic stats updates. */ - malloc_mutex_lock(tsdn, &arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update_undo(arena, oldsize, - usize); - arena->stats.mapped -= cdiff; - } - arena_nactive_sub(arena, udiff >> LG_PAGE); - malloc_mutex_unlock(tsdn, &arena->lock); - } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, - *sn, *zero, true); - err = true; - } - return (err); -} - -bool -arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t oldsize, size_t usize, bool *zero) -{ - bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); - void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); - size_t udiff = usize - oldsize; - size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); - size_t sn; - bool commit = true; - - malloc_mutex_lock(tsdn, &arena->lock); - - /* Optimistically update stats. */ - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - arena->stats.mapped += cdiff; - } - arena_nactive_add(arena, udiff >> LG_PAGE); - - err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, - chunksize, &sn, zero, &commit, true) == NULL); - malloc_mutex_unlock(tsdn, &arena->lock); - if (err) { - err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, - &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk, - udiff, cdiff); - } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, - sn, *zero, true); - err = true; - } - - return (err); -} - -/* - * Do first-best-fit run selection, i.e. select the lowest run that best fits. - * Run sizes are indexed, so not all candidate runs are necessarily exactly the - * same size. - */ -static arena_run_t * -arena_run_first_best_fit(arena_t *arena, size_t size) -{ - pszind_t pind, i; - - pind = psz2ind(run_quantize_ceil(size)); - - for (i = pind; pind2sz(i) <= chunksize; i++) { - arena_chunk_map_misc_t *miscelm = arena_run_heap_first( - &arena->runs_avail[i]); - if (miscelm != NULL) - return (&miscelm->run); - } - - return (NULL); -} - -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run = arena_run_first_best_fit(arena, size); - if (run != NULL) { - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - } - return (run); -} - -static arena_run_t * -arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - - /* Search the arena's chunks for the lowest best fit. 
*/ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(tsdn, arena); - if (chunk != NULL) { - run = &arena_miscelm_get_mutable(chunk, map_bias)->run; - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); -} - -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) -{ - arena_run_t *run = arena_run_first_best_fit(arena, size); - if (run != NULL) { - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - } - return (run); -} - -static arena_run_t * -arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - assert(binind != BININD_INVALID); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(tsdn, arena); - if (chunk != NULL) { - run = &arena_miscelm_get_mutable(chunk, map_bias)->run; - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_small_helper(arena, size, binind)); -} - -static bool -arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) -{ - - return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) - << 3)); -} - -ssize_t -arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) -{ - ssize_t lg_dirty_mult; - - malloc_mutex_lock(tsdn, &arena->lock); - lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(tsdn, &arena->lock); - - return (lg_dirty_mult); -} - -bool -arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) -{ - - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - - malloc_mutex_lock(tsdn, &arena->lock); - arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(tsdn, arena); - malloc_mutex_unlock(tsdn, &arena->lock); - - return (false); -} - -static void -arena_decay_deadline_init(arena_t *arena) -{ - - assert(opt_purge == purge_mode_decay); - - /* - * Generate a new deadline that is uniformly random within the next - * epoch after the current one. 
- */ - nstime_copy(&arena->decay.deadline, &arena->decay.epoch); - nstime_add(&arena->decay.deadline, &arena->decay.interval); - if (arena->decay.time > 0) { - nstime_t jitter; - - nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state, - nstime_ns(&arena->decay.interval))); - nstime_add(&arena->decay.deadline, &jitter); - } -} - -static bool -arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) -{ - - assert(opt_purge == purge_mode_decay); - - return (nstime_compare(&arena->decay.deadline, time) <= 0); -} - -static size_t -arena_decay_backlog_npages_limit(const arena_t *arena) -{ - static const uint64_t h_steps[] = { -#define STEP(step, h, x, y) \ - h, - SMOOTHSTEP -#undef STEP - }; - uint64_t sum; - size_t npages_limit_backlog; - unsigned i; - - assert(opt_purge == purge_mode_decay); - - /* - * For each element of decay_backlog, multiply by the corresponding - * fixed-point smoothstep decay factor. Sum the products, then divide - * to round down to the nearest whole number of pages. - */ - sum = 0; - for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) - sum += arena->decay.backlog[i] * h_steps[i]; - npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - - return (npages_limit_backlog); -} - -static void -arena_decay_backlog_update_last(arena_t *arena) -{ - size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? - arena->ndirty - arena->decay.ndirty : 0; - arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; -} - -static void -arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) -{ - - if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { - memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * - sizeof(size_t)); - } else { - size_t nadvance_z = (size_t)nadvance_u64; - - assert((uint64_t)nadvance_z == nadvance_u64); - - memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], - (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); - if (nadvance_z > 1) { - memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); - } - } - - arena_decay_backlog_update_last(arena); -} - -static void -arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) -{ - uint64_t nadvance_u64; - nstime_t delta; - - assert(opt_purge == purge_mode_decay); - assert(arena_decay_deadline_reached(arena, time)); - - nstime_copy(&delta, time); - nstime_subtract(&delta, &arena->decay.epoch); - nadvance_u64 = nstime_divide(&delta, &arena->decay.interval); - assert(nadvance_u64 > 0); - - /* Add nadvance_u64 decay intervals to epoch. */ - nstime_copy(&delta, &arena->decay.interval); - nstime_imultiply(&delta, nadvance_u64); - nstime_add(&arena->decay.epoch, &delta); - - /* Set a new deadline. */ - arena_decay_deadline_init(arena); - - /* Update the backlog. 
*/ - arena_decay_backlog_update(arena, nadvance_u64); -} - -static void -arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) -{ - size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); - - if (arena->ndirty > ndirty_limit) - arena_purge_to_limit(tsdn, arena, ndirty_limit); - arena->decay.ndirty = arena->ndirty; -} - -static void -arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) -{ - - arena_decay_epoch_advance_helper(arena, time); - arena_decay_epoch_advance_purge(tsdn, arena); -} - -static void -arena_decay_init(arena_t *arena, ssize_t decay_time) -{ - - arena->decay.time = decay_time; - if (decay_time > 0) { - nstime_init2(&arena->decay.interval, decay_time, 0); - nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS); - } - - nstime_init(&arena->decay.epoch, 0); - nstime_update(&arena->decay.epoch); - arena->decay.jitter_state = (uint64_t)(uintptr_t)arena; - arena_decay_deadline_init(arena); - arena->decay.ndirty = arena->ndirty; - memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); -} - -static bool -arena_decay_time_valid(ssize_t decay_time) -{ - - if (decay_time < -1) - return (false); - if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) - return (true); - return (false); -} - -ssize_t -arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) -{ - ssize_t decay_time; - - malloc_mutex_lock(tsdn, &arena->lock); - decay_time = arena->decay.time; - malloc_mutex_unlock(tsdn, &arena->lock); - - return (decay_time); -} - -bool -arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) -{ - - if (!arena_decay_time_valid(decay_time)) - return (true); - - malloc_mutex_lock(tsdn, &arena->lock); - /* - * Restart decay backlog from scratch, which may cause many dirty pages - * to be immediately purged. It would conceptually be possible to map - * the old backlog onto the new backlog, but there is no justification - * for such complexity since decay_time changes are intended to be - * infrequent, either between the {-1, 0, >0} states, or a one-time - * arbitrary change during initial arena configuration. - */ - arena_decay_init(arena, decay_time); - arena_maybe_purge(tsdn, arena); - malloc_mutex_unlock(tsdn, &arena->lock); - - return (false); -} - -static void -arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) -{ - - assert(opt_purge == purge_mode_ratio); - - /* Don't purge if the option is disabled. */ - if (arena->lg_dirty_mult < 0) - return; - - /* - * Iterate, since preventing recursive purging could otherwise leave too - * many dirty pages. - */ - while (true) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - if (threshold < chunk_npages) - threshold = chunk_npages; - /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. - */ - if (arena->ndirty <= threshold) - return; - arena_purge_to_limit(tsdn, arena, threshold); - } -} - -static void -arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) -{ - nstime_t time; - - assert(opt_purge == purge_mode_decay); - - /* Purge all or nothing if the option is disabled. */ - if (arena->decay.time <= 0) { - if (arena->decay.time == 0) - arena_purge_to_limit(tsdn, arena, 0); - return; - } - - nstime_init(&time, 0); - nstime_update(&time); - if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch, - &time) > 0)) { - /* - * Time went backwards. 
Move the epoch back in time and - * generate a new deadline, with the expectation that time - * typically flows forward for long enough periods of time that - * epochs complete. Unfortunately, this strategy is susceptible - * to clock jitter triggering premature epoch advances, but - * clock jitter estimation and compensation isn't feasible here - * because calls into this code are event-driven. - */ - nstime_copy(&arena->decay.epoch, &time); - arena_decay_deadline_init(arena); - } else { - /* Verify that time does not go backwards. */ - assert(nstime_compare(&arena->decay.epoch, &time) <= 0); - } - - /* - * If the deadline has been reached, advance to the current epoch and - * purge to the new limit if necessary. Note that dirty pages created - * during the current epoch are not subject to purge until a future - * epoch, so as a result purging only happens during epoch advances. - */ - if (arena_decay_deadline_reached(arena, &time)) - arena_decay_epoch_advance(tsdn, arena, &time); -} - -void -arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) -{ - - /* Don't recursively purge. */ - if (arena->purging) - return; - - if (opt_purge == purge_mode_ratio) - arena_maybe_purge_ratio(tsdn, arena); - else - arena_maybe_purge_decay(tsdn, arena); -} - -static size_t -arena_dirty_count(arena_t *arena) -{ - size_t ndirty = 0; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - assert(arena_mapbits_allocated_get(chunk, pageind) == - 0); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_dirty_get(chunk, pageind) != 0); - npages = arena_mapbits_unallocated_size_get(chunk, - pageind) >> LG_PAGE; - } - ndirty += npages; - } - - return (ndirty); -} - -static size_t -arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; - size_t nstashed = 0; - - /* Stash runs/chunks according to ndirty_limit. */ - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = rdelm_next) { - size_t npages; - rdelm_next = qr_next(rdelm, rd_link); - - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next; - size_t sn; - bool zero, commit; - UNUSED void *chunk; - - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - if (opt_purge == purge_mode_decay && arena->ndirty - - (nstashed + npages) < ndirty_limit) - break; - - chunkselm_next = qr_next(chunkselm, cc_link); - /* - * Allocate. chunkselm remains valid due to the - * dalloc_node=false argument to chunk_alloc_cache(). 
- */ - zero = false; - commit = false; - chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, - extent_node_addr_get(chunkselm), - extent_node_size_get(chunkselm), chunksize, &sn, - &zero, &commit, false); - assert(chunk == extent_node_addr_get(chunkselm)); - assert(zero == extent_node_zeroed_get(chunkselm)); - extent_node_dirty_insert(chunkselm, purge_runs_sentinel, - purge_chunks_sentinel); - assert(npages == (extent_node_size_get(chunkselm) >> - LG_PAGE)); - chunkselm = chunkselm_next; - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - arena_run_t *run = &miscelm->run; - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - if (opt_purge == purge_mode_decay && arena->ndirty - - (nstashed + npages) < ndirty_limit) - break; - - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - /* - * If purging the spare chunk's run, make it available - * prior to allocation. - */ - if (chunk == arena->spare) - arena_chunk_alloc(tsdn, arena); - - /* Temporarily allocate the free dirty run. */ - arena_run_split_large(arena, run, run_size, false); - /* Stash. */ - if (false) - qr_new(rdelm, rd_link); /* Redundant. */ - else { - assert(qr_next(rdelm, rd_link) == rdelm); - assert(qr_prev(rdelm, rd_link) == rdelm); - } - qr_meld(purge_runs_sentinel, rdelm, rd_link); - } - - nstashed += npages; - if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= - ndirty_limit) - break; - } - - return (nstashed); -} - -static size_t -arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - size_t npurged, nmadvise; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - if (config_stats) - nmadvise = 0; - npurged = 0; - - malloc_mutex_unlock(tsdn, &arena->lock); - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - /* - * Don't actually purge the chunk here because 1) - * chunkselm is embedded in the chunk and must remain - * valid, and 2) we deallocate the chunk in - * arena_unstash_purged(), where it is destroyed, - * decommitted, or purged, depending on chunk - * deallocation policy. - */ - size_t size = extent_node_size_get(chunkselm); - npages = size >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - size_t pageind, run_size, flag_unzeroed, flags, i; - bool decommitted; - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - pageind = arena_miscelm_to_pageind(miscelm); - run_size = arena_mapbits_large_size_get(chunk, pageind); - npages = run_size >> LG_PAGE; - - /* - * If this is the first run purged within chunk, mark - * the chunk as non-THP-capable. This will prevent all - * use of THPs for this chunk until the chunk as a whole - * is deallocated. 
- */ - if (config_thp && opt_thp && chunk->hugepage) { - chunk->hugepage = pages_nohuge(chunk, - chunksize); - } - - assert(pageind + npages <= chunk_npages); - assert(!arena_mapbits_decommitted_get(chunk, pageind)); - assert(!arena_mapbits_decommitted_get(chunk, - pageind+npages-1)); - decommitted = !chunk_hooks->decommit(chunk, chunksize, - pageind << LG_PAGE, npages << LG_PAGE, arena->ind); - if (decommitted) { - flag_unzeroed = 0; - flags = CHUNK_MAP_DECOMMITTED; - } else { - flag_unzeroed = chunk_purge_wrapper(tsdn, arena, - chunk_hooks, chunk, chunksize, pageind << - LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; - flags = flag_unzeroed; - } - arena_mapbits_large_set(chunk, pageind+npages-1, 0, - flags); - arena_mapbits_large_set(chunk, pageind, run_size, - flags); - - /* - * Set the unzeroed flag for internal pages, now that - * chunk_purge_wrapper() has returned whether the pages - * were zeroed as a side effect of purging. This chunk - * map modification is safe even though the arena mutex - * isn't currently owned by this thread, because the run - * is marked as allocated, thus protecting it from being - * modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 1; i < npages-1; i++) { - arena_mapbits_internal_set(chunk, pageind+i, - flag_unzeroed); - } - } - - npurged += npages; - if (config_stats) - nmadvise++; - } - malloc_mutex_lock(tsdn, &arena->lock); - - if (config_stats) { - arena->stats.nmadvise += nmadvise; - arena->stats.purged += npurged; - } - - return (npurged); -} - -static void -arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; - - /* Deallocate chunks/runs. 
*/ - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = rdelm_next) { - rdelm_next = qr_next(rdelm, rd_link); - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next = qr_next(chunkselm, - cc_link); - void *addr = extent_node_addr_get(chunkselm); - size_t size = extent_node_size_get(chunkselm); - size_t sn = extent_node_sn_get(chunkselm); - bool zeroed = extent_node_zeroed_get(chunkselm); - bool committed = extent_node_committed_get(chunkselm); - extent_node_dirty_remove(chunkselm); - arena_node_dalloc(tsdn, arena, chunkselm); - chunkselm = chunkselm_next; - chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, - size, sn, zeroed, committed); - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - pageind) != 0); - arena_run_t *run = &miscelm->run; - qr_remove(rdelm, rd_link); - arena_run_dalloc(tsdn, arena, run, false, true, - decommitted); - } - } -} - -/* - * NB: ndirty_limit is interpreted differently depending on opt_purge: - * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the - * desired state: - * (arena->ndirty <= ndirty_limit) - * - purge_mode_decay: Purge as many dirty runs/chunks as possible without - * violating the invariant: - * (arena->ndirty >= ndirty_limit) - */ -static void -arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) -{ - chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); - size_t npurge, npurged; - arena_runs_dirty_link_t purge_runs_sentinel; - extent_node_t purge_chunks_sentinel; - - arena->purging = true; - - /* - * Calls to arena_dirty_count() are disabled even for debug builds - * because overhead grows nonlinearly as memory usage increases. - */ - if (false && config_debug) { - size_t ndirty = arena_dirty_count(arena); - assert(ndirty == arena->ndirty); - } - assert(opt_purge != purge_mode_ratio || (arena->nactive >> - arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); - - qr_new(&purge_runs_sentinel, rd_link); - extent_node_dirty_linkage_init(&purge_chunks_sentinel); - - npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, - &purge_runs_sentinel, &purge_chunks_sentinel); - if (npurge == 0) - goto label_return; - npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, - &purge_runs_sentinel, &purge_chunks_sentinel); - assert(npurged == npurge); - arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - - if (config_stats) - arena->stats.npurge++; - -label_return: - arena->purging = false; -} - -void -arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) -{ - - malloc_mutex_lock(tsdn, &arena->lock); - if (all) - arena_purge_to_limit(tsdn, arena, 0); - else - arena_maybe_purge(tsdn, arena); - malloc_mutex_unlock(tsdn, &arena->lock); -} - -static void -arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) -{ - size_t pageind, npages; - - cassert(config_prof); - assert(opt_prof); - - /* - * Iterate over the allocated runs and remove profiled allocations from - * the sample set. 
- */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - if (arena_mapbits_allocated_get(chunk, pageind) != 0) { - if (arena_mapbits_large_get(chunk, pageind) != 0) { - void *ptr = (void *)((uintptr_t)chunk + (pageind - << LG_PAGE)); - size_t usize = isalloc(tsd_tsdn(tsd), ptr, - config_prof); - - prof_free(tsd, ptr, usize); - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - /* Skip small run. */ - size_t binind = arena_mapbits_binind_get(chunk, - pageind); - arena_bin_info_t *bin_info = - &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } else { - /* Skip unallocated run. */ - npages = arena_mapbits_unallocated_size_get(chunk, - pageind) >> LG_PAGE; - } - assert(pageind + npages <= chunk_npages); - } -} - -void -arena_reset(tsd_t *tsd, arena_t *arena) -{ - unsigned i; - extent_node_t *node; - - /* - * Locking in this function is unintuitive. The caller guarantees that - * no concurrent operations are happening in this arena, but there are - * still reasons that some locking is necessary: - * - * - Some of the functions in the transitive closure of calls assume - * appropriate locks are held, and in some cases these locks are - * temporarily dropped to avoid lock order reversal or deadlock due to - * reentry. - * - mallctl("epoch", ...) may concurrently refresh stats. While - * strictly speaking this is a "concurrent operation", disallowing - * stats refreshes would impose an inconvenient burden. - */ - - /* Remove large allocations from prof sample set. */ - if (config_prof && opt_prof) { - ql_foreach(node, &arena->achunks, ql_link) { - arena_achunk_prof_reset(tsd, arena, - extent_node_addr_get(node)); - } - } - - /* Reset curruns for large size classes. */ - if (config_stats) { - for (i = 0; i < nlclasses; i++) - arena->stats.lstats[i].curruns = 0; - } - - /* Huge allocations. */ - malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); - for (node = ql_last(&arena->huge, ql_link); node != NULL; node = - ql_last(&arena->huge, ql_link)) { - void *ptr = extent_node_addr_get(node); - size_t usize; - - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); - if (config_stats || (config_prof && opt_prof)) - usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - /* Remove huge allocation from prof sample set. */ - if (config_prof && opt_prof) - prof_free(tsd, ptr, usize); - huge_dalloc(tsd_tsdn(tsd), ptr); - malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); - /* Cancel out unwanted effects on stats. */ - if (config_stats) - arena_huge_reset_stats_cancel(arena, usize); - } - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); - - malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); - - /* Bins. */ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - bin->runcur = NULL; - arena_run_heap_new(&bin->runs); - if (config_stats) { - bin->stats.curregs = 0; - bin->stats.curruns = 0; - } - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - } - - /* - * Re-initialize runs_dirty such that the chunks_cache and runs_dirty - * chains directly correspond. - */ - qr_new(&arena->runs_dirty, rd_link); - for (node = qr_next(&arena->chunks_cache, cc_link); - node != &arena->chunks_cache; node = qr_next(node, cc_link)) { - qr_new(&node->rd, rd_link); - qr_meld(&arena->runs_dirty, &node->rd, rd_link); - } - - /* Arena chunks. 
*/ - for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = - ql_last(&arena->achunks, ql_link)) { - ql_remove(&arena->achunks, node, ql_link); - arena_chunk_discard(tsd_tsdn(tsd), arena, - extent_node_addr_get(node)); - } - - /* Spare. */ - if (arena->spare != NULL) { - arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); - arena->spare = NULL; - } - - assert(!arena->purging); - arena->nactive = 0; - - for (i = 0; i < NPSIZES; i++) - arena_run_heap_new(&arena->runs_avail[i]); - - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); -} - -static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, - size_t flag_decommitted) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && - arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == - flag_decommitted) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_decommitted); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); - - /* - * If the successor is dirty, remove it from the set of dirty - * pages. - */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind+run_pages, - nrun_pages); - } - - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. */ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == - flag_decommitted) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - flag_decommitted); - arena_avail_remove(arena, chunk, run_ind, prun_pages); - - /* - * If the predecessor is dirty, remove it from the set of dirty - * pages. 
- */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind, - prun_pages); - } - - size += prun_size; - run_pages += prun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; -} - -static size_t -arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t run_ind) -{ - size_t size; - - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; - size = bin_info->run_size; - } - - return (size); -} - -static void -arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned, bool decommitted) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - size = arena_run_size_get(arena, chunk, run, run_ind); - run_pages = (size >> LG_PAGE); - arena_nactive_sub(arena, run_pages); - - /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) - != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty || decommitted) { - size_t flags = flag_dirty | flag_decommitted; - arena_mapbits_unallocated_set(chunk, run_ind, size, flags); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - flags); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } - - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty, flag_decommitted); - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages); - - if (dirty) - arena_run_dirty_insert(arena, chunk, run_ind, run_pages); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxrun) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(tsdn, arena, chunk); - } - - /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. 
Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. - */ - if (dirty) - arena_maybe_purge(tsdn, arena); -} - -static void -arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - - arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != - 0)); -} - -static void -arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - arena_chunk_map_misc_t *tail_miscelm; - arena_run_t *tail_run; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. 
- */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - - tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); - tail_run = &tail_miscelm->run; - arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted - != 0)); -} - -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - - arena_run_heap_insert(&bin->runs, miscelm); -} - -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_chunk_map_misc_t *miscelm; - - miscelm = arena_run_heap_remove_first(&bin->runs); - if (miscelm == NULL) - return (NULL); - if (config_stats) - bin->stats.reruns++; - - return (&miscelm->run); -} - -static arena_run_t * -arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - szind_t binind; - arena_bin_info_t *bin_info; - - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - - /* Allocate a new run. */ - malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ - malloc_mutex_lock(tsdn, &arena->lock); - run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); - if (run != NULL) { - /* Initialize run internals. */ - run->binind = binind; - run->nfree = bin_info->nregs; - bitmap_init(run->bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(tsdn, &arena->lock); - /********************************/ - malloc_mutex_lock(tsdn, &bin->lock); - if (run != NULL) { - if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; - } - return (run); - } - - /* - * arena_run_alloc_small() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - - return (NULL); -} - -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ -static void * -arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) -{ - szind_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(tsdn, arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { - /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). 
- */
- void *ret;
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin_info);
- if (run != NULL) {
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc_small() may have allocated run, or
- * it may have pulled run from the bin's run tree.
- * Therefore it is unsafe to make any assumptions about
- * how run has previously been used, and
- * arena_bin_lower_run() must be called, as if a region
- * were just deallocated from the run.
- */
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs) {
- arena_dalloc_bin_run(tsdn, arena, chunk, run,
- bin);
- } else
- arena_bin_lower_run(arena, run, bin);
- }
- return (ret);
- }
-
- if (run == NULL)
- return (NULL);
-
- bin->runcur = run;
-
- assert(bin->runcur->nfree > 0);
-
- return (arena_run_reg_alloc(bin->runcur, bin_info));
-}
-
-void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes)
-{
- unsigned i, nfill;
- arena_bin_t *bin;
-
- assert(tbin->ncached == 0);
-
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
- prof_idump(tsdn);
- bin = &arena->bins[binind];
- malloc_mutex_lock(tsdn, &bin->lock);
- for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tbin->lg_fill_div); i < nfill; i++) {
- arena_run_t *run;
- void *ptr;
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ptr = arena_bin_malloc_hard(tsdn, arena, bin);
- if (ptr == NULL) {
- /*
- * OOM. tbin->avail isn't yet filled down to its first
- * element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
- */
- if (i > 0) {
- memmove(tbin->avail - i, tbin->avail - nfill,
- i * sizeof(void *));
- }
- break;
- }
- if (config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ptr, &arena_bin_info[binind],
- true);
- }
- /* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
- }
- if (config_stats) {
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.curregs += i;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
- }
- malloc_mutex_unlock(tsdn, &bin->lock);
- tbin->ncached = i;
- arena_decay_tick(tsdn, arena);
-}
-
-void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
-{
-
- size_t redzone_size = bin_info->redzone_size;
-
- if (zero) {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- } else {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
- }
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
-#endif
-static void
-arena_redzone_corruption(void *ptr, size_t usize, bool after,
- size_t offset, uint8_t byte)
-{
-
- malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
- "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
- after ?
"after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(n_arena_redzone_corruption); -#endif - -static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - bool error = false; - - if (opt_junk_alloc) { - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != JEMALLOC_ALLOC_JUNK) { - error = true; - arena_redzone_corruption(ptr, size, false, i, - *byte); - if (reset) - *byte = JEMALLOC_ALLOC_JUNK; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != JEMALLOC_ALLOC_JUNK) { - error = true; - arena_redzone_corruption(ptr, size, true, i, - *byte); - if (reset) - *byte = JEMALLOC_ALLOC_JUNK; - } - } - } - - if (opt_abort && error) - abort(); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(n_arena_dalloc_junk_small); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - szind_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk_free); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = size2index(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); -} - -static void * -arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) -{ - void *ret; - arena_bin_t *bin; - size_t usize; - arena_run_t *run; - - assert(binind < NBINS); - bin = &arena->bins[binind]; - usize = index2size(binind); - - malloc_mutex_lock(tsdn, &bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(tsdn, arena, bin); - - if (ret == NULL) { - malloc_mutex_unlock(tsdn, &bin->lock); - return (NULL); - } - - if (config_stats) { - bin->stats.nmalloc++; - bin->stats.nrequests++; - bin->stats.curregs++; - } - malloc_mutex_unlock(tsdn, &bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) - prof_idump(tsdn); - - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); - } else { - if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); - memset(ret, 0, usize); - } - - arena_decay_tick(tsdn, arena); - return (ret); -} - -void * -arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) -{ - void *ret; - size_t usize; - uintptr_t random_offset; - arena_run_t *run; - arena_chunk_map_misc_t *miscelm; - 
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); - - /* Large allocation. */ - usize = index2size(binind); - malloc_mutex_lock(tsdn, &arena->lock); - if (config_cache_oblivious) { - uint64_t r; - - /* - * Compute a uniformly distributed offset within the first page - * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 - * for 4 KiB pages and 64-byte cachelines. - */ - r = prng_lg_range_zu(&arena->offset_state, LG_PAGE - - LG_CACHELINE, false); - random_offset = ((uintptr_t)r) << LG_CACHELINE; - } else - random_offset = 0; - run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); - if (run == NULL) { - malloc_mutex_unlock(tsdn, &arena->lock); - return (NULL); - } - miscelm = arena_run_to_miscelm(run); - ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + - random_offset); - if (config_stats) { - szind_t index = binind - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(tsdn, &arena->lock); - if (config_prof && idump) - prof_idump(tsdn); - - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) - memset(ret, JEMALLOC_ALLOC_JUNK, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } - - arena_decay_tick(tsdn, arena); - return (ret); -} - -void * -arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, - bool zero) -{ - - assert(!tsdn_null(tsdn) || arena != NULL); - - if (likely(!tsdn_null(tsdn))) - arena = arena_choose(tsdn_tsd(tsdn), arena); - if (unlikely(arena == NULL)) - return (NULL); - - if (likely(size <= SMALL_MAXCLASS)) - return (arena_malloc_small(tsdn, arena, ind, zero)); - if (likely(size <= large_maxclass)) - return (arena_malloc_large(tsdn, arena, ind, zero)); - assert(index2size(ind) >= chunksize); - return (huge_malloc(tsdn, arena, index2size(ind), zero)); -} - -/* Only handles large allocations that require more than page alignment. 
*/ -static void * -arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero) -{ - void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(!tsdn_null(tsdn) || arena != NULL); - assert(usize == PAGE_CEILING(usize)); - - if (likely(!tsdn_null(tsdn))) - arena = arena_choose(tsdn_tsd(tsdn), arena); - if (unlikely(arena == NULL)) - return (NULL); - - alignment = PAGE_CEILING(alignment); - alloc_size = usize + large_pad + alignment - PAGE; - - malloc_mutex_lock(tsdn, &arena->lock); - run = arena_run_alloc_large(tsdn, arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(tsdn, &arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - - leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - - (uintptr_t)rpages; - assert(alloc_size >= leadsize + usize); - trailsize = alloc_size - leadsize - usize - large_pad; - if (leadsize != 0) { - arena_chunk_map_misc_t *head_miscelm = miscelm; - arena_run_t *head_run = run; - - miscelm = arena_miscelm_get_mutable(chunk, - arena_miscelm_to_pageind(head_miscelm) + (leadsize >> - LG_PAGE)); - run = &miscelm->run; - - arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, - alloc_size - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + - trailsize, usize + large_pad, false); - } - if (arena_run_init_large(arena, run, usize + large_pad, zero)) { - size_t run_ind = - arena_miscelm_to_pageind(arena_run_to_miscelm(run)); - bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - run_ind) != 0); - - assert(decommitted); /* Cause of OOM. */ - arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); - malloc_mutex_unlock(tsdn, &arena->lock); - return (NULL); - } - ret = arena_miscelm_to_rpages(miscelm); - - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(tsdn, &arena->lock); - - if (config_fill && !zero) { - if (unlikely(opt_junk_alloc)) - memset(ret, JEMALLOC_ALLOC_JUNK, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - arena_decay_tick(tsdn, arena); - return (ret); -} - -void * -arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache) -{ - void *ret; - - if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE - && (usize & PAGE_MASK) == 0))) { - /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, - tcache, true); - } else if (usize <= large_maxclass && alignment <= PAGE) { - /* - * Large; alignment doesn't require special run placement. - * However, the cached pointer may be at a random offset from - * the base of the run, so do some bit manipulation to retrieve - * the base. 
- */ - ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, - tcache, true); - if (config_cache_oblivious) - ret = (void *)((uintptr_t)ret & ~PAGE_MASK); - } else { - if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsdn, arena, usize, alignment, - zero); - } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsdn, arena, usize, zero); - else { - ret = huge_palloc(tsdn, arena, usize, alignment, zero); - } - } - return (ret); -} - -void -arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind; - szind_t binind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); - assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = size2index(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - - assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); - assert(isalloc(tsdn, ptr, true) == size); -} - -static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - szind_t binind = arena_bin_index(extent_node_arena_get( - &chunk->node), bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - /* - * The following block's conditional is necessary because if the - * run only contains one region, then it never gets inserted - * into the non-full runs tree. - */ - if (bin_info->nregs != 1) { - arena_chunk_map_misc_t *miscelm = - arena_run_to_miscelm(run); - - arena_run_heap_remove(&bin->runs, miscelm); - } - } -} - -static void -arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin) -{ - - assert(run != bin->runcur); - - malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ - malloc_mutex_lock(tsdn, &arena->lock); - arena_run_dalloc(tsdn, arena, run, true, false, false); - malloc_mutex_unlock(tsdn, &arena->lock); - /****************************/ - malloc_mutex_lock(tsdn, &bin->lock); - if (config_stats) - bin->stats.curruns--; -} - -static void -arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin) -{ - - /* - * Make sure that if bin->runcur is non-NULL, it refers to the - * oldest/lowest non-full run. It is okay to NULL runcur out rather - * than proactively keeping it pointing at the oldest/lowest non-full - * run. - */ - if (bin->runcur != NULL && - arena_snad_comp(arena_run_to_miscelm(bin->runcur), - arena_run_to_miscelm(run)) > 0) { - /* Switch runcur. 
*/ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); -} - -static void -arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) -{ - size_t pageind, rpages_ind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - szind_t binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; - binind = run->binind; - bin = &arena->bins[binind]; - bin_info = &arena_bin_info[binind]; - - if (!junked && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_small(ptr, bin_info); - - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, run, bin); - - if (config_stats) { - bin->stats.ndalloc++; - bin->stats.curregs--; - } -} - -void -arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) -{ - - arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); -} - -void -arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_bits_t *bitselm) -{ - arena_run_t *run; - arena_bin_t *bin; - size_t rpages_ind; - - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; - bin = &arena->bins[run->binind]; - malloc_mutex_lock(tsdn, &bin->lock); - arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(tsdn, &bin->lock); -} - -void -arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t pageind) -{ - arena_chunk_map_bits_t *bitselm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - bitselm = arena_bitselm_get_mutable(chunk, pageind); - arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); - arena_decay_tick(tsdn, arena); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) -#endif -void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && unlikely(opt_junk_free)) - memset(ptr, JEMALLOC_FREE_JUNK, usize); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(n_arena_dalloc_junk_large); -#endif - -static void -arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr, bool junked) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, - pageind); - arena_run_t *run = &miscelm->run; - - if (config_fill || config_stats) { - size_t usize = arena_mapbits_large_size_get(chunk, pageind) - - large_pad; - - if (!junked) - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[index].ndalloc++; - arena->stats.lstats[index].curruns--; - } - } - - arena_run_dalloc(tsdn, arena, run, true, false, false); -} - -void -arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, - arena_chunk_t *chunk, void *ptr) -{ - - arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); -} - -void -arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr) -{ - - malloc_mutex_lock(tsdn, &arena->lock); - arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); - malloc_mutex_unlock(tsdn, &arena->lock); - arena_decay_tick(tsdn, arena); -} - -static void -arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t oldsize, size_t size) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, - pageind); - arena_run_t *run = &miscelm->run; - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. 
- */ - malloc_mutex_lock(tsdn, &arena->lock); - arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + - large_pad, true); - if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(tsdn, &arena->lock); -} - -static bool -arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = (oldsize + large_pad) >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - - large_pad); - - /* Try to extend the run. */ - malloc_mutex_lock(tsdn, &arena->lock); - if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, - pageind+npages) != 0) - goto label_fail; - followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); - if (oldsize + followsize >= usize_min) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. - */ - arena_run_t *run; - size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; - - usize = usize_max; - while (oldsize + followsize < usize) - usize = index2size(size2index(usize)-1); - assert(usize >= usize_min); - assert(usize >= oldsize); - splitsize = usize - oldsize; - if (splitsize == 0) - goto label_fail; - - run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; - if (arena_run_split_large(arena, run, splitsize, zero)) - goto label_fail; - - if (config_cache_oblivious && zero) { - /* - * Zero the trailing bytes of the original allocation's - * last page, since they are in an indeterminate state. - * There will always be trailing bytes, because ptr's - * offset from the beginning of the run is a multiple of - * CACHELINE in [0 .. PAGE). - */ - void *zbase = (void *)((uintptr_t)ptr + oldsize); - void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + - PAGE)); - size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; - assert(nzero > 0); - memset(zbase, 0, nzero); - } - - size = oldsize + splitsize; - npages = (size + large_pad) >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). - */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, pageind, size + large_pad, - flag_dirty | (flag_unzeroed_mask & - arena_mapbits_unzeroed_get(chunk, pageind))); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+npages-1))); - - if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(tsdn, &arena->lock); - return (false); - } -label_fail: - malloc_mutex_unlock(tsdn, &arena->lock); - return (true); -} - -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) -#endif -static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ - - if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, - old_usize - usize); - } -} -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(n_arena_ralloc_junk_large); -#endif - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - arena_chunk_t *chunk; - arena_t *arena; - - if (oldsize == usize_max) { - /* Current size class is compatible and maximal. */ - return (false); - } - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = extent_node_arena_get(&chunk->node); - - if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, - oldsize, usize_min, usize_max, zero); - if (config_fill && !ret && !zero) { - if (unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), - JEMALLOC_ALLOC_JUNK, - isalloc(tsdn, ptr, config_prof) - oldsize); - } else if (unlikely(opt_zero)) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(tsdn, ptr, config_prof) - oldsize); - } - } - return (ret); - } - - assert(oldsize > usize_max); - /* Fill before shrinking in order avoid a race. */ - arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); - return (false); -} - -bool -arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero) -{ - size_t usize_min, usize_max; - - /* Calls with non-zero extra had to clamp extra. */ - assert(extra == 0 || size + extra <= HUGE_MAXCLASS); - - if (unlikely(size > HUGE_MAXCLASS)) - return (true); - - usize_min = s2u(size); - usize_max = s2u(size + extra); - if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { - arena_chunk_t *chunk; - - /* - * Avoid moving the allocation if the size class can be left the - * same. 
- */ - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[size2index(oldsize)].reg_size == - oldsize); - if ((usize_max > SMALL_MAXCLASS || - size2index(usize_max) != size2index(oldsize)) && - (size > oldsize || usize_max < oldsize)) - return (true); - } else { - if (usize_max <= SMALL_MAXCLASS) - return (true); - if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, - usize_max, zero)) - return (true); - } - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); - return (false); - } else { - return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, - usize_max, zero)); - } -} - -static void * -arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - - if (alignment == 0) - return (arena_malloc(tsdn, arena, usize, size2index(usize), - zero, tcache, true)); - usize = sa2u(usize, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return (NULL); - return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); -} - -void * -arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache) -{ - void *ret; - size_t usize; - - usize = s2u(size); - if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) - return (NULL); - - if (likely(usize <= large_maxclass)) { - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, - zero)) - return (ptr); - - /* - * size and oldsize are different enough that we need to move - * the object. In that case, fall back to allocating new space - * and copying. - */ - ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, - alignment, zero, tcache); - if (ret == NULL) - return (NULL); - - /* - * Junk/zero-filling were already done by - * ipalloc()/arena_malloc(). - */ - - copysize = (usize < oldsize) ? 
usize : oldsize; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache, true); - } else { - ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, - zero, tcache); - } - return (ret); -} - -dss_prec_t -arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) -{ - dss_prec_t ret; - - malloc_mutex_lock(tsdn, &arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(tsdn, &arena->lock); - return (ret); -} - -bool -arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) -{ - - if (!have_dss) - return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(tsdn, &arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(tsdn, &arena->lock); - return (false); -} - -ssize_t -arena_lg_dirty_mult_default_get(void) -{ - - return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); -} - -bool -arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) -{ - - if (opt_purge != purge_mode_ratio) - return (true); - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); - return (false); -} - -ssize_t -arena_decay_time_default_get(void) -{ - - return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); -} - -bool -arena_decay_time_default_set(ssize_t decay_time) -{ - - if (opt_purge != purge_mode_decay) - return (true); - if (!arena_decay_time_valid(decay_time)) - return (true); - atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); - return (false); -} - -static void -arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty) -{ - - *nthreads += arena_nthreads_get(arena, false); - *dss = dss_prec_names[arena->dss_prec]; - *lg_dirty_mult = arena->lg_dirty_mult; - *decay_time = arena->decay.time; - *nactive += arena->nactive; - *ndirty += arena->ndirty; -} - -void -arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty) -{ - - malloc_mutex_lock(tsdn, &arena->lock); - arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, - decay_time, nactive, ndirty); - malloc_mutex_unlock(tsdn, &arena->lock); -} - -void -arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, - malloc_huge_stats_t *hstats) -{ - unsigned i; - - cassert(config_stats); - - malloc_mutex_lock(tsdn, &arena->lock); - arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, - decay_time, nactive, ndirty); - - astats->mapped += arena->stats.mapped; - astats->retained += arena->stats.retained; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->metadata_mapped += arena->stats.metadata_mapped; - astats->metadata_allocated += arena_metadata_allocated_get(arena); - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; - astats->allocated_huge += arena->stats.allocated_huge; - astats->nmalloc_huge += arena->stats.nmalloc_huge; - astats->ndalloc_huge += 
arena->stats.ndalloc_huge; - - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - - for (i = 0; i < nhclasses; i++) { - hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; - hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; - hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; - } - malloc_mutex_unlock(tsdn, &arena->lock); - - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - - malloc_mutex_lock(tsdn, &bin->lock); - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += bin->stats.nrequests; - bstats[i].curregs += bin->stats.curregs; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(tsdn, &bin->lock); - } -} - -unsigned -arena_nthreads_get(arena_t *arena, bool internal) -{ - - return (atomic_read_u(&arena->nthreads[internal])); -} - -void -arena_nthreads_inc(arena_t *arena, bool internal) -{ - - atomic_add_u(&arena->nthreads[internal], 1); -} - -void -arena_nthreads_dec(arena_t *arena, bool internal) -{ - - atomic_sub_u(&arena->nthreads[internal], 1); -} - -size_t -arena_extent_sn_next(arena_t *arena) -{ - - return (atomic_add_z(&arena->extent_sn_next, 1) - 1); -} - -arena_t * -arena_new(tsdn_t *tsdn, unsigned ind) -{ - arena_t *arena; - unsigned i; - - /* - * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly - * because there is no way to clean up if base_alloc() OOMs. - */ - if (config_stats) { - arena = (arena_t *)base_alloc(tsdn, - CACHELINE_CEILING(sizeof(arena_t)) + - QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) - + (nhclasses * sizeof(malloc_huge_stats_t))); - } else - arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); - if (arena == NULL) - return (NULL); - - arena->ind = ind; - arena->nthreads[0] = arena->nthreads[1] = 0; - if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) - return (NULL); - - if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t))); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t)) + - QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); - memset(arena->stats.hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); - } - - if (config_prof) - arena->prof_accumbytes = 0; - - if (config_cache_oblivious) { - /* - * A nondeterministic seed based on the address of arena reduces - * the likelihood of lockstep non-uniform cache index - * utilization among identical concurrent processes, but at the - * cost of test repeatability. For debug builds, instead use a - * deterministic seed. - */ - arena->offset_state = config_debug ? 
ind : - (size_t)(uintptr_t)arena; - } - - arena->dss_prec = chunk_dss_prec_get(); - - ql_new(&arena->achunks); - - arena->extent_sn_next = 0; - - arena->spare = NULL; - - arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); - arena->purging = false; - arena->nactive = 0; - arena->ndirty = 0; - - for (i = 0; i < NPSIZES; i++) - arena_run_heap_new(&arena->runs_avail[i]); - - qr_new(&arena->runs_dirty, rd_link); - qr_new(&arena->chunks_cache, cc_link); - - if (opt_purge == purge_mode_decay) - arena_decay_init(arena, arena_decay_time_default_get()); - - ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", - WITNESS_RANK_ARENA_HUGE)) - return (NULL); - - extent_tree_szsnad_new(&arena->chunks_szsnad_cached); - extent_tree_ad_new(&arena->chunks_ad_cached); - extent_tree_szsnad_new(&arena->chunks_szsnad_retained); - extent_tree_ad_new(&arena->chunks_ad_retained); - if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", - WITNESS_RANK_ARENA_CHUNKS)) - return (NULL); - ql_new(&arena->node_cache); - if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", - WITNESS_RANK_ARENA_NODE_CACHE)) - return (NULL); - - arena->chunk_hooks = chunk_hooks_default; - - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock, "arena_bin", - WITNESS_RANK_ARENA_BIN)) - return (NULL); - bin->runcur = NULL; - arena_run_heap_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); - } - - return (arena); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size <= arena_maxrun - * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs and bin_info->reg0_offset are also calculated here, since - * these settings are all interdependent. - */ -static void -bin_info_run_size_calc(arena_bin_info_t *bin_info) -{ - size_t pad_size; - size_t try_run_size, perfect_run_size, actual_run_size; - uint32_t try_nregs, perfect_nregs, actual_nregs; - - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. - */ - if (config_fill && unlikely(opt_redzone)) { - size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; - } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - - /* - * Compute run size under ideal conditions (no redzones, no limit on run - * size). 
- */
-	try_run_size = PAGE;
-	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
-	do {
-		perfect_run_size = try_run_size;
-		perfect_nregs = try_nregs;
-
-		try_run_size += PAGE;
-		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
-	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
-	assert(perfect_nregs <= RUN_MAXREGS);
-
-	actual_run_size = perfect_run_size;
-	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-	    bin_info->reg_interval);
-
-	/*
-	 * Redzones can require enough padding that not even a single region can
-	 * fit within the number of pages that would normally be dedicated to a
-	 * run for this size class.  Increase the run size until at least one
-	 * region fits.
-	 */
-	while (actual_nregs == 0) {
-		assert(config_fill && unlikely(opt_redzone));
-
-		actual_run_size += PAGE;
-		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-		    bin_info->reg_interval);
-	}
-
-	/*
-	 * Make sure that the run will fit within an arena chunk.
-	 */
-	while (actual_run_size > arena_maxrun) {
-		actual_run_size -= PAGE;
-		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-		    bin_info->reg_interval);
-	}
-	assert(actual_nregs > 0);
-	assert(actual_run_size == s2u(actual_run_size));
-
-	/* Copy final settings. */
-	bin_info->run_size = actual_run_size;
-	bin_info->nregs = actual_nregs;
-	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
-	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
-
-	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
-	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
-}
-
-static void
-bin_info_init(void)
-{
-	arena_bin_info_t *bin_info;
-
-#define BIN_INFO_INIT_bin_yes(index, size) \
-	bin_info = &arena_bin_info[index]; \
-	bin_info->reg_size = size; \
-	bin_info_run_size_calc(bin_info); \
-	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
-	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
-	SIZE_CLASSES
-#undef BIN_INFO_INIT_bin_yes
-#undef BIN_INFO_INIT_bin_no
-#undef SC
-}
-
-static void
-init_thp_initially_huge(void)
-{
-	int fd;
-	char buf[sizeof("[always] madvise never\n")];
-	ssize_t nread;
-	static const char *enabled_states[] = {
-		"[always] madvise never\n",
-		"always [madvise] never\n",
-		"always madvise [never]\n"
-	};
-	static const bool thp_initially_huge_states[] = {
-		true,
-		false,
-		false
-	};
-	unsigned i;
-
-	if (config_debug) {
-		for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
-		    i++) {
-			assert(sizeof(buf) > strlen(enabled_states[i]));
-		}
-	}
-	assert(sizeof(enabled_states)/sizeof(const char *) ==
-	    sizeof(thp_initially_huge_states)/sizeof(bool));
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
-	fd = (int)syscall(SYS_open,
-	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#else
-	fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#endif
-	if (fd == -1) {
-		goto label_error;
-	}
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
-	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
-#else
-	nread = read(fd, &buf, sizeof(buf));
-#endif
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
-	syscall(SYS_close, fd);
-#else
-	close(fd);
-#endif
-
-	if (nread < 1) {
-		goto label_error;
-	}
-	for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
-	    i++) {
-		if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
-			thp_initially_huge = thp_initially_huge_states[i];
-			return;
-		}
-	}
-
-label_error:
-	thp_initially_huge = false;
-}
-
-void
-arena_boot(void)
-{
-	unsigned i;
-
-	if (config_thp && opt_thp) {
-		init_thp_initially_huge();
-	}
-
-	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
-	arena_decay_time_default_set(opt_decay_time);
-
-	/*
-	 * Compute the header size such that it is large enough to contain the
-	 * page map.  The page map is biased to omit entries for the header
-	 * itself, so some iteration is necessary to compute the map bias.
- * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. - */ - map_bias = 0; - for (i = 0; i < 3; i++) { - size_t header_size = offsetof(arena_chunk_t, map_bits) + - ((sizeof(arena_chunk_map_bits_t) + - sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); - map_bias = (header_size + PAGE_MASK) >> LG_PAGE; - } - assert(map_bias > 0); - - map_misc_offset = offsetof(arena_chunk_t, map_bits) + - sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); - - arena_maxrun = chunksize - (map_bias << LG_PAGE); - assert(arena_maxrun > 0); - large_maxclass = index2size(size2index(chunksize)-1); - assert(large_maxclass > 0); - assert(large_maxclass + large_pad <= arena_maxrun); - nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); - nhclasses = NSIZES - nlclasses - NBINS; - - bin_info_init(); -} - -void -arena_prefork0(tsdn_t *tsdn, arena_t *arena) -{ - - malloc_mutex_prefork(tsdn, &arena->lock); -} - -void -arena_prefork1(tsdn_t *tsdn, arena_t *arena) -{ - - malloc_mutex_prefork(tsdn, &arena->chunks_mtx); -} - -void -arena_prefork2(tsdn_t *tsdn, arena_t *arena) -{ - - malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); -} - -void -arena_prefork3(tsdn_t *tsdn, arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(tsdn, &arena->bins[i].lock); - malloc_mutex_prefork(tsdn, &arena->huge_mtx); -} - -void -arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) -{ - unsigned i; - - malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); - malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); - malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); - malloc_mutex_postfork_parent(tsdn, &arena->lock); -} - -void -arena_postfork_child(tsdn_t *tsdn, arena_t *arena) -{ - unsigned i; - - malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); - malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); - malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); - malloc_mutex_postfork_child(tsdn, &arena->lock); -} diff --git a/sources/jemalloc/src/je_atomic.c b/sources/jemalloc/src/je_atomic.c deleted file mode 100755 index 77ee3131..00000000 --- a/sources/jemalloc/src/je_atomic.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_ATOMIC_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_base.c b/sources/jemalloc/src/je_base.c deleted file mode 100755 index 5681a3f3..00000000 --- a/sources/jemalloc/src/je_base.c +++ /dev/null @@ -1,187 +0,0 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. 
*/ - -static malloc_mutex_t base_mtx; -static size_t base_extent_sn_next; -static extent_tree_t base_avail_szsnad; -static extent_node_t *base_nodes; -static size_t base_allocated; -static size_t base_resident; -static size_t base_mapped; - -/******************************************************************************/ - -static extent_node_t * -base_node_try_alloc(tsdn_t *tsdn) -{ - extent_node_t *node; - - malloc_mutex_assert_owner(tsdn, &base_mtx); - - if (base_nodes == NULL) - return (NULL); - node = base_nodes; - base_nodes = *(extent_node_t **)node; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - return (node); -} - -static void -base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) -{ - - malloc_mutex_assert_owner(tsdn, &base_mtx); - - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - *(extent_node_t **)node = base_nodes; - base_nodes = node; -} - -static void -base_extent_node_init(extent_node_t *node, void *addr, size_t size) -{ - size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1; - - extent_node_init(node, NULL, addr, size, sn, true, true); -} - -static extent_node_t * -base_chunk_alloc(tsdn_t *tsdn, size_t minsize) -{ - extent_node_t *node; - size_t csize, nsize; - void *addr; - - malloc_mutex_assert_owner(tsdn, &base_mtx); - assert(minsize != 0); - node = base_node_try_alloc(tsdn); - /* Allocate enough space to also carve a node out if necessary. */ - nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; - csize = CHUNK_CEILING(minsize + nsize); - addr = chunk_alloc_base(csize); - if (addr == NULL) { - if (node != NULL) - base_node_dalloc(tsdn, node); - return (NULL); - } - base_mapped += csize; - if (node == NULL) { - node = (extent_node_t *)addr; - addr = (void *)((uintptr_t)addr + nsize); - csize -= nsize; - if (config_stats) { - base_allocated += nsize; - base_resident += PAGE_CEILING(nsize); - } - } - base_extent_node_init(node, addr, csize); - return (node); -} - -/* - * base_alloc() guarantees demand-zeroed memory, in order to make multi-page - * sparse data structures such as radix tree nodes efficient with respect to - * physical memory usage. - */ -void * -base_alloc(tsdn_t *tsdn, size_t size) -{ - void *ret; - size_t csize, usize; - extent_node_t *node; - extent_node_t key; - - /* - * Round size up to nearest multiple of the cacheline size, so that - * there is no chance of false cache line sharing. - */ - csize = CACHELINE_CEILING(size); - - usize = s2u(csize); - extent_node_init(&key, NULL, NULL, usize, 0, false, false); - malloc_mutex_lock(tsdn, &base_mtx); - node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key); - if (node != NULL) { - /* Use existing space. */ - extent_tree_szsnad_remove(&base_avail_szsnad, node); - } else { - /* Try to allocate more space. */ - node = base_chunk_alloc(tsdn, csize); - } - if (node == NULL) { - ret = NULL; - goto label_return; - } - - ret = extent_node_addr_get(node); - if (extent_node_size_get(node) > csize) { - extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); - extent_node_size_set(node, extent_node_size_get(node) - csize); - extent_tree_szsnad_insert(&base_avail_szsnad, node); - } else - base_node_dalloc(tsdn, node); - if (config_stats) { - base_allocated += csize; - /* - * Add one PAGE to base_resident for every page boundary that is - * crossed by the new allocation. 
- */ - base_resident += PAGE_CEILING((uintptr_t)ret + csize) - - PAGE_CEILING((uintptr_t)ret); - } - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); -label_return: - malloc_mutex_unlock(tsdn, &base_mtx); - return (ret); -} - -void -base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, - size_t *mapped) -{ - - malloc_mutex_lock(tsdn, &base_mtx); - assert(base_allocated <= base_resident); - assert(base_resident <= base_mapped); - *allocated = base_allocated; - *resident = base_resident; - *mapped = base_mapped; - malloc_mutex_unlock(tsdn, &base_mtx); -} - -bool -base_boot(void) -{ - - if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) - return (true); - base_extent_sn_next = 0; - extent_tree_szsnad_new(&base_avail_szsnad); - base_nodes = NULL; - - return (false); -} - -void -base_prefork(tsdn_t *tsdn) -{ - - malloc_mutex_prefork(tsdn, &base_mtx); -} - -void -base_postfork_parent(tsdn_t *tsdn) -{ - - malloc_mutex_postfork_parent(tsdn, &base_mtx); -} - -void -base_postfork_child(tsdn_t *tsdn) -{ - - malloc_mutex_postfork_child(tsdn, &base_mtx); -} diff --git a/sources/jemalloc/src/je_bitmap.c b/sources/jemalloc/src/je_bitmap.c deleted file mode 100755 index ac0f3b38..00000000 --- a/sources/jemalloc/src/je_bitmap.c +++ /dev/null @@ -1,111 +0,0 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -#ifdef USE_TREE - -void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ - unsigned i; - size_t group_count; - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - /* - * Compute the number of groups necessary to store nbits bits, and - * progressively work upward through the levels until reaching a level - * that requires only one group. - */ - binfo->levels[0].group_offset = 0; - group_count = BITMAP_BITS2GROUPS(nbits); - for (i = 1; group_count > 1; i++) { - assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - group_count = BITMAP_BITS2GROUPS(group_count); - } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); - binfo->nlevels = i; - binfo->nbits = nbits; -} - -static size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset); -} - -void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t extra; - unsigned i; - - /* - * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. 
- */ - memset(bitmap, 0xffU, bitmap_size(binfo)); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[1].group_offset - 1] >>= extra; - for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; - } -} - -#else /* USE_TREE */ - -void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - binfo->ngroups = BITMAP_BITS2GROUPS(nbits); - binfo->nbits = nbits; -} - -static size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->ngroups); -} - -void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t extra; - - memset(bitmap, 0xffU, bitmap_size(binfo)); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->ngroups - 1] >>= extra; -} - -#endif /* USE_TREE */ - -size_t -bitmap_size(const bitmap_info_t *binfo) -{ - - return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); -} diff --git a/sources/jemalloc/src/je_chunk.c b/sources/jemalloc/src/je_chunk.c deleted file mode 100755 index 94f28f2d..00000000 --- a/sources/jemalloc/src/je_chunk.c +++ /dev/null @@ -1,799 +0,0 @@ -#define JEMALLOC_CHUNK_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -const char *opt_dss = DSS_DEFAULT; -size_t opt_lg_chunk = 0; - -/* Used exclusively for gdump triggering. */ -static size_t curchunks; -static size_t highchunks; - -rtree_t chunks_rtree; - -/* Various chunk-related settings. */ -size_t chunksize; -size_t chunksize_mask; /* (chunksize - 1). */ -size_t chunk_npages; - -static void *chunk_alloc_default(void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, unsigned arena_ind); -static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind); -static bool chunk_commit_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_purge_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_split_default(void *chunk, size_t size, size_t size_a, - size_t size_b, bool committed, unsigned arena_ind); -static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, - size_t size_b, bool committed, unsigned arena_ind); - -const chunk_hooks_t chunk_hooks_default = { - chunk_alloc_default, - chunk_dalloc_default, - chunk_commit_default, - chunk_decommit_default, - chunk_purge_default, - chunk_split_default, - chunk_merge_default -}; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. 
- */ - -static void chunk_record(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad, - extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn, - bool zeroed, bool committed); - -/******************************************************************************/ - -static chunk_hooks_t -chunk_hooks_get_locked(arena_t *arena) -{ - - return (arena->chunk_hooks); -} - -chunk_hooks_t -chunk_hooks_get(tsdn_t *tsdn, arena_t *arena) -{ - chunk_hooks_t chunk_hooks; - - malloc_mutex_lock(tsdn, &arena->chunks_mtx); - chunk_hooks = chunk_hooks_get_locked(arena); - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - - return (chunk_hooks); -} - -chunk_hooks_t -chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks) -{ - chunk_hooks_t old_chunk_hooks; - - malloc_mutex_lock(tsdn, &arena->chunks_mtx); - old_chunk_hooks = arena->chunk_hooks; - /* - * Copy each field atomically so that it is impossible for readers to - * see partially updated pointers. There are places where readers only - * need one hook function pointer (therefore no need to copy the - * entirety of arena->chunk_hooks), and stale reads do not affect - * correctness, so they perform unlocked reads. - */ -#define ATOMIC_COPY_HOOK(n) do { \ - union { \ - chunk_##n##_t **n; \ - void **v; \ - } u; \ - u.n = &arena->chunk_hooks.n; \ - atomic_write_p(u.v, chunk_hooks->n); \ -} while (0) - ATOMIC_COPY_HOOK(alloc); - ATOMIC_COPY_HOOK(dalloc); - ATOMIC_COPY_HOOK(commit); - ATOMIC_COPY_HOOK(decommit); - ATOMIC_COPY_HOOK(purge); - ATOMIC_COPY_HOOK(split); - ATOMIC_COPY_HOOK(merge); -#undef ATOMIC_COPY_HOOK - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - - return (old_chunk_hooks); -} - -static void -chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, bool locked) -{ - static const chunk_hooks_t uninitialized_hooks = - CHUNK_HOOKS_INITIALIZER; - - if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == - 0) { - *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : - chunk_hooks_get(tsdn, arena); - } -} - -static void -chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks) -{ - - chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true); -} - -static void -chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks) -{ - - chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false); -} - -bool -chunk_register(const void *chunk, const extent_node_t *node, bool *gdump) -{ - - assert(extent_node_addr_get(node) == chunk); - - if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) - return (true); - if (config_prof && opt_prof) { - size_t size = extent_node_size_get(node); - size_t nadd = (size == 0) ? 1 : size / chunksize; - size_t cur = atomic_add_z(&curchunks, nadd); - size_t high = atomic_read_z(&highchunks); - while (cur > high && atomic_cas_z(&highchunks, high, cur)) { - /* - * Don't refresh cur, because it may have decreased - * since this thread lost the highchunks update race. - */ - high = atomic_read_z(&highchunks); - } - *gdump = (cur > high && prof_gdump_get_unlocked()); - } - - return (false); -} - -void -chunk_deregister(const void *chunk, const extent_node_t *node) -{ - bool err; - - err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); - assert(!err); - if (config_prof && opt_prof) { - size_t size = extent_node_size_get(node); - size_t nsub = (size == 0) ? 
1 : size / chunksize; - assert(atomic_read_z(&curchunks) >= nsub); - atomic_sub_z(&curchunks, nsub); - } -} - -/* - * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that - * best fits. - */ -static extent_node_t * -chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size) -{ - extent_node_t *node; - size_t qsize; - extent_node_t key; - - assert(size == CHUNK_CEILING(size)); - - qsize = extent_size_quantize_ceil(size); - extent_node_init(&key, arena, NULL, qsize, 0, false, false); - node = extent_tree_szsnad_nsearch(chunks_szsnad, &key); - assert(node == NULL || extent_node_size_get(node) >= size); - return node; -} - -static void * -chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, - void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, - bool *commit, bool dalloc_node) -{ - void *ret; - extent_node_t *node; - size_t alloc_size, leadsize, trailsize; - bool zeroed, committed; - - assert(CHUNK_CEILING(size) == size); - assert(alignment > 0); - assert(new_addr == NULL || alignment == chunksize); - assert(CHUNK_ADDR2BASE(new_addr) == new_addr); - /* - * Cached chunks use the node linkage embedded in their headers, in - * which case dalloc_node is true, and new_addr is non-NULL because - * we're operating on a specific chunk. - */ - assert(dalloc_node || new_addr != NULL); - - alloc_size = size + CHUNK_CEILING(alignment) - chunksize; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - malloc_mutex_lock(tsdn, &arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); - if (new_addr != NULL) { - extent_node_t key; - extent_node_init(&key, arena, new_addr, alloc_size, 0, false, - false); - node = extent_tree_ad_search(chunks_ad, &key); - } else { - node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size); - } - if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < - size)) { - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - return (NULL); - } - leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), - alignment) - (uintptr_t)extent_node_addr_get(node); - assert(new_addr == NULL || leadsize == 0); - assert(extent_node_size_get(node) >= leadsize + size); - trailsize = extent_node_size_get(node) - leadsize - size; - ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); - *sn = extent_node_sn_get(node); - zeroed = extent_node_zeroed_get(node); - if (zeroed) - *zero = true; - committed = extent_node_committed_get(node); - if (committed) - *commit = true; - /* Split the lead. */ - if (leadsize != 0 && - chunk_hooks->split(extent_node_addr_get(node), - extent_node_size_get(node), leadsize, size, false, arena->ind)) { - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - return (NULL); - } - /* Remove node from the tree. */ - extent_tree_szsnad_remove(chunks_szsnad, node); - extent_tree_ad_remove(chunks_ad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - if (leadsize != 0) { - /* Insert the leading space as a smaller chunk. */ - extent_node_size_set(node, leadsize); - extent_tree_szsnad_insert(chunks_szsnad, node); - extent_tree_ad_insert(chunks_ad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - node = NULL; - } - if (trailsize != 0) { - /* Split the trail. 
*/ - if (chunk_hooks->split(ret, size + trailsize, size, - trailsize, false, arena->ind)) { - if (dalloc_node && node != NULL) - arena_node_dalloc(tsdn, arena, node); - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, - chunks_ad, cache, ret, size + trailsize, *sn, - zeroed, committed); - return (NULL); - } - /* Insert the trailing space as a smaller chunk. */ - if (node == NULL) { - node = arena_node_alloc(tsdn, arena); - if (node == NULL) { - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - chunk_record(tsdn, arena, chunk_hooks, - chunks_szsnad, chunks_ad, cache, ret, size - + trailsize, *sn, zeroed, committed); - return (NULL); - } - } - extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), - trailsize, *sn, zeroed, committed); - extent_tree_szsnad_insert(chunks_szsnad, node); - extent_tree_ad_insert(chunks_ad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - node = NULL; - } - if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad, - cache, ret, size, *sn, zeroed, committed); - return (NULL); - } - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - - assert(dalloc_node || node != NULL); - if (dalloc_node && node != NULL) - arena_node_dalloc(tsdn, arena, node); - if (*zero) { - if (!zeroed) - memset(ret, 0, size); - else if (config_debug) { - size_t i; - size_t *p = (size_t *)(uintptr_t)ret; - - for (i = 0; i < size / sizeof(size_t); i++) - assert(p[i] == 0); - } - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); - } - return (ret); -} - -/* - * If the caller specifies (!*zero), it is still possible to receive zeroed - * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes - * advantage of this to avoid demanding zeroed chunks, but taking advantage of - * them if they are returned. - */ -static void * -chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - /* "primary" dss. */ - if (have_dss && dss_prec == dss_prec_primary && (ret = - chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) - return (ret); - /* mmap. */ - if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) != - NULL) - return (ret); - /* "secondary" dss. */ - if (have_dss && dss_prec == dss_prec_secondary && (ret = - chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) - return (ret); - - /* All strategies for allocation failed. */ - return (NULL); -} - -void * -chunk_alloc_base(size_t size) -{ - void *ret; - bool zero, commit; - - /* - * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() - * because it's critical that chunk_alloc_base() return untouched - * demand-zeroed virtual memory. 
- */ - zero = true; - commit = true; - ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); - if (ret == NULL) - return (NULL); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - - return (ret); -} - -void * -chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, - bool *commit, bool dalloc_node) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = chunk_recycle(tsdn, arena, chunk_hooks, - &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true, - new_addr, size, alignment, sn, zero, commit, dalloc_node); - if (ret == NULL) - return (NULL); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - return (ret); -} - -static arena_t * -chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) -{ - arena_t *arena; - - arena = arena_get(tsdn, arena_ind, false); - /* - * The arena we're allocating on behalf of must have been initialized - * already. - */ - assert(arena != NULL); - return (arena); -} - -static void * -chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) -{ - void *ret; - - ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero, - commit, arena->dss_prec); - if (ret == NULL) - return (NULL); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - - return (ret); -} - -static void * -chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit, unsigned arena_ind) -{ - tsdn_t *tsdn; - arena_t *arena; - - tsdn = tsdn_fetch(); - arena = chunk_arena_get(tsdn, arena_ind); - - return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment, - zero, commit)); -} - -static void * -chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, - bool *commit) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = chunk_recycle(tsdn, arena, chunk_hooks, - &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false, - new_addr, size, alignment, sn, zero, commit, true); - - if (config_stats && ret != NULL) - arena->stats.retained -= size; - - return (ret); -} - -void * -chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, - bool *commit) -{ - void *ret; - - chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); - - ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, - alignment, sn, zero, commit); - if (ret == NULL) { - if (chunk_hooks->alloc == chunk_alloc_default) { - /* Call directly to propagate tsdn. 
*/ - ret = chunk_alloc_default_impl(tsdn, arena, new_addr, - size, alignment, zero, commit); - } else { - ret = chunk_hooks->alloc(new_addr, size, alignment, - zero, commit, arena->ind); - } - - if (ret == NULL) - return (NULL); - - *sn = arena_extent_sn_next(arena); - - if (config_valgrind && chunk_hooks->alloc != - chunk_alloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); - } - - return (ret); -} - -static void -chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, size_t sn, bool zeroed, bool committed) -{ - bool unzeroed; - extent_node_t *node, *prev; - extent_node_t key; - - assert(!cache || !zeroed); - unzeroed = cache || !zeroed; - JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - - malloc_mutex_lock(tsdn, &arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); - extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0, - false, false); - node = extent_tree_ad_nsearch(chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && extent_node_addr_get(node) == - extent_node_addr_get(&key) && extent_node_committed_get(node) == - committed && !chunk_hooks->merge(chunk, size, - extent_node_addr_get(node), extent_node_size_get(node), false, - arena->ind)) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szsnad. - */ - extent_tree_szsnad_remove(chunks_szsnad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - extent_node_addr_set(node, chunk); - extent_node_size_set(node, size + extent_node_size_get(node)); - if (sn < extent_node_sn_get(node)) - extent_node_sn_set(node, sn); - extent_node_zeroed_set(node, extent_node_zeroed_get(node) && - !unzeroed); - extent_tree_szsnad_insert(chunks_szsnad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - } else { - /* Coalescing forward failed, so insert a new node. */ - node = arena_node_alloc(tsdn, arena); - if (node == NULL) { - /* - * Node allocation failed, which is an exceedingly - * unlikely failure. Leak chunk after making sure its - * pages have already been purged, so that this is only - * a virtual memory leak. - */ - if (cache) { - chunk_purge_wrapper(tsdn, arena, chunk_hooks, - chunk, size, 0, size); - } - goto label_return; - } - extent_node_init(node, arena, chunk, size, sn, !unzeroed, - committed); - extent_tree_ad_insert(chunks_ad, node); - extent_tree_szsnad_insert(chunks_szsnad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - } - - /* Try to coalesce backward. */ - prev = extent_tree_ad_prev(chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + - extent_node_size_get(prev)) == chunk && - extent_node_committed_get(prev) == committed && - !chunk_hooks->merge(extent_node_addr_get(prev), - extent_node_size_get(prev), chunk, size, false, arena->ind)) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szsnad. 
- */ - extent_tree_szsnad_remove(chunks_szsnad, prev); - extent_tree_ad_remove(chunks_ad, prev); - arena_chunk_cache_maybe_remove(arena, prev, cache); - extent_tree_szsnad_remove(chunks_szsnad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - extent_node_addr_set(node, extent_node_addr_get(prev)); - extent_node_size_set(node, extent_node_size_get(prev) + - extent_node_size_get(node)); - if (extent_node_sn_get(prev) < extent_node_sn_get(node)) - extent_node_sn_set(node, extent_node_sn_get(prev)); - extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && - extent_node_zeroed_get(node)); - extent_tree_szsnad_insert(chunks_szsnad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - - arena_node_dalloc(tsdn, arena, prev); - } - -label_return: - malloc_mutex_unlock(tsdn, &arena->chunks_mtx); -} - -void -chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, size_t sn, bool committed) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached, - &arena->chunks_ad_cached, true, chunk, size, sn, false, - committed); - arena_maybe_purge(tsdn, arena); -} - -static bool -chunk_dalloc_default_impl(void *chunk, size_t size) -{ - - if (!have_dss || !chunk_in_dss(chunk)) - return (chunk_dalloc_mmap(chunk, size)); - return (true); -} - -static bool -chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind) -{ - - return (chunk_dalloc_default_impl(chunk, size)); -} - -void -chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, size_t sn, bool zeroed, bool committed) -{ - bool err; - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); - /* Try to deallocate. */ - if (chunk_hooks->dalloc == chunk_dalloc_default) { - /* Call directly to propagate tsdn. */ - err = chunk_dalloc_default_impl(chunk, size); - } else - err = chunk_hooks->dalloc(chunk, size, committed, arena->ind); - - if (!err) - return; - /* Try to decommit; purge if that fails. 
*/ - if (committed) { - committed = chunk_hooks->decommit(chunk, size, 0, size, - arena->ind); - } - zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, - arena->ind); - chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained, - &arena->chunks_ad_retained, false, chunk, size, sn, zeroed, - committed); - - if (config_stats) - arena->stats.retained += size; -} - -static bool -chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -static bool -chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -static bool -chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert((offset & PAGE_MASK) == 0); - assert(length != 0); - assert((length & PAGE_MASK) == 0); - - return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -bool -chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, size_t offset, size_t length) -{ - - chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); - return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); -} - -static bool -chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, - bool committed, unsigned arena_ind) -{ - - if (!maps_coalesce) - return (true); - return (false); -} - -static bool -chunk_merge_default_impl(void *chunk_a, void *chunk_b) -{ - - if (!maps_coalesce) - return (true); - if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b)) - return (true); - - return (false); -} - -static bool -chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - bool committed, unsigned arena_ind) -{ - - return (chunk_merge_default_impl(chunk_a, chunk_b)); -} - -static rtree_node_elm_t * -chunks_rtree_node_alloc(size_t nelms) -{ - - return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms * - sizeof(rtree_node_elm_t))); -} - -bool -chunk_boot(void) -{ -#ifdef _WIN32 - SYSTEM_INFO info; - GetSystemInfo(&info); - - /* - * Verify actual page size is equal to or an integral multiple of - * configured page size. - */ - if (info.dwPageSize & ((1U << LG_PAGE) - 1)) - return (true); - - /* - * Configure chunksize (if not set) to match granularity (usually 64K), - * so pages_map will always take fast path. - */ - if (!opt_lg_chunk) { - opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) - - 1; - } -#else - if (!opt_lg_chunk) - opt_lg_chunk = LG_CHUNK_DEFAULT; -#endif - - /* Set variables according to the value of opt_lg_chunk. 
*/ - chunksize = (ZU(1) << opt_lg_chunk); - assert(chunksize >= PAGE); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> LG_PAGE); - - if (have_dss) - chunk_dss_boot(); - if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk), chunks_rtree_node_alloc, NULL)) - return (true); - - return (false); -} diff --git a/sources/jemalloc/src/je_chunk_dss.c b/sources/jemalloc/src/je_chunk_dss.c deleted file mode 100755 index 8c679395..00000000 --- a/sources/jemalloc/src/je_chunk_dss.c +++ /dev/null @@ -1,247 +0,0 @@ -#define JEMALLOC_CHUNK_DSS_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ -/* Data. */ - -const char *dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; - -/* - * Current dss precedence default, used when creating new arenas. NB: This is - * stored as unsigned rather than dss_prec_t because in principle there's no - * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use - * atomic operations to synchronize the setting. - */ -static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT; - -/* Base address of the DSS. */ -static void *dss_base; -/* Atomic boolean indicating whether the DSS is exhausted. */ -static unsigned dss_exhausted; -/* Atomic current upper limit on DSS addresses. */ -static void *dss_max; - -/******************************************************************************/ - -static void * -chunk_dss_sbrk(intptr_t increment) -{ - -#ifdef JEMALLOC_DSS - return (sbrk(increment)); -#else - not_implemented(); - return (NULL); -#endif -} - -dss_prec_t -chunk_dss_prec_get(void) -{ - dss_prec_t ret; - - if (!have_dss) - return (dss_prec_disabled); - ret = (dss_prec_t)atomic_read_u(&dss_prec_default); - return (ret); -} - -bool -chunk_dss_prec_set(dss_prec_t dss_prec) -{ - - if (!have_dss) - return (dss_prec != dss_prec_disabled); - atomic_write_u(&dss_prec_default, (unsigned)dss_prec); - return (false); -} - -static void * -chunk_dss_max_update(void *new_addr) -{ - void *max_cur; - spin_t spinner; - - /* - * Get the current end of the DSS as max_cur and assure that dss_max is - * up to date. - */ - spin_init(&spinner); - while (true) { - void *max_prev = atomic_read_p(&dss_max); - - max_cur = chunk_dss_sbrk(0); - if ((uintptr_t)max_prev > (uintptr_t)max_cur) { - /* - * Another thread optimistically updated dss_max. Wait - * for it to finish. - */ - spin_adaptive(&spinner); - continue; - } - if (!atomic_cas_p(&dss_max, max_prev, max_cur)) - break; - } - /* Fixed new_addr can only be supported if it is at the edge of DSS. */ - if (new_addr != NULL && max_cur != new_addr) - return (NULL); - - return (max_cur); -} - -void * -chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit) -{ - cassert(have_dss); - assert(size > 0 && (size & chunksize_mask) == 0); - assert(alignment > 0 && (alignment & chunksize_mask) == 0); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. - */ - if ((intptr_t)size < 0) - return (NULL); - - if (!atomic_read_u(&dss_exhausted)) { - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. 
- */ - while (true) { - void *ret, *max_cur, *dss_next, *dss_prev; - void *gap_addr_chunk, *gap_addr_subchunk; - size_t gap_size_chunk, gap_size_subchunk; - intptr_t incr; - - max_cur = chunk_dss_max_update(new_addr); - if (max_cur == NULL) - goto label_oom; - - /* - * Compute how much chunk-aligned gap space (if any) is - * necessary to satisfy alignment. This space can be - * recycled for later use. - */ - gap_addr_chunk = (void *)(CHUNK_CEILING( - (uintptr_t)max_cur)); - ret = (void *)ALIGNMENT_CEILING( - (uintptr_t)gap_addr_chunk, alignment); - gap_size_chunk = (uintptr_t)ret - - (uintptr_t)gap_addr_chunk; - /* - * Compute the address just past the end of the desired - * allocation space. - */ - dss_next = (void *)((uintptr_t)ret + size); - if ((uintptr_t)ret < (uintptr_t)max_cur || - (uintptr_t)dss_next < (uintptr_t)max_cur) - goto label_oom; /* Wrap-around. */ - /* Compute the increment, including subchunk bytes. */ - gap_addr_subchunk = max_cur; - gap_size_subchunk = (uintptr_t)ret - - (uintptr_t)gap_addr_subchunk; - incr = gap_size_subchunk + size; - - assert((uintptr_t)max_cur + incr == (uintptr_t)ret + - size); - - /* - * Optimistically update dss_max, and roll back below if - * sbrk() fails. No other thread will try to extend the - * DSS while dss_max is greater than the current DSS - * max reported by sbrk(0). - */ - if (atomic_cas_p(&dss_max, max_cur, dss_next)) - continue; - - /* Try to allocate. */ - dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == max_cur) { - /* Success. */ - if (gap_size_chunk != 0) { - chunk_hooks_t chunk_hooks = - CHUNK_HOOKS_INITIALIZER; - chunk_dalloc_wrapper(tsdn, arena, - &chunk_hooks, gap_addr_chunk, - gap_size_chunk, - arena_extent_sn_next(arena), false, - true); - } - if (*zero) { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - ret, size); - memset(ret, 0, size); - } - if (!*commit) - *commit = pages_decommit(ret, size); - return (ret); - } - - /* - * Failure, whether due to OOM or a race with a raw - * sbrk() call from outside the allocator. Try to roll - * back optimistic dss_max update; if rollback fails, - * it's due to another caller of this function having - * succeeded since this invocation started, in which - * case rollback is not necessary. - */ - atomic_cas_p(&dss_max, dss_next, max_cur); - if (dss_prev == (void *)-1) { - /* OOM. 
*/ - atomic_write_u(&dss_exhausted, (unsigned)true); - goto label_oom; - } - } - } -label_oom: - return (NULL); -} - -static bool -chunk_in_dss_helper(void *chunk, void *max) -{ - - return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < - (uintptr_t)max); -} - -bool -chunk_in_dss(void *chunk) -{ - - cassert(have_dss); - - return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); -} - -bool -chunk_dss_mergeable(void *chunk_a, void *chunk_b) -{ - void *max; - - cassert(have_dss); - - max = atomic_read_p(&dss_max); - return (chunk_in_dss_helper(chunk_a, max) == - chunk_in_dss_helper(chunk_b, max)); -} - -void -chunk_dss_boot(void) -{ - - cassert(have_dss); - - dss_base = chunk_dss_sbrk(0); - dss_exhausted = (unsigned)(dss_base == (void *)-1); - dss_max = dss_base; -} - -/******************************************************************************/ diff --git a/sources/jemalloc/src/je_chunk_mmap.c b/sources/jemalloc/src/je_chunk_mmap.c deleted file mode 100755 index 73fc497a..00000000 --- a/sources/jemalloc/src/je_chunk_mmap.c +++ /dev/null @@ -1,78 +0,0 @@ -#define JEMALLOC_CHUNK_MMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) -{ - void *ret; - size_t alloc_size; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - do { - void *pages; - size_t leadsize; - pages = pages_map(NULL, alloc_size, commit); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size, commit); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - return (ret); -} - -void * -chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit) -{ - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). - * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. 
- */ - - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = pages_map(new_addr, size, commit); - if (ret == NULL || ret == new_addr) - return (ret); - assert(new_addr == NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); - } - - assert(ret != NULL); - *zero = true; - return (ret); -} - -bool -chunk_dalloc_mmap(void *chunk, size_t size) -{ - - if (config_munmap) - pages_unmap(chunk, size); - - return (!config_munmap); -} diff --git a/sources/jemalloc/src/je_ckh.c b/sources/jemalloc/src/je_ckh.c deleted file mode 100755 index 159bd8ae..00000000 --- a/sources/jemalloc/src/je_ckh.c +++ /dev/null @@ -1,569 +0,0 @@ -/* - ******************************************************************************* - * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each - * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash - * functions are employed. The original cuckoo hashing algorithm was described - * in: - * - * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms - * 51(2):122-144. - * - * Generalization of cuckoo hashing was discussed in: - * - * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical - * alternative to traditional hash tables. In Proceedings of the 7th - * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, - * January 2006. - * - * This implementation uses precisely two hash functions because that is the - * fewest that can work, and supporting multiple hashes is an implementation - * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) - * that shows approximate expected maximum load factors for various - * configurations: - * - * | #cells/bucket | - * #hashes | 1 | 2 | 4 | 8 | - * --------+-------+-------+-------+-------+ - * 1 | 0.006 | 0.006 | 0.03 | 0.12 | - * 2 | 0.49 | 0.86 |>0.93< |>0.96< | - * 3 | 0.91 | 0.97 | 0.98 | 0.999 | - * 4 | 0.97 | 0.99 | 0.999 | | - * - * The number of cells per bucket is chosen such that a bucket fits in one cache - * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, - * respectively. - * - ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); -static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); - -/******************************************************************************/ - -/* - * Search bucket for key and return the cell number if found; SIZE_T_MAX - * otherwise. - */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ - ckhc_t *cell; - unsigned i; - - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); - } - - return (SIZE_T_MAX); -} - -/* - * Search table for key and return cell number if found; SIZE_T_MAX otherwise. - */ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ - size_t hashes[2], bucket, cell; - - assert(ckh != NULL); - - ckh->hash(key, hashes); - - /* Search primary bucket. 
*/ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); - - /* Search secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - return (cell); -} - -JEMALLOC_INLINE_C bool -ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ - ckhc_t *cell; - unsigned offset, i; - - /* - * Cycle through the cells in the bucket, starting at a random position. - * The randomness avoids worst-case search overhead as buckets fill up. - */ - offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, - LG_CKH_BUCKET_CELLS); - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + - ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; - if (cell->key == NULL) { - cell->key = key; - cell->data = data; - ckh->count++; - return (false); - } - } - - return (true); -} - -/* - * No space is available in bucket. Randomly evict an item, then try to find an - * alternate location for that item. Iteratively repeat this - * eviction/relocation procedure until either success or detection of an - * eviction/relocation bucket cycle. - */ -JEMALLOC_INLINE_C bool -ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ - const void *key, *data, *tkey, *tdata; - ckhc_t *cell; - size_t hashes[2], bucket, tbucket; - unsigned i; - - bucket = argbucket; - key = *argkey; - data = *argdata; - while (true) { - /* - * Choose a random item within the bucket to evict. This is - * critical to correct function, because without (eventually) - * evicting all items within a bucket during iteration, it - * would be possible to get stuck in an infinite loop if there - * were an item for which both hashes indicated the same - * bucket. - */ - i = (unsigned)prng_lg_range_u64(&ckh->prng_state, - LG_CKH_BUCKET_CELLS); - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - assert(cell->key != NULL); - - /* Swap cell->{key,data} and {key,data} (evict). */ - tkey = cell->key; tdata = cell->data; - cell->key = key; cell->data = data; - key = tkey; data = tdata; - -#ifdef CKH_COUNT - ckh->nrelocs++; -#endif - - /* Find the alternate bucket for the evicted item. */ - ckh->hash(key, hashes); - tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (tbucket == bucket) { - tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - - 1); - /* - * It may be that (tbucket == bucket) still, if the - * item's hashes both indicate this bucket. However, - * we are guaranteed to eventually escape this bucket - * during iteration, assuming pseudo-random item - * selection (true randomness would make infinite - * looping a remote possibility). The reason we can - * never get trapped forever is that there are two - * cases: - * - * 1) This bucket == argbucket, so we will quickly - * detect an eviction cycle and terminate. - * 2) An item was evicted to this bucket from another, - * which means that at least one item in this bucket - * has hashes that indicate distinct buckets. - */ - } - /* Check for a cycle. 
*/ - if (tbucket == argbucket) { - *argkey = key; - *argdata = data; - return (true); - } - - bucket = tbucket; - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); - } -} - -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ - size_t hashes[2], bucket; - const void *key = *argkey; - const void *data = *argdata; - - ckh->hash(key, hashes); - - /* Try to insert in primary bucket. */ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); - - /* Try to insert in secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); - - /* - * Try to find a place for this item via iterative eviction/relocation. - */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); -} - -/* - * Try to rebuild the hash table from scratch by inserting all items from the - * old table into the new. - */ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ - size_t count, i, nins; - const void *key, *data; - - count = ckh->count; - ckh->count = 0; - for (i = nins = 0; nins < count; i++) { - if (aTab[i].key != NULL) { - key = aTab[i].key; - data = aTab[i].data; - if (ckh_try_insert(ckh, &key, &data)) { - ckh->count = count; - return (true); - } - nins++; - } - } - - return (false); -} - -static bool -ckh_grow(tsd_t *tsd, ckh_t *ckh) -{ - bool ret; - ckhc_t *tab, *ttab; - unsigned lg_prevbuckets, lg_curcells; - -#ifdef CKH_COUNT - ckh->ngrows++; -#endif - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table will have to be doubled more than once in order to create a - * usable table. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; - while (true) { - size_t usize; - - lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { - ret = true; - goto label_return; - } - tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, - true, NULL, true, arena_ichoose(tsd, NULL)); - if (tab == NULL) { - ret = true; - goto label_return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); - break; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; - } - - ret = false; -label_return: - return (ret); -} - -static void -ckh_shrink(tsd_t *tsd, ckh_t *ckh) -{ - ckhc_t *tab, *ttab; - size_t usize; - unsigned lg_prevbuckets, lg_curcells; - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table rebuild will fail. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - return; - tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, - true, arena_ichoose(tsd, NULL)); - if (tab == NULL) { - /* - * An OOM error isn't worth propagating, since it doesn't - * prevent this or future operations from proceeding. - */ - return; - } - /* Swap in new table. 
*/ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); -#ifdef CKH_COUNT - ckh->nshrinks++; -#endif - return; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; -#ifdef CKH_COUNT - ckh->nshrinkfails++; -#endif -} - -bool -ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp) -{ - bool ret; - size_t mincells, usize; - unsigned lg_mincells; - - assert(minitems > 0); - assert(hash != NULL); - assert(keycomp != NULL); - -#ifdef CKH_COUNT - ckh->ngrows = 0; - ckh->nshrinks = 0; - ckh->nshrinkfails = 0; - ckh->ninserts = 0; - ckh->nrelocs = 0; -#endif - ckh->prng_state = 42; /* Value doesn't really matter. */ - ckh->count = 0; - - /* - * Find the minimum power of 2 that is large enough to fit minitems - * entries. We are using (2+,2) cuckoo hashing, which has an expected - * maximum load factor of at least ~0.86, so 0.75 is a conservative load - * factor that will typically allow mincells items to fit without ever - * growing the table. - */ - assert(LG_CKH_BUCKET_CELLS > 0); - mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; - for (lg_mincells = LG_CKH_BUCKET_CELLS; - (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ - ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->hash = hash; - ckh->keycomp = keycomp; - - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { - ret = true; - goto label_return; - } - ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, - NULL, true, arena_ichoose(tsd, NULL)); - if (ckh->tab == NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return (ret); -} - -void -ckh_delete(tsd_t *tsd, ckh_t *ckh) -{ - - assert(ckh != NULL); - -#ifdef CKH_VERBOSE - malloc_printf( - "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," - " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," - " nrelocs: %"FMTu64"\n", __func__, ckh, - (unsigned long long)ckh->ngrows, - (unsigned long long)ckh->nshrinks, - (unsigned long long)ckh->nshrinkfails, - (unsigned long long)ckh->ninserts, - (unsigned long long)ckh->nrelocs); -#endif - - idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); - if (config_debug) - memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); -} - -size_t -ckh_count(ckh_t *ckh) -{ - - assert(ckh != NULL); - - return (ckh->count); -} - -bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ - size_t i, ncells; - - for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + - LG_CKH_BUCKET_CELLS)); i < ncells; i++) { - if (ckh->tab[i].key != NULL) { - if (key != NULL) - *key = (void *)ckh->tab[i].key; - if (data != NULL) - *data = (void *)ckh->tab[i].data; - *tabind = i + 1; - return (false); - } - } - - return (true); -} - -bool -ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) -{ - bool ret; - - assert(ckh != NULL); - assert(ckh_search(ckh, key, NULL, NULL)); - -#ifdef CKH_COUNT - ckh->ninserts++; -#endif - - while (ckh_try_insert(ckh, &key, &data)) { - if (ckh_grow(tsd, ckh)) { - ret = true; - goto label_return; - } - } - - ret = false; -label_return: - return (ret); -} - -bool -ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void 
**key, - void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - ckh->tab[cell].key = NULL; - ckh->tab[cell].data = NULL; /* Not necessary. */ - - ckh->count--; - /* Try to halve the table if it is less than 1/4 full. */ - if (ckh->count < (ZU(1) << (ckh->lg_curbuckets - + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets - > ckh->lg_minbuckets) { - /* Ignore error due to OOM. */ - ckh_shrink(tsd, ckh); - } - - return (false); - } - - return (true); -} - -bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - return (false); - } - - return (true); -} - -void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - - hash(key, strlen((const char *)key), 0x94122f33U, r_hash); -} - -bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); - - return (strcmp((char *)k1, (char *)k2) ? false : true); -} - -void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ - union { - const void *v; - size_t i; - } u; - - assert(sizeof(u.v) == sizeof(u.i)); - u.v = key; - hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); -} - -bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); -} diff --git a/sources/jemalloc/src/je_ctl.c b/sources/jemalloc/src/je_ctl.c deleted file mode 100755 index 56bc4f4c..00000000 --- a/sources/jemalloc/src/je_ctl.c +++ /dev/null @@ -1,2258 +0,0 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* - * ctl_mtx protects the following: - * - ctl_stats.* - */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; - -/******************************************************************************/ -/* Helpers for named and indexed nodes. */ - -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - - return ((node->named) ? (const ctl_named_node_t *)node : NULL); -} - -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, size_t index) -{ - const ctl_named_node_t *children = ctl_named_node(node->children); - - return (children ? &children[index] : NULL); -} - -JEMALLOC_INLINE_C const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return (!node->named ? (const ctl_indexed_node_t *)node : NULL); -} - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -#define CTL_PROTO(n) \ -static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ - const size_t *mib, size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i); -static bool ctl_grow(tsdn_t *tsdn); -static void ctl_refresh(tsdn_t *tsdn); -static bool ctl_init(tsdn_t *tsdn); -static int ctl_lookup(tsdn_t *tsdn, const char *name, - ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); - -CTL_PROTO(version) -CTL_PROTO(epoch) -CTL_PROTO(thread_tcache_enabled) -CTL_PROTO(thread_tcache_flush) -CTL_PROTO(thread_prof_name) -CTL_PROTO(thread_prof_active) -CTL_PROTO(thread_arena) -CTL_PROTO(thread_allocated) -CTL_PROTO(thread_allocatedp) -CTL_PROTO(thread_deallocated) -CTL_PROTO(thread_deallocatedp) -CTL_PROTO(config_cache_oblivious) -CTL_PROTO(config_debug) -CTL_PROTO(config_fill) -CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_malloc_conf) -CTL_PROTO(config_munmap) -CTL_PROTO(config_prof) -CTL_PROTO(config_prof_libgcc) -CTL_PROTO(config_prof_libunwind) -CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_thp) -CTL_PROTO(config_tls) -CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) -CTL_PROTO(config_xmalloc) -CTL_PROTO(opt_abort) -CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) -CTL_PROTO(opt_narenas) -CTL_PROTO(opt_purge) -CTL_PROTO(opt_lg_dirty_mult) -CTL_PROTO(opt_decay_time) -CTL_PROTO(opt_stats_print) -CTL_PROTO(opt_junk) -CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) -CTL_PROTO(opt_utrace) -CTL_PROTO(opt_xmalloc) -CTL_PROTO(opt_tcache) -CTL_PROTO(opt_lg_tcache_max) -CTL_PROTO(opt_thp) -CTL_PROTO(opt_prof) -CTL_PROTO(opt_prof_prefix) -CTL_PROTO(opt_prof_active) -CTL_PROTO(opt_prof_thread_active_init) -CTL_PROTO(opt_lg_prof_sample) -CTL_PROTO(opt_lg_prof_interval) -CTL_PROTO(opt_prof_gdump) -CTL_PROTO(opt_prof_final) -CTL_PROTO(opt_prof_leak) -CTL_PROTO(opt_prof_accum) -CTL_PROTO(tcache_create) -CTL_PROTO(tcache_flush) -CTL_PROTO(tcache_destroy) -static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all); -CTL_PROTO(arena_i_purge) -CTL_PROTO(arena_i_decay) -CTL_PROTO(arena_i_reset) -CTL_PROTO(arena_i_dss) -CTL_PROTO(arena_i_lg_dirty_mult) -CTL_PROTO(arena_i_decay_time) -CTL_PROTO(arena_i_chunk_hooks) -INDEX_PROTO(arena_i) -CTL_PROTO(arenas_bin_i_size) -CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) -INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) -CTL_PROTO(arenas_hchunk_i_size) -INDEX_PROTO(arenas_hchunk_i) -CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) -CTL_PROTO(arenas_lg_dirty_mult) -CTL_PROTO(arenas_decay_time) -CTL_PROTO(arenas_quantum) -CTL_PROTO(arenas_page) -CTL_PROTO(arenas_tcache_max) -CTL_PROTO(arenas_nbins) -CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_nhchunks) -CTL_PROTO(arenas_extend) -CTL_PROTO(prof_thread_active_init) -CTL_PROTO(prof_active) -CTL_PROTO(prof_dump) -CTL_PROTO(prof_gdump) -CTL_PROTO(prof_reset) -CTL_PROTO(prof_interval) -CTL_PROTO(lg_prof_sample) -CTL_PROTO(stats_arenas_i_small_allocated) 
-CTL_PROTO(stats_arenas_i_small_nmalloc) -CTL_PROTO(stats_arenas_i_small_ndalloc) -CTL_PROTO(stats_arenas_i_small_nrequests) -CTL_PROTO(stats_arenas_i_large_allocated) -CTL_PROTO(stats_arenas_i_large_nmalloc) -CTL_PROTO(stats_arenas_i_large_ndalloc) -CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_huge_allocated) -CTL_PROTO(stats_arenas_i_huge_nmalloc) -CTL_PROTO(stats_arenas_i_huge_ndalloc) -CTL_PROTO(stats_arenas_i_huge_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_nmalloc) -CTL_PROTO(stats_arenas_i_bins_j_ndalloc) -CTL_PROTO(stats_arenas_i_bins_j_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_curregs) -CTL_PROTO(stats_arenas_i_bins_j_nfills) -CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) -CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) -INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) -CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) -CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) -INDEX_PROTO(stats_arenas_i_hchunks_j) -CTL_PROTO(stats_arenas_i_nthreads) -CTL_PROTO(stats_arenas_i_dss) -CTL_PROTO(stats_arenas_i_lg_dirty_mult) -CTL_PROTO(stats_arenas_i_decay_time) -CTL_PROTO(stats_arenas_i_pactive) -CTL_PROTO(stats_arenas_i_pdirty) -CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_retained) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) -CTL_PROTO(stats_arenas_i_metadata_mapped) -CTL_PROTO(stats_arenas_i_metadata_allocated) -INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) -CTL_PROTO(stats_allocated) -CTL_PROTO(stats_active) -CTL_PROTO(stats_metadata) -CTL_PROTO(stats_resident) -CTL_PROTO(stats_mapped) -CTL_PROTO(stats_retained) - -/******************************************************************************/ -/* mallctl tree. */ - -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL -#define CTL(c) 0, NULL, c##_ctl - -/* - * Only handles internal indexed nodes, since there are currently no external - * ones. 
- */ -#define INDEX(i) {false}, i##_index - -static const ctl_named_node_t thread_tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("flush"), CTL(thread_tcache_flush)} -}; - -static const ctl_named_node_t thread_prof_node[] = { - {NAME("name"), CTL(thread_prof_name)}, - {NAME("active"), CTL(thread_prof_active)} -}; - -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, thread_tcache)}, - {NAME("prof"), CHILD(named, thread_prof)} -}; - -static const ctl_named_node_t config_node[] = { - {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, - {NAME("debug"), CTL(config_debug)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("malloc_conf"), CTL(config_malloc_conf)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("thp"), CTL(config_thp)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} -}; - -static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("purge"), CTL(opt_purge)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("decay_time"), CTL(opt_decay_time)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("thp"), CTL(opt_thp)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} -}; - -static const ctl_named_node_t tcache_node[] = { - {NAME("create"), CTL(tcache_create)}, - {NAME("flush"), CTL(tcache_flush)}, - {NAME("destroy"), CTL(tcache_destroy)} -}; - -static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("decay"), CTL(arena_i_decay)}, - {NAME("reset"), CTL(arena_i_reset)}, - {NAME("dss"), CTL(arena_i_dss)}, - {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, - {NAME("decay_time"), CTL(arena_i_decay_time)}, - {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} -}; -static const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} -}; - -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; - -static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), 
CTL(arenas_bin_i_run_size)} -}; -static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} -}; - -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; - -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} -}; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} -}; - -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} -}; - -static const ctl_named_node_t arenas_hchunk_i_node[] = { - {NAME("size"), CTL(arenas_hchunk_i_size)} -}; -static const ctl_named_node_t super_arenas_hchunk_i_node[] = { - {NAME(""), CHILD(named, arenas_hchunk_i)} -}; - -static const ctl_indexed_node_t arenas_hchunk_node[] = { - {INDEX(arenas_hchunk_i)} -}; - -static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, - {NAME("decay_time"), CTL(arenas_decay_time)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("nhchunks"), CTL(arenas_nhchunks)}, - {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, - {NAME("extend"), CTL(arenas_extend)} -}; - -static const ctl_named_node_t prof_node[] = { - {NAME("thread_active_init"), CTL(prof_thread_active_init)}, - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, - {NAME("gdump"), CTL(prof_gdump)}, - {NAME("reset"), CTL(prof_reset)}, - {NAME("interval"), CTL(prof_interval)}, - {NAME("lg_sample"), CTL(lg_prof_sample)} -}; - -static const ctl_named_node_t stats_arenas_i_metadata_node[] = { - {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, - {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} -}; - -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_huge_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), 
CTL(stats_arenas_i_bins_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; - -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} -}; - -static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, - {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} -}; -static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { - {INDEX(stats_arenas_i_hchunks_j)} -}; - -static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, - {NAME("decay_time"), CTL(stats_arenas_i_decay_time)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("retained"), CTL(stats_arenas_i_retained)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, - {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} -}; -static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} -}; - -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; - -static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("metadata"), CTL(stats_metadata)}, - {NAME("resident"), CTL(stats_resident)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("retained"), CTL(stats_retained)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} -}; - -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, - {NAME("thread"), CHILD(named, thread)}, - {NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, - {NAME("tcache"), CHILD(named, tcache)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, - {NAME("stats"), CHILD(named, stats)} -}; -static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; - -#undef NAME -#undef CHILD -#undef CTL 
-#undef INDEX - -/******************************************************************************/ - -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ - - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); - } - - if (astats->hstats == NULL) { - astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * - sizeof(malloc_huge_stats_t)); - if (astats->hstats == NULL) - return (true); - } - - return (false); -} - -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ - - astats->nthreads = 0; - astats->dss = dss_prec_names[dss_prec_limit]; - astats->lg_dirty_mult = -1; - astats->decay_time = -1; - astats->pactive = 0; - astats->pdirty = 0; - if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - memset(astats->hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); - } -} - -static void -ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) -{ - unsigned i; - - if (config_stats) { - arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, - &cstats->lg_dirty_mult, &cstats->decay_time, - &cstats->pactive, &cstats->pdirty, &cstats->astats, - cstats->bstats, cstats->lstats, cstats->hstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].curregs * - index2size(i); - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; - } - } else { - arena_basic_stats_merge(tsdn, arena, &cstats->nthreads, - &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, - &cstats->pactive, &cstats->pdirty); - } -} - -static void -ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ - unsigned i; - - sstats->nthreads += astats->nthreads; - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - if (config_stats) { - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.retained += astats->astats.retained; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->astats.metadata_mapped += - astats->astats.metadata_mapped; - sstats->astats.metadata_allocated += - astats->astats.metadata_allocated; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += - astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += - astats->astats.nrequests_large; - - sstats->astats.allocated_huge += astats->astats.allocated_huge; - sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; - sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += - astats->bstats[i].nrequests; - 
sstats->bstats[i].curregs += astats->bstats[i].curregs; - if (config_tcache) { - sstats->bstats[i].nfills += - astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; - } - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += - astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < nhclasses; i++) { - sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; - sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; - sstats->hstats[i].curhchunks += - astats->hstats[i].curhchunks; - } - } -} - -static void -ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; - - ctl_arena_clear(astats); - ctl_arena_stats_amerge(tsdn, astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); -} - -static bool -ctl_grow(tsdn_t *tsdn) -{ - ctl_arena_stats_t *astats; - - /* Initialize new arena. */ - if (arena_init(tsdn, ctl_stats.narenas) == NULL) - return (true); - - /* Allocate extended arena stats. */ - astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - a0dalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = astats; - ctl_stats.narenas++; - - return (false); -} - -static void -ctl_refresh(tsdn_t *tsdn) -{ - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - /* - * Clear sum stats, since they will be merged into by - * ctl_arena_refresh(). 
- */ - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - for (i = 0; i < ctl_stats.narenas; i++) - tarenas[i] = arena_get(tsdn, i, false); - - for (i = 0; i < ctl_stats.narenas; i++) { - bool initialized = (tarenas[i] != NULL); - - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tsdn, tarenas[i], i); - } - - if (config_stats) { - size_t base_allocated, base_resident, base_mapped; - base_stats_get(tsdn, &base_allocated, &base_resident, - &base_mapped); - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); - ctl_stats.metadata = base_allocated + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats - .metadata_allocated; - ctl_stats.resident = base_resident + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ((ctl_stats.arenas[ctl_stats.narenas].pactive + - ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); - ctl_stats.mapped = base_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats.mapped; - ctl_stats.retained = - ctl_stats.arenas[ctl_stats.narenas].astats.retained; - } - - ctl_epoch++; -} - -static bool -ctl_init(tsdn_t *tsdn) -{ - bool ret; - - malloc_mutex_lock(tsdn, &ctl_mtx); - if (!ctl_initialized) { - /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. - */ - ctl_stats.narenas = narenas_total_get(); - ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { - ret = true; - goto label_return; - } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. - */ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - unsigned j; - for (j = 0; j < i; j++) { - a0dalloc( - ctl_stats.arenas[j].lstats); - a0dalloc( - ctl_stats.arenas[j].hstats); - } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = NULL; - ret = true; - goto label_return; - } - } - } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - - ctl_epoch = 0; - ctl_refresh(tsdn); - ctl_initialized = true; - } - - ret = false; -label_return: - malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); -} - -static int -ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp) -{ - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; - const ctl_named_node_t *node; - - elm = name; - /* Equivalent to strchrnul(). */ - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - if (elen == 0) { - ret = ENOENT; - goto label_return; - } - node = super_root_node; - for (i = 0; i < *depthp; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - const ctl_named_node_t *pnode = node; - - /* Children are named. 
*/ - for (j = 0; j < node->nchildren; j++) { - const ctl_named_node_t *child = - ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { - node = child; - if (nodesp != NULL) - nodesp[i] = - (const ctl_node_t *)node; - mibp[i] = j; - break; - } - } - if (node == pnode) { - ret = ENOENT; - goto label_return; - } - } else { - uintmax_t index; - const ctl_indexed_node_t *inode; - - /* Children are indexed. */ - index = malloc_strtoumax(elm, NULL, 10); - if (index == UINTMAX_MAX || index > SIZE_T_MAX) { - ret = ENOENT; - goto label_return; - } - - inode = ctl_indexed_node(node->children); - node = inode->index(tsdn, mibp, *depthp, (size_t)index); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - - if (nodesp != NULL) - nodesp[i] = (const ctl_node_t *)node; - mibp[i] = (size_t)index; - } - - if (node->ctl != NULL) { - /* Terminal node. */ - if (*dot != '\0') { - /* - * The name contains more elements than are - * in this path through the tree. - */ - ret = ENOENT; - goto label_return; - } - /* Complete lookup successful. */ - *depthp = i + 1; - break; - } - - /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } - elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - } - - ret = 0; -label_return: - return (ret); -} - -int -ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; - size_t mib[CTL_MAX_DEPTH]; - const ctl_named_node_t *node; - - if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { - ret = EAGAIN; - goto label_return; - } - - depth = CTL_MAX_DEPTH; - ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); - if (ret != 0) - goto label_return; - - node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); - else { - /* The name refers to a partial path through the ctl tree. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -int -ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) -{ - int ret; - - if (!ctl_initialized && ctl_init(tsdn)) { - ret = EAGAIN; - goto label_return; - } - - ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp); -label_return: - return(ret); -} - -int -ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - const ctl_named_node_t *node; - size_t i; - - if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { - ret = EAGAIN; - goto label_return; - } - - /* Iterate down the tree. */ - node = super_root_node; - for (i = 0; i < miblen; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - /* Children are named. */ - if (node->nchildren <= (unsigned)mib[i]) { - ret = ENOENT; - goto label_return; - } - node = ctl_named_children(node, mib[i]); - } else { - const ctl_indexed_node_t *inode; - - /* Indexed element. */ - inode = ctl_indexed_node(node->children); - node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - } - } - - /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); - else { - /* Partial MIB. 
*/ - ret = ENOENT; - } - -label_return: - return(ret); -} - -bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) - return (true); - - ctl_initialized = false; - - return (false); -} - -void -ctl_prefork(tsdn_t *tsdn) -{ - - malloc_mutex_prefork(tsdn, &ctl_mtx); -} - -void -ctl_postfork_parent(tsdn_t *tsdn) -{ - - malloc_mutex_postfork_parent(tsdn, &ctl_mtx); -} - -void -ctl_postfork_child(tsdn_t *tsdn) -{ - - malloc_mutex_postfork_child(tsdn, &ctl_mtx); -} - -/******************************************************************************/ -/* *_ctl() functions. */ - -#define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ_XOR_WRITE() do { \ - if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ - newlen != 0)) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - ret = EINVAL; \ - goto label_return; \ - } \ - *(t *)oldp = (v); \ - } \ -} while (0) - -#define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) - -/* - * There's a lot of code duplication in the following macros due to limitations - * in how nested cpp macros are expanded. - */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - if (l) \ - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_CGEN(c, n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_GEN(n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return (ret); \ -} - -/* - * ctl_mtx is not acquired, under the assumption that no pertinent data will - * mutate during the call. 
- */ -#define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_NL_GEN(n, v, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - READONLY(); \ - oldval = (m(tsd)); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_CONFIG_GEN(n, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = n; \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -/******************************************************************************/ - -CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) - -static int -epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - UNUSED uint64_t newval; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(tsd_tsdn(tsd)); - READ(ctl_epoch, uint64_t); - - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) -CTL_RO_CONFIG_GEN(config_debug, bool) -CTL_RO_CONFIG_GEN(config_fill, bool) -CTL_RO_CONFIG_GEN(config_lazy_lock, bool) -CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) -CTL_RO_CONFIG_GEN(config_munmap, bool) -CTL_RO_CONFIG_GEN(config_prof, bool) -CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) -CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) -CTL_RO_CONFIG_GEN(config_stats, bool) -CTL_RO_CONFIG_GEN(config_tcache, bool) -CTL_RO_CONFIG_GEN(config_thp, bool) -CTL_RO_CONFIG_GEN(config_tls, bool) -CTL_RO_CONFIG_GEN(config_utrace, bool) -CTL_RO_CONFIG_GEN(config_valgrind, bool) -CTL_RO_CONFIG_GEN(config_xmalloc, bool) - -/******************************************************************************/ - -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) -CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) -CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) 
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_thp, opt_thp, opt_thp, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, - opt_prof_thread_active_init, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) - -/******************************************************************************/ - -static int -thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - arena_t *oldarena; - unsigned newind, oldind; - - oldarena = arena_choose(tsd, NULL); - if (oldarena == NULL) - return (EAGAIN); - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - newind = oldind = oldarena->ind; - WRITE(newind, unsigned); - READ(oldind, unsigned); - if (newind != oldind) { - arena_t *newarena; - - if (newind >= ctl_stats.narenas) { - /* New arena index is out of range. */ - ret = EFAULT; - goto label_return; - } - - /* Initialize arena if necessary. */ - newarena = arena_get(tsd_tsdn(tsd), newind, true); - if (newarena == NULL) { - ret = EAGAIN; - goto label_return; - } - /* Set new arena/tcache associations. 
*/ - arena_migrate(tsd, oldind, newind); - if (config_tcache) { - tcache_t *tcache = tsd_tcache_get(tsd); - if (tcache != NULL) { - tcache_arena_reassociate(tsd_tsdn(tsd), tcache, - oldarena, newarena); - } - } - } - - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, - uint64_t *) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, - tsd_thread_deallocatedp_get, uint64_t *) - -static int -thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_tcache) - return (ENOENT); - - oldval = tcache_enabled_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - tcache_enabled_set(*(bool *)newp); - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (!config_tcache) - return (ENOENT); - - READONLY(); - WRITEONLY(); - - tcache_flush(); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (!config_prof) - return (ENOENT); - - READ_XOR_WRITE(); - - if (newp != NULL) { - if (newlen != sizeof(const char *)) { - ret = EINVAL; - goto label_return; - } - - if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != - 0) - goto label_return; - } else { - const char *oldname = prof_thread_name_get(tsd); - READ(oldname, const char *); - } - - ret = 0; -label_return: - return (ret); -} - -static int -thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - oldval = prof_thread_active_get(tsd); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - if (prof_thread_active_set(tsd, *(bool *)newp)) { - ret = EAGAIN; - goto label_return; - } - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -/******************************************************************************/ - -static int -tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - READONLY(); - if (tcaches_create(tsd, &tcache_ind)) { - ret = EFAULT; - goto label_return; - } - READ(tcache_ind, unsigned); - - ret = 0; -label_return: - return ret; -} - -static int -tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } - tcaches_flush(tsd, tcache_ind); - - ret = 0; -label_return: - return (ret); -} - -static int -tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void 
*newp, size_t newlen) -{ - int ret; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } - tcaches_destroy(tsd, tcache_ind); - - ret = 0; -label_return: - return (ret); -} - -/******************************************************************************/ - -static void -arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) -{ - - malloc_mutex_lock(tsdn, &ctl_mtx); - { - unsigned narenas = ctl_stats.narenas; - - if (arena_ind == narenas) { - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, narenas); - - for (i = 0; i < narenas; i++) - tarenas[i] = arena_get(tsdn, i, false); - - /* - * No further need to hold ctl_mtx, since narenas and - * tarenas contain everything needed below. - */ - malloc_mutex_unlock(tsdn, &ctl_mtx); - - for (i = 0; i < narenas; i++) { - if (tarenas[i] != NULL) - arena_purge(tsdn, tarenas[i], all); - } - } else { - arena_t *tarena; - - assert(arena_ind < narenas); - - tarena = arena_get(tsdn, arena_ind, false); - - /* No further need to hold ctl_mtx. */ - malloc_mutex_unlock(tsdn, &ctl_mtx); - - if (tarena != NULL) - arena_purge(tsdn, tarena, all); - } - } -} - -static int -arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - READONLY(); - WRITEONLY(); - arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - READONLY(); - WRITEONLY(); - arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind; - arena_t *arena; - - READONLY(); - WRITEONLY(); - - if ((config_valgrind && unlikely(in_valgrind)) || (config_fill && - unlikely(opt_quarantine))) { - ret = EFAULT; - goto label_return; - } - - arena_ind = (unsigned)mib[1]; - if (config_debug) { - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - assert(arena_ind < ctl_stats.narenas); - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - } - assert(arena_ind >= opt_narenas); - - arena = arena_get(tsd_tsdn(tsd), arena_ind, false); - - arena_reset(tsd, arena); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - const char *dss = NULL; - unsigned arena_ind = (unsigned)mib[1]; - dss_prec_t dss_prec_old = dss_prec_limit; - dss_prec_t dss_prec = dss_prec_limit; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - WRITE(dss, const char *); - if (dss != NULL) { - int i; - bool match = false; - - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; - } - } - - if (!match) { - ret = EINVAL; - goto label_return; - } - } - - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); - if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) { - ret = EFAULT; - goto label_return; - } - dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena); - } else { - if (dss_prec != dss_prec_limit && - 
chunk_dss_prec_set(dss_prec)) { - ret = EFAULT; - goto label_return; - } - dss_prec_old = chunk_dss_prec_get(); - } - - dss = dss_prec_names[dss_prec_old]; - READ(dss, const char *); - - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -static int -arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind = (unsigned)mib[1]; - arena_t *arena; - - arena = arena_get(tsd_tsdn(tsd), arena_ind, false); - if (arena == NULL) { - ret = EFAULT; - goto label_return; - } - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena, - *(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind = (unsigned)mib[1]; - arena_t *arena; - - arena = arena_get(tsd_tsdn(tsd), arena_ind, false); - if (arena == NULL) { - ret = EFAULT; - goto label_return; - } - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_decay_time_set(tsd_tsdn(tsd), arena, - *(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind = (unsigned)mib[1]; - arena_t *arena; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { - if (newp != NULL) { - chunk_hooks_t old_chunk_hooks, new_chunk_hooks; - WRITE(new_chunk_hooks, chunk_hooks_t); - old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena, - &new_chunk_hooks); - READ(old_chunk_hooks, chunk_hooks_t); - } else { - chunk_hooks_t old_chunk_hooks = - chunk_hooks_get(tsd_tsdn(tsd), arena); - READ(old_chunk_hooks, chunk_hooks_t); - } - } else { - ret = EFAULT; - goto label_return; - } - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -static const ctl_named_node_t * -arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t *ret; - - malloc_mutex_lock(tsdn, &ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; - goto label_return; - } - - ret = super_arena_i_node; -label_return: - malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } - narenas = ctl_stats.narenas; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -static int 
-arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned nread, i; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; - } - - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - -static int -arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_default_get(); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -static int -arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_decay_time_default_get(); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_decay_time_default_set(*(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) -CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) -CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -static const ctl_named_node_t * -arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); -} - -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) -CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) -static const ctl_named_node_t * -arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); -} - -CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) -CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]), - size_t) -static const ctl_named_node_t * -arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nhclasses) - return (NULL); - return (super_arenas_hchunk_i_node); -} - -static int -arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - READONLY(); - if (ctl_grow(tsd_tsdn(tsd))) { - ret = EAGAIN; - goto label_return; - } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); -} - 
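For reference, the ctl_byname(), ctl_nametomib(), and ctl_bymib() entry points removed above back jemalloc's public mallctl(), mallctlnametomib(), and mallctlbymib() API. The sketch below is illustrative only and not part of the deleted sources; it assumes the default unprefixed public symbol names (a build configured with --with-jemalloc-prefix would expose je_mallctl() and friends instead). It refreshes the cached statistics through the "epoch" node, then reads the indexed stats.arenas.<i>.pactive node by MIB, the translate-once/query-many pattern that ctl_nametomib() exists to support:

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	unsigned narenas, i;

	/* Writing any value to "epoch" drives ctl_refresh(). */
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	sz = sizeof(narenas);
	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

	/* Translate the name once; only the index component changes. */
	mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);

	for (i = 0; i < narenas; i++) {
		size_t pactive;

		sz = sizeof(pactive);
		mib[2] = i;	/* Resolved by stats_arenas_i_index(). */
		if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) == 0)
			printf("arena %u: %zu active pages\n", i, pactive);
		/* A nonzero return (ENOENT) means arena i is uninitialized. */
	}
	return (0);
}

This exercises exactly the lookup paths shown above: the named and indexed branches of ctl_lookup(), the epoch-driven ctl_refresh(), and the ENOENT results for partial paths and uninitialized arenas.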
-/******************************************************************************/ - -static int -prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_thread_active_init_set(tsd_tsdn(tsd), - *(bool *)newp); - } else - oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); - } else - oldval = prof_active_get(tsd_tsdn(tsd)); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - const char *filename = NULL; - - if (!config_prof) - return (ENOENT); - - WRITEONLY(); - WRITE(filename, const char *); - - if (prof_mdump(tsd, filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - return (ret); -} - -static int -prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); - } else - oldval = prof_gdump_get(tsd_tsdn(tsd)); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - size_t lg_sample = lg_prof_sample; - - if (!config_prof) - return (ENOENT); - - WRITEONLY(); - WRITE(lg_sample, size_t); - if (lg_sample >= (sizeof(uint64_t) << 3)) - lg_sample = (sizeof(uint64_t) << 3) - 1; - - prof_reset(tsd, lg_sample); - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) -CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) - -/******************************************************************************/ - -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) -CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, - ssize_t) -CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time, - ssize_t) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) 
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_retained, - ctl_stats.arenas[mib[2]].astats.retained, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, - ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, - ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) - -CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. 
*/ - -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. 
*/ - uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, - ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) - -static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) -{ - - if (j > nhclasses) - return (NULL); - return (super_stats_arenas_i_hchunks_j_node); -} - -static const ctl_named_node_t * -stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(tsdn, &ctl_mtx); - if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { - ret = NULL; - goto label_return; - } - - ret = super_stats_arenas_i_node; -label_return: - malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); -} diff --git a/sources/jemalloc/src/je_extent.c b/sources/jemalloc/src/je_extent.c deleted file mode 100755 index ff8de2fe..00000000 --- a/sources/jemalloc/src/je_extent.c +++ /dev/null @@ -1,96 +0,0 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -#ifndef JEMALLOC_JET -static -#endif -size_t -extent_size_quantize_floor(size_t size) { - size_t ret; - szind_t ind; - - assert(size > 0); - - ind = size2index(size + 1); - if (ind == 0) { - /* Avoid underflow. */ - return (index2size(0)); - } - ret = index2size(ind - 1); - assert(ret <= size); - return (ret); -} - -size_t -extent_size_quantize_ceil(size_t size) { - size_t ret; - - assert(size > 0); - - ret = extent_size_quantize_floor(size); - if (ret < size) { - /* - * Skip a quantization that may have an adequately large extent, - * because under-sized extents may be mixed in. This only - * happens when an unusual size is requested, i.e. for aligned - * allocation, and is just one of several places where linear - * search would potentially find sufficiently aligned available - * memory somewhere lower. - */ - ret = index2size(size2index(ret + 1)); - } - return ret; -} - -JEMALLOC_INLINE_C int -extent_sz_comp(const extent_node_t *a, const extent_node_t *b) -{ - size_t a_qsize = extent_size_quantize_floor(extent_node_size_get(a)); - size_t b_qsize = extent_size_quantize_floor(extent_node_size_get(b)); - - return ((a_qsize > b_qsize) - (a_qsize < b_qsize)); -} - -JEMALLOC_INLINE_C int -extent_sn_comp(const extent_node_t *a, const extent_node_t *b) -{ - size_t a_sn = extent_node_sn_get(a); - size_t b_sn = extent_node_sn_get(b); - - return ((a_sn > b_sn) - (a_sn < b_sn)); -} - -JEMALLOC_INLINE_C int -extent_ad_comp(const extent_node_t *a, const extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); - - return ((a_addr > b_addr) - (a_addr < b_addr)); -} - -JEMALLOC_INLINE_C int -extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b) -{ - int ret; - - ret = extent_sz_comp(a, b); - if (ret != 0) - return (ret); - - ret = extent_sn_comp(a, b); - if (ret != 0) - return (ret); - - ret = extent_ad_comp(a, b); - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link, - extent_szsnad_comp) - -/* Generate red-black tree functions. 
*/ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/sources/jemalloc/src/je_hash.c b/sources/jemalloc/src/je_hash.c deleted file mode 100755 index cfa4da02..00000000 --- a/sources/jemalloc/src/je_hash.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_huge.c b/sources/jemalloc/src/je_huge.c deleted file mode 100755 index 0fbaa41a..00000000 --- a/sources/jemalloc/src/je_huge.c +++ /dev/null @@ -1,498 +0,0 @@ -#define JEMALLOC_HUGE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static extent_node_t * -huge_node_get(const void *ptr) -{ - extent_node_t *node; - - node = chunk_lookup(ptr, true); - assert(!extent_node_achunk_get(node)); - - return (node); -} - -static bool -huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump) -{ - - assert(extent_node_addr_get(node) == ptr); - assert(!extent_node_achunk_get(node)); - return (chunk_register(ptr, node, gdump)); -} - -static void -huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump) -{ - bool err; - - err = huge_node_set(tsdn, ptr, node, gdump); - assert(!err); -} - -static void -huge_node_unset(const void *ptr, const extent_node_t *node) -{ - - chunk_deregister(ptr, node); -} - -void * -huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) -{ - - assert(usize == s2u(usize)); - - return (huge_palloc(tsdn, arena, usize, chunksize, zero)); -} - -void * -huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero) -{ - void *ret; - size_t ausize; - arena_t *iarena; - extent_node_t *node; - size_t sn; - bool is_zeroed, gdump; - - /* Allocate one or more contiguous chunks for this request. */ - - assert(!tsdn_null(tsdn) || arena != NULL); - /* prof_gdump() requirement. */ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); - - ausize = sa2u(usize, alignment); - if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS)) - return (NULL); - assert(ausize >= chunksize); - - /* Allocate an extent node with which to track the chunk. */ - iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : - a0get(); - node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)), - CACHELINE, false, NULL, true, iarena); - if (node == NULL) - return (NULL); - - /* - * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that - * it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed = zero; - if (likely(!tsdn_null(tsdn))) - arena = arena_choose(tsdn_tsd(tsdn), arena); - if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, - arena, usize, alignment, &sn, &is_zeroed)) == NULL) { - idalloctm(tsdn, node, NULL, true, true); - return (NULL); - } - - extent_node_init(node, arena, ret, usize, sn, is_zeroed, true); - - if (huge_node_set(tsdn, ret, node, &gdump)) { - arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn); - idalloctm(tsdn, node, NULL, true, true); - return (NULL); - } - if (config_prof && opt_prof && gdump) - prof_gdump(tsdn); - - /* Insert node into huge. 
*/ - malloc_mutex_lock(tsdn, &arena->huge_mtx); - ql_elm_new(node, ql_link); - ql_tail_insert(&arena->huge, node, ql_link); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - - if (zero || (config_fill && unlikely(opt_zero))) { - if (!is_zeroed) - memset(ret, 0, usize); - } else if (config_fill && unlikely(opt_junk_alloc)) - memset(ret, JEMALLOC_ALLOC_JUNK, usize); - - arena_decay_tick(tsdn, arena); - return (ret); -} - -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) -#endif -static void -huge_dalloc_junk(void *ptr, size_t usize) -{ - - if (config_fill && have_dss && unlikely(opt_junk_free)) { - /* - * Only bother junk filling if the chunk isn't about to be - * unmapped. - */ - if (!config_munmap || (have_dss && chunk_in_dss(ptr))) - memset(ptr, JEMALLOC_FREE_JUNK, usize); - } -} -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); -#endif - -static void -huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t usize_min, size_t usize_max, bool zero) -{ - size_t usize, usize_next; - extent_node_t *node; - arena_t *arena; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool pre_zeroed, post_zeroed, gdump; - - /* prof_gdump() requirement. */ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); - - /* Increase usize to incorporate extra. */ - for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1)) - <= oldsize; usize = usize_next) - ; /* Do nothing. */ - - if (oldsize == usize) - return; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - pre_zeroed = extent_node_zeroed_get(node); - - /* Fill if necessary (shrinking). */ - if (oldsize > usize) { - size_t sdiff = oldsize - usize; - if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), - JEMALLOC_FREE_JUNK, sdiff); - post_zeroed = false; - } else { - post_zeroed = !chunk_purge_wrapper(tsdn, arena, - &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, - sdiff); - } - } else - post_zeroed = pre_zeroed; - - malloc_mutex_lock(tsdn, &arena->huge_mtx); - /* Update the size of the huge allocation. */ - huge_node_unset(ptr, node); - assert(extent_node_size_get(node) != usize); - extent_node_size_set(node, usize); - huge_node_reset(tsdn, ptr, node, &gdump); - /* Update zeroed. */ - extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - /* gdump without any locks held. */ - if (config_prof && opt_prof && gdump) - prof_gdump(tsdn); - - arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); - - /* Fill if necessary (growing). */ - if (oldsize < usize) { - if (zero || (config_fill && unlikely(opt_zero))) { - if (!pre_zeroed) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - usize - oldsize); - } - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), - JEMALLOC_ALLOC_JUNK, usize - oldsize); - } - } -} - -static bool -huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t usize) -{ - extent_node_t *node; - arena_t *arena; - chunk_hooks_t chunk_hooks; - size_t cdiff; - bool pre_zeroed, post_zeroed, gdump; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - pre_zeroed = extent_node_zeroed_get(node); - chunk_hooks = chunk_hooks_get(tsdn, arena); - - assert(oldsize > usize); - /* prof_gdump() requirement. 
*/ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); - - /* Split excess chunks. */ - cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), - CHUNK_CEILING(usize), cdiff, true, arena->ind)) - return (true); - - if (oldsize > usize) { - size_t sdiff = oldsize - usize; - if (config_fill && unlikely(opt_junk_free)) { - huge_dalloc_junk((void *)((uintptr_t)ptr + usize), - sdiff); - post_zeroed = false; - } else { - post_zeroed = !chunk_purge_wrapper(tsdn, arena, - &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + - usize), CHUNK_CEILING(oldsize), - CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); - } - } else - post_zeroed = pre_zeroed; - - malloc_mutex_lock(tsdn, &arena->huge_mtx); - /* Update the size of the huge allocation. */ - huge_node_unset(ptr, node); - extent_node_size_set(node, usize); - huge_node_reset(tsdn, ptr, node, &gdump); - /* Update zeroed. */ - extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - /* gdump without any locks held. */ - if (config_prof && opt_prof && gdump) - prof_gdump(tsdn); - - /* Zap the excess chunks. */ - arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize, - extent_node_sn_get(node)); - - return (false); -} - -static bool -huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, - size_t usize, bool zero) { - extent_node_t *node; - arena_t *arena; - bool is_zeroed_subchunk, is_zeroed_chunk, gdump; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(tsdn, &arena->huge_mtx); - is_zeroed_subchunk = extent_node_zeroed_get(node); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - - /* prof_gdump() requirement. */ - witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); - - /* - * Use is_zeroed_chunk to detect whether the trailing memory is zeroed, - * update extent's zeroed field, and zero as necessary. - */ - is_zeroed_chunk = false; - if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize, - &is_zeroed_chunk)) - return (true); - - malloc_mutex_lock(tsdn, &arena->huge_mtx); - huge_node_unset(ptr, node); - extent_node_size_set(node, usize); - extent_node_zeroed_set(node, extent_node_zeroed_get(node) && - is_zeroed_chunk); - huge_node_reset(tsdn, ptr, node, &gdump); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - /* gdump without any locks held. */ - if (config_prof && opt_prof && gdump) - prof_gdump(tsdn); - - if (zero || (config_fill && unlikely(opt_zero))) { - if (!is_zeroed_subchunk) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - CHUNK_CEILING(oldsize) - oldsize); - } - if (!is_zeroed_chunk) { - memset((void *)((uintptr_t)ptr + - CHUNK_CEILING(oldsize)), 0, usize - - CHUNK_CEILING(oldsize)); - } - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, - usize - oldsize); - } - - return (false); -} - -bool -huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - - assert(s2u(oldsize) == oldsize); - /* The following should have been caught by callers. */ - assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS); - - /* Both allocations must be huge to avoid a move. */ - if (oldsize < chunksize || usize_max < chunksize) - return (true); - - if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { - /* Attempt to expand the allocation in-place. 
*/ - if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, - zero)) { - arena_decay_tick(tsdn, huge_aalloc(ptr)); - return (false); - } - /* Try again, this time with usize_min. */ - if (usize_min < usize_max && CHUNK_CEILING(usize_min) > - CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, - ptr, oldsize, usize_min, zero)) { - arena_decay_tick(tsdn, huge_aalloc(ptr)); - return (false); - } - } - - /* - * Avoid moving the allocation if the existing chunk size accommodates - * the new size. - */ - if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) - && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { - huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, - usize_max, zero); - arena_decay_tick(tsdn, huge_aalloc(ptr)); - return (false); - } - - /* Attempt to shrink the allocation in-place. */ - if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { - if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, - usize_max)) { - arena_decay_tick(tsdn, huge_aalloc(ptr)); - return (false); - } - } - return (true); -} - -static void * -huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero) -{ - - if (alignment <= chunksize) - return (huge_malloc(tsdn, arena, usize, zero)); - return (huge_palloc(tsdn, arena, usize, alignment, zero)); -} - -void * -huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, - size_t usize, size_t alignment, bool zero, tcache_t *tcache) -{ - void *ret; - size_t copysize; - - /* The following should have been caught by callers. */ - assert(usize > 0 && usize <= HUGE_MAXCLASS); - - /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, - zero)) - return (ptr); - - /* - * usize and oldsize are different enough that we need to use a - * different size class. In that case, fall back to allocating new - * space and copying. - */ - ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, - zero); - if (ret == NULL) - return (NULL); - - copysize = (usize < oldsize) ? 
usize : oldsize; - memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache, true); - return (ret); -} - -void -huge_dalloc(tsdn_t *tsdn, void *ptr) -{ - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - huge_node_unset(ptr, node); - malloc_mutex_lock(tsdn, &arena->huge_mtx); - ql_remove(&arena->huge, node, ql_link); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - - huge_dalloc_junk(extent_node_addr_get(node), - extent_node_size_get(node)); - arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), - extent_node_addr_get(node), extent_node_size_get(node), - extent_node_sn_get(node)); - idalloctm(tsdn, node, NULL, true, true); - - arena_decay_tick(tsdn, arena); -} - -arena_t * -huge_aalloc(const void *ptr) -{ - - return (extent_node_arena_get(huge_node_get(ptr))); -} - -size_t -huge_salloc(tsdn_t *tsdn, const void *ptr) -{ - size_t size; - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(tsdn, &arena->huge_mtx); - size = extent_node_size_get(node); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - - return (size); -} - -prof_tctx_t * -huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) -{ - prof_tctx_t *tctx; - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(tsdn, &arena->huge_mtx); - tctx = extent_node_prof_tctx_get(node); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); - - return (tctx); -} - -void -huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) -{ - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(tsdn, &arena->huge_mtx); - extent_node_prof_tctx_set(node, tctx); - malloc_mutex_unlock(tsdn, &arena->huge_mtx); -} - -void -huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) -{ - - huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); -} diff --git a/sources/jemalloc/src/je_jemalloc.c b/sources/jemalloc/src/je_jemalloc.c deleted file mode 100755 index f73a26cd..00000000 --- a/sources/jemalloc/src/je_jemalloc.c +++ /dev/null @@ -1,2925 +0,0 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* Runtime configuration options. */ -const char *je_malloc_conf -#ifndef _WIN32 - JEMALLOC_ATTR(weak) -#endif - ; -bool opt_abort = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -const char *opt_junk = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - "true" -#else - "false" -#endif - ; -bool opt_junk_alloc = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; -bool opt_junk_free = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; - -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; -bool opt_utrace = false; -bool opt_xmalloc = false; -bool opt_zero = false; -unsigned opt_narenas = 0; - -/* Initialized to true if the process is running inside Valgrind. */ -bool in_valgrind; - -unsigned ncpus; - -/* Protects arenas initialization. */ -static malloc_mutex_t arenas_lock; -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - * - * arenas[0..narenas_auto) are used for automatic multiplexing of threads and - * arenas. 
arenas[narenas_auto..narenas_total) are only used if the application - * takes some action to create them and allocate from them. - */ -arena_t **arenas; -static unsigned narenas_total; /* Use narenas_total_*(). */ -static arena_t *a0; /* arenas[0]; read-only after initialization. */ -unsigned narenas_auto; /* Read-only after initialization. */ - -typedef enum { - malloc_init_uninitialized = 3, - malloc_init_a0_initialized = 2, - malloc_init_recursible = 1, - malloc_init_initialized = 0 /* Common case --> jnz. */ -} malloc_init_t; -static malloc_init_t malloc_init_state = malloc_init_uninitialized; - -/* False should be the common case. Set to true to trigger initialization. */ -static bool malloc_slow = true; - -/* When malloc_slow is true, set the corresponding bits for sanity check. */ -enum { - flag_opt_junk_alloc = (1U), - flag_opt_junk_free = (1U << 1), - flag_opt_quarantine = (1U << 2), - flag_opt_zero = (1U << 3), - flag_opt_utrace = (1U << 4), - flag_in_valgrind = (1U << 5), - flag_opt_xmalloc = (1U << 6) -}; -static uint8_t malloc_slow_flags; - -JEMALLOC_ALIGNED(CACHELINE) -const size_t pind2sz_tab[NPSIZES] = { -#define PSZ_yes(lg_grp, ndelta, lg_delta) \ - (((ZU(1)<= 0x0600 -static malloc_mutex_t init_lock = SRWLOCK_INIT; -#else -static malloc_mutex_t init_lock; -static bool init_lock_initialized = false; - -JEMALLOC_ATTR(constructor) -static void WINAPI -_init_init_lock(void) -{ - - /* If another constructor in the same binary is using mallctl to - * e.g. setup chunk hooks, it may end up running before this one, - * and malloc_init_hard will crash trying to lock the uninitialized - * lock. So we force an initialization of the lock in - * malloc_init_hard as well. We don't try to care about atomicity - * of the accessed to the init_lock_initialized boolean, since it - * really only matters early in the process creation, before any - * separate thread normally starts doing anything. */ - if (!init_lock_initialized) - malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); - init_lock_initialized = true; -} - -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif -#endif -#else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; -#endif - -typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ -} malloc_utrace_t; - -#ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ - if (unlikely(opt_utrace)) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) -#else -# define UTRACE(a, b, c) -#endif - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -static bool malloc_init_hard_a0(void); -static bool malloc_init_hard(void); - -/******************************************************************************/ -/* - * Begin miscellaneous support functions. 
- */ - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_initialized(void) -{ - - return (malloc_init_state == malloc_init_initialized); -} - -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. - */ - if (config_fill && unlikely(opt_quarantine)) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init_a0(void) -{ - - if (unlikely(malloc_init_state == malloc_init_uninitialized)) - return (malloc_init_hard_a0()); - return (false); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ - - if (unlikely(!malloc_initialized()) && malloc_init_hard()) - return (true); - malloc_thread_init(); - - return (false); -} - -/* - * The a0*() functions are used instead of i{d,}alloc() in situations that - * cannot tolerate TLS variable access. - */ - -static void * -a0ialloc(size_t size, bool zero, bool is_metadata) -{ - - if (unlikely(malloc_init_a0())) - return (NULL); - - return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, - is_metadata, arena_get(TSDN_NULL, 0, true), true)); -} - -static void -a0idalloc(void *ptr, bool is_metadata) -{ - - idalloctm(TSDN_NULL, ptr, false, is_metadata, true); -} - -arena_t * -a0get(void) -{ - - return (a0); -} - -void * -a0malloc(size_t size) -{ - - return (a0ialloc(size, false, true)); -} - -void -a0dalloc(void *ptr) -{ - - a0idalloc(ptr, true); -} - -/* - * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive - * situations that cannot tolerate TLS variable access (TLS allocation and very - * early internal data structure initialization). - */ - -void * -bootstrap_malloc(size_t size) -{ - - if (unlikely(size == 0)) - size = 1; - - return (a0ialloc(size, false, false)); -} - -void * -bootstrap_calloc(size_t num, size_t size) -{ - size_t num_size; - - num_size = num * size; - if (unlikely(num_size == 0)) { - assert(num == 0 || size == 0); - num_size = 1; - } - - return (a0ialloc(num_size, true, false)); -} - -void -bootstrap_free(void *ptr) -{ - - if (unlikely(ptr == NULL)) - return; - - a0idalloc(ptr, false); -} - -static void -arena_set(unsigned ind, arena_t *arena) -{ - - atomic_write_p((void **)&arenas[ind], arena); -} - -static void -narenas_total_set(unsigned narenas) -{ - - atomic_write_u(&narenas_total, narenas); -} - -static void -narenas_total_inc(void) -{ - - atomic_add_u(&narenas_total, 1); -} - -unsigned -narenas_total_get(void) -{ - - return (atomic_read_u(&narenas_total)); -} - -/* Create a new arena and insert it into the arenas array at index ind. */ -static arena_t * -arena_init_locked(tsdn_t *tsdn, unsigned ind) -{ - arena_t *arena; - - assert(ind <= narenas_total_get()); - if (ind > MALLOCX_ARENA_MAX) - return (NULL); - if (ind == narenas_total_get()) - narenas_total_inc(); - - /* - * Another thread may have already initialized arenas[ind] if it's an - * auto arena. - */ - arena = arena_get(tsdn, ind, false); - if (arena != NULL) { - assert(ind < narenas_auto); - return (arena); - } - - /* Actually initialize the arena. 
*/ - arena = arena_new(tsdn, ind); - arena_set(ind, arena); - return (arena); -} - -arena_t * -arena_init(tsdn_t *tsdn, unsigned ind) -{ - arena_t *arena; - - malloc_mutex_lock(tsdn, &arenas_lock); - arena = arena_init_locked(tsdn, ind); - malloc_mutex_unlock(tsdn, &arenas_lock); - return (arena); -} - -static void -arena_bind(tsd_t *tsd, unsigned ind, bool internal) -{ - arena_t *arena; - - if (!tsd_nominal(tsd)) - return; - - arena = arena_get(tsd_tsdn(tsd), ind, false); - arena_nthreads_inc(arena, internal); - - if (internal) - tsd_iarena_set(tsd, arena); - else - tsd_arena_set(tsd, arena); -} - -void -arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) -{ - arena_t *oldarena, *newarena; - - oldarena = arena_get(tsd_tsdn(tsd), oldind, false); - newarena = arena_get(tsd_tsdn(tsd), newind, false); - arena_nthreads_dec(oldarena, false); - arena_nthreads_inc(newarena, false); - tsd_arena_set(tsd, newarena); -} - -static void -arena_unbind(tsd_t *tsd, unsigned ind, bool internal) -{ - arena_t *arena; - - arena = arena_get(tsd_tsdn(tsd), ind, false); - arena_nthreads_dec(arena, internal); - if (internal) - tsd_iarena_set(tsd, NULL); - else - tsd_arena_set(tsd, NULL); -} - -arena_tdata_t * -arena_tdata_get_hard(tsd_t *tsd, unsigned ind) -{ - arena_tdata_t *tdata, *arenas_tdata_old; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - unsigned narenas_tdata_old, i; - unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); - unsigned narenas_actual = narenas_total_get(); - - /* - * Dissociate old tdata array (and set up for deallocation upon return) - * if it's too small. - */ - if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { - arenas_tdata_old = arenas_tdata; - narenas_tdata_old = narenas_tdata; - arenas_tdata = NULL; - narenas_tdata = 0; - tsd_arenas_tdata_set(tsd, arenas_tdata); - tsd_narenas_tdata_set(tsd, narenas_tdata); - } else { - arenas_tdata_old = NULL; - narenas_tdata_old = 0; - } - - /* Allocate tdata array if it's missing. */ - if (arenas_tdata == NULL) { - bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); - narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; - - if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { - *arenas_tdata_bypassp = true; - arenas_tdata = (arena_tdata_t *)a0malloc( - sizeof(arena_tdata_t) * narenas_tdata); - *arenas_tdata_bypassp = false; - } - if (arenas_tdata == NULL) { - tdata = NULL; - goto label_return; - } - assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); - tsd_arenas_tdata_set(tsd, arenas_tdata); - tsd_narenas_tdata_set(tsd, narenas_tdata); - } - - /* - * Copy to tdata array. It's possible that the actual number of arenas - * has increased since narenas_total_get() was called above, but that - * causes no correctness issues unless two threads concurrently execute - * the arenas.extend mallctl, which we trust mallctl synchronization to - * prevent. - */ - - /* Copy/initialize tickers. */ - for (i = 0; i < narenas_actual; i++) { - if (i < narenas_tdata_old) { - ticker_copy(&arenas_tdata[i].decay_ticker, - &arenas_tdata_old[i].decay_ticker); - } else { - ticker_init(&arenas_tdata[i].decay_ticker, - DECAY_NTICKS_PER_UPDATE); - } - } - if (narenas_tdata > narenas_actual) { - memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) - * (narenas_tdata - narenas_actual)); - } - - /* Read the refreshed tdata array. */ - tdata = &arenas_tdata[ind]; -label_return: - if (arenas_tdata_old != NULL) - a0dalloc(arenas_tdata_old); - return (tdata); -} - -/* Slow path, called only by arena_choose(). 
*/ -arena_t * -arena_choose_hard(tsd_t *tsd, bool internal) -{ - arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); - - if (narenas_auto > 1) { - unsigned i, j, choose[2], first_null; - - /* - * Determine binding for both non-internal and internal - * allocation. - * - * choose[0]: For application allocation. - * choose[1]: For internal metadata allocation. - */ - - for (j = 0; j < 2; j++) - choose[j] = 0; - - first_null = narenas_auto; - malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); - assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); - for (i = 1; i < narenas_auto; i++) { - if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { - /* - * Choose the first arena that has the lowest - * number of threads assigned to it. - */ - for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get( - tsd_tsdn(tsd), i, false), !!j) < - arena_nthreads_get(arena_get( - tsd_tsdn(tsd), choose[j], false), - !!j)) - choose[j] = i; - } - } else if (first_null == narenas_auto) { - /* - * Record the index of the first uninitialized - * arena, in case all extant arenas are in use. - * - * NB: It is possible for there to be - * discontinuities in terms of initialized - * versus uninitialized arenas, due to the - * "thread.arena" mallctl. - */ - first_null = i; - } - } - - for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), - choose[j], false), !!j) == 0 || first_null == - narenas_auto) { - /* - * Use an unloaded arena, or the least loaded - * arena if all arenas are already initialized. - */ - if (!!j == internal) { - ret = arena_get(tsd_tsdn(tsd), - choose[j], false); - } - } else { - arena_t *arena; - - /* Initialize a new arena. */ - choose[j] = first_null; - arena = arena_init_locked(tsd_tsdn(tsd), - choose[j]); - if (arena == NULL) { - malloc_mutex_unlock(tsd_tsdn(tsd), - &arenas_lock); - return (NULL); - } - if (!!j == internal) - ret = arena; - } - arena_bind(tsd, choose[j], !!j); - } - malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); - } else { - ret = arena_get(tsd_tsdn(tsd), 0, false); - arena_bind(tsd, 0, false); - arena_bind(tsd, 0, true); - } - - return (ret); -} - -void -thread_allocated_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -thread_deallocated_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -iarena_cleanup(tsd_t *tsd) -{ - arena_t *iarena; - - iarena = tsd_iarena_get(tsd); - if (iarena != NULL) - arena_unbind(tsd, iarena->ind, true); -} - -void -arena_cleanup(tsd_t *tsd) -{ - arena_t *arena; - - arena = tsd_arena_get(tsd); - if (arena != NULL) - arena_unbind(tsd, arena->ind, false); -} - -void -arenas_tdata_cleanup(tsd_t *tsd) -{ - arena_tdata_t *arenas_tdata; - - /* Prevent tsd->arenas_tdata from being (re)created. */ - *tsd_arenas_tdata_bypassp_get(tsd) = true; - - arenas_tdata = tsd_arenas_tdata_get(tsd); - if (arenas_tdata != NULL) { - tsd_arenas_tdata_set(tsd, NULL); - a0dalloc(arenas_tdata); - } -} - -void -narenas_tdata_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -arenas_tdata_bypass_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -static void -stats_print_atexit(void) -{ - - if (config_tcache && config_stats) { - tsdn_t *tsdn; - unsigned narenas, i; - - tsdn = tsdn_fetch(); - - /* - * Merge stats from extant threads. This is racy, since - * individual threads do not lock when recording tcache stats - * events. As a consequence, the final stats may be slightly - * out of date by the time they are reported, if other threads - * continue to allocate. 
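/*
 * Editorial sketch: the selection policy arena_choose_hard() applies for
 * each allocation class, restated over a plain array. nthreads[] counts
 * threads bound to each initialized arena; UNINIT marks a slot with no
 * arena yet; slot 0 is assumed initialized, mirroring arena 0 above.
 * All names here are illustrative stand-ins.
 */
#include <limits.h>

#define UNINIT UINT_MAX

static unsigned
choose_arena(const unsigned *nthreads, unsigned narenas,
    unsigned *first_null)
{
	unsigned i, choice = 0;

	*first_null = narenas;
	for (i = 1; i < narenas; i++) {
		if (nthreads[i] == UNINIT) {
			/* Remember the first gap; "thread.arena" can leave
			 * discontinuities, exactly as noted above. */
			if (*first_null == narenas)
				*first_null = i;
		} else if (nthreads[i] < nthreads[choice])
			choice = i;	/* fewer bound threads wins */
	}
	return (choice);
}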
- */ - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arena_get(tsdn, i, false); - if (arena != NULL) { - tcache_t *tcache; - - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(tsdn, &arena->lock); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tsdn, tcache, arena); - } - malloc_mutex_unlock(tsdn, &arena->lock); - } - } - } - je_malloc_stats_print(NULL, NULL, NULL); -} - -/* - * End miscellaneous support functions. - */ -/******************************************************************************/ -/* - * Begin initialization functions. - */ - -static char * -jemalloc_secure_getenv(const char *name) -{ -#ifdef JEMALLOC_HAVE_SECURE_GETENV - return secure_getenv(name); -#else -# ifdef JEMALLOC_HAVE_ISSETUGID - if (issetugid() != 0) - return (NULL); -# endif - return (getenv(name)); -#endif -} - -static unsigned -malloc_ncpus(void) -{ - long result; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwNumberOfProcessors; -#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) - /* - * glibc >= 2.6 has the CPU_COUNT macro. - * - * glibc's sysconf() uses isspace(). glibc allocates for the first time - * *before* setting up the isspace tables. Therefore we need a - * different method to get the number of CPUs. - */ - { - cpu_set_t set; - - pthread_getaffinity_np(pthread_self(), sizeof(set), &set); - result = CPU_COUNT(&set); - } -#else - result = sysconf(_SC_NPROCESSORS_ONLN); -#endif - return ((result == -1) ? 1 : (unsigned)result); -} - -static bool -malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ - bool accept; - const char *opts = *opts_p; - - *k_p = opts; - - for (accept = false; !accept;) { - switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': - case '_': - opts++; - break; - case ':': - opts++; - *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; - *v_p = opts; - accept = true; - break; - case '\0': - if (opts != *opts_p) { - malloc_write(": Conf string ends " - "with key\n"); - } - return (true); - default: - malloc_write(": Malformed conf string\n"); - return (true); - } - } - - for (accept = false; !accept;) { - switch (*opts) { - case ',': - opts++; - /* - * Look ahead one character here, because the next time - * this function is called, it will assume that end of - * input has been cleanly reached if no input remains, - * but we have optimistically already consumed the - * comma if one exists. 
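/*
 * Editorial sketch of the CPU-count fallback malloc_ncpus() explains
 * above: on glibc, sysconf() can allocate before its isspace tables
 * exist, so the affinity mask is consulted first; elsewhere sysconf()
 * is used directly. _GNU_SOURCE is required for the *_np call.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <unistd.h>

static unsigned
ncpus_probe(void)
{
#if defined(__GLIBC__) && defined(CPU_COUNT)
	cpu_set_t set;

	if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
		return ((unsigned)CPU_COUNT(&set));
#endif
	{
		long n = sysconf(_SC_NPROCESSORS_ONLN);

		return ((n == -1) ? 1 : (unsigned)n);
	}
}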
- */ - if (*opts == '\0') { - malloc_write(": Conf string ends " - "with comma\n"); - } - *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; - accept = true; - break; - case '\0': - *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; - accept = true; - break; - default: - opts++; - break; - } - } - - *opts_p = opts; - return (false); -} - -static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ - - malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); -} - -static void -malloc_slow_flag_init(void) -{ - /* - * Combine the runtime options into malloc_slow for fast path. Called - * after processing all the options. - */ - malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) - | (opt_junk_free ? flag_opt_junk_free : 0) - | (opt_quarantine ? flag_opt_quarantine : 0) - | (opt_zero ? flag_opt_zero : 0) - | (opt_utrace ? flag_opt_utrace : 0) - | (opt_xmalloc ? flag_opt_xmalloc : 0); - - if (config_valgrind) - malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); - - malloc_slow = (malloc_slow_flags != 0); -} - -static void -malloc_conf_init(void) -{ - unsigned i; - char buf[PATH_MAX + 1]; - const char *opts, *k, *v; - size_t klen, vlen; - - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && unlikely(in_valgrind)) { - opt_junk = "false"; - opt_junk_alloc = false; - opt_junk_free = false; - assert(!opt_zero); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && unlikely(in_valgrind)) - opt_tcache = false; - } - - for (i = 0; i < 4; i++) { - /* Get runtime configuration. */ - switch (i) { - case 0: - opts = config_malloc_conf; - break; - case 1: - if (je_malloc_conf != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = je_malloc_conf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 2: { - ssize_t linklen = 0; -#ifndef _WIN32 - int saved_errno = errno; - const char *linkname = -# ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else - "/etc/malloc.conf" -# endif - ; - - /* - * Try to use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ - linklen = readlink(linkname, buf, sizeof(buf) - 1); - if (linklen == -1) { - /* No configuration specified. */ - linklen = 0; - /* Restore errno. */ - set_errno(saved_errno); - } -#endif - buf[linklen] = '\0'; - opts = buf; - break; - } case 3: { - const char *envname = -#ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" -#else - "MALLOC_CONF" -#endif - ; - - if ((opts = jemalloc_secure_getenv(envname)) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_CONF environment - * variable. - */ - } else { - /* No configuration specified. 
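/*
 * Usage illustration for the option plumbing above: malloc_conf_next()
 * consumes "key1:value1,key2:value2" strings, gathered from
 * config_malloc_conf, the je_malloc_conf global, the /etc/malloc.conf
 * symlink target, and the MALLOC_CONF environment variable, processed
 * in that order with later sources overriding earlier ones. An
 * application can bake one in at link time:
 */
const char *je_malloc_conf = "narenas:4,lg_tcache_max:16";

/* ...or supply the same string at run time:
 *   MALLOC_CONF="narenas:4,lg_tcache_max:16" ./app
 */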
*/ - buf[0] = '\0'; - opts = buf; - } - break; - } default: - not_reached(); - buf[0] = '\0'; - opts = buf; - } - - while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, - &vlen)) { -#define CONF_MATCH(n) \ - (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) -#define CONF_MATCH_VALUE(n) \ - (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) -#define CONF_HANDLE_BOOL(o, n, cont) \ - if (CONF_MATCH(n)) { \ - if (CONF_MATCH_VALUE("true")) \ - o = true; \ - else if (CONF_MATCH_VALUE("false")) \ - o = false; \ - else { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } \ - if (cont) \ - continue; \ - } -#define CONF_MIN_no(um, min) false -#define CONF_MIN_yes(um, min) ((um) < (min)) -#define CONF_MAX_no(um, max) false -#define CONF_MAX_yes(um, max) ((um) > (max)) -#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ - if (CONF_MATCH(n)) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (clip) { \ - if (CONF_MIN_##check_min(um, \ - (t)(min))) \ - o = (t)(min); \ - else if (CONF_MAX_##check_max( \ - um, (t)(max))) \ - o = (t)(max); \ - else \ - o = (t)um; \ - } else { \ - if (CONF_MIN_##check_min(um, \ - (t)(min)) || \ - CONF_MAX_##check_max(um, \ - (t)(max))) { \ - malloc_conf_error( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ - } else \ - o = (t)um; \ - } \ - continue; \ - } -#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ - clip) \ - CONF_HANDLE_T_U(unsigned, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ - CONF_HANDLE_T_U(size_t, o, n, min, max, \ - check_min, check_max, clip) -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (CONF_MATCH(n)) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (l < (ssize_t)(min) || l > \ - (ssize_t)(max)) { \ - malloc_conf_error( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ - } else \ - o = l; \ - continue; \ - } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (CONF_MATCH(n)) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ - continue; \ - } - - CONF_HANDLE_BOOL(opt_abort, "abort", true) - /* - * Chunks always require at least one header page, as - * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an - * additional page in the presence of cache-oblivious - * large), and possibly an additional page in the - * presence of redzones. In order to simplify options - * processing, use a conservative bound that - * accommodates all these constraints. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - LG_SIZE_CLASS_GROUP + 1 + ((config_cache_oblivious - || config_fill) ? 
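/*
 * Editorial restatement of what CONF_HANDLE_T_U does when clip is true:
 * parse an unsigned value and clamp it into [min, max] rather than
 * rejecting out-of-range input. parse_clamped() is a stand-in name.
 */
#include <errno.h>
#include <inttypes.h>

static size_t
parse_clamped(const char *v, size_t min, size_t max, size_t dflt)
{
	char *end;
	uintmax_t um;

	errno = 0;
	um = strtoumax(v, &end, 0);
	if (errno != 0 || end == v || *end != '\0')
		return (dflt);		/* malformed: keep the default */
	if (um < min)
		return (min);
	if (um > max)
		return (max);
	return ((size_t)um);
}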
1 : 0), (sizeof(size_t) << 3) - 1, - yes, yes, true) - if (strncmp("dss", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) - == 0) { - if (chunk_dss_prec_set(i)) { - malloc_conf_error( - "Error setting dss", - k, klen, v, vlen); - } else { - opt_dss = - dss_prec_names[i]; - match = true; - break; - } - } - } - if (!match) { - malloc_conf_error("Invalid conf value", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, - UINT_MAX, yes, no, false) - if (strncmp("purge", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < purge_mode_limit; i++) { - if (strncmp(purge_mode_names[i], v, - vlen) == 0) { - opt_purge = (purge_mode_t)i; - match = true; - break; - } - } - if (!match) { - malloc_conf_error("Invalid conf value", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) - CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, - NSTIME_SEC_MAX); - CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) - if (config_fill) { - if (CONF_MATCH("junk")) { - if (CONF_MATCH_VALUE("true")) { - if (config_valgrind && - unlikely(in_valgrind)) { - malloc_conf_error( - "Deallocation-time " - "junk filling cannot " - "be enabled while " - "running inside " - "Valgrind", k, klen, v, - vlen); - } else { - opt_junk = "true"; - opt_junk_alloc = true; - opt_junk_free = true; - } - } else if (CONF_MATCH_VALUE("false")) { - opt_junk = "false"; - opt_junk_alloc = opt_junk_free = - false; - } else if (CONF_MATCH_VALUE("alloc")) { - opt_junk = "alloc"; - opt_junk_alloc = true; - opt_junk_free = false; - } else if (CONF_MATCH_VALUE("free")) { - if (config_valgrind && - unlikely(in_valgrind)) { - malloc_conf_error( - "Deallocation-time " - "junk filling cannot " - "be enabled while " - "running inside " - "Valgrind", k, klen, v, - vlen); - } else { - opt_junk = "free"; - opt_junk_alloc = false; - opt_junk_free = true; - } - } else { - malloc_conf_error( - "Invalid conf value", k, - klen, v, vlen); - } - continue; - } - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, no, no, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone", true) - CONF_HANDLE_BOOL(opt_zero, "zero", true) - } - if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace", true) - } - if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) - } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache", - !config_valgrind || !in_valgrind) - if (CONF_MATCH("tcache")) { - assert(config_valgrind && in_valgrind); - if (opt_tcache) { - opt_tcache = false; - malloc_conf_error( - "tcache cannot be enabled " - "while running inside Valgrind", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) - } - if (config_thp) { - CONF_HANDLE_BOOL(opt_thp, "thp", true) - } - if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof", true) - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active", - true) - CONF_HANDLE_BOOL(opt_prof_thread_active_init, - "prof_thread_active_init", true) - CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - - 1, no, yes, true) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", - true) - CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, - "lg_prof_interval", -1, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", 
- true) - CONF_HANDLE_BOOL(opt_prof_final, "prof_final", - true) - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", - true) - } - malloc_conf_error("Invalid conf pair", k, klen, v, - vlen); -#undef CONF_MATCH -#undef CONF_MATCH_VALUE -#undef CONF_HANDLE_BOOL -#undef CONF_MIN_no -#undef CONF_MIN_yes -#undef CONF_MAX_no -#undef CONF_MAX_yes -#undef CONF_HANDLE_T_U -#undef CONF_HANDLE_UNSIGNED -#undef CONF_HANDLE_SIZE_T -#undef CONF_HANDLE_SSIZE_T -#undef CONF_HANDLE_CHAR_P - } - } -} - -static bool -malloc_init_hard_needed(void) -{ - - if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == - malloc_init_recursible)) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock, or this thread is the initializing - * thread, and it is recursively allocating. - */ - return (false); - } -#ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { - spin_t spinner; - - /* Busy-wait until the initializing thread completes. */ - spin_init(&spinner); - do { - malloc_mutex_unlock(TSDN_NULL, &init_lock); - spin_adaptive(&spinner); - malloc_mutex_lock(TSDN_NULL, &init_lock); - } while (!malloc_initialized()); - return (false); - } -#endif - return (true); -} - -static bool -malloc_init_hard_a0_locked() -{ - - malloc_initializer = INITIALIZER; - - if (config_prof) - prof_boot0(); - malloc_conf_init(); - if (opt_stats_print) { - /* Print statistics at exit. */ - if (atexit(stats_print_atexit) != 0) { - malloc_write(": Error in atexit()\n"); - if (opt_abort) - abort(); - } - } - pages_boot(); - if (base_boot()) - return (true); - if (chunk_boot()) - return (true); - if (ctl_boot()) - return (true); - if (config_prof) - prof_boot1(); - arena_boot(); - if (config_tcache && tcache_boot(TSDN_NULL)) - return (true); - if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) - return (true); - /* - * Create enough scaffolding to allow recursive allocation in - * malloc_ncpus(). - */ - narenas_auto = 1; - narenas_total_set(narenas_auto); - arenas = &a0; - memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - /* - * Initialize one arena here. The rest are lazily created in - * arena_choose_hard(). - */ - if (arena_init(TSDN_NULL, 0) == NULL) - return (true); - - malloc_init_state = malloc_init_a0_initialized; - - return (false); -} - -static bool -malloc_init_hard_a0(void) -{ - bool ret; - - malloc_mutex_lock(TSDN_NULL, &init_lock); - ret = malloc_init_hard_a0_locked(); - malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (ret); -} - -/* Initialize data structures which may trigger recursive allocation. */ -static bool -malloc_init_hard_recursible(void) -{ - - malloc_init_state = malloc_init_recursible; - - ncpus = malloc_ncpus(); - -#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ - && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ - !defined(__native_client__)) - /* LinuxThreads' pthread_atfork() allocates. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write(": Error in pthread_atfork()\n"); - if (opt_abort) - abort(); - return (true); - } -#endif - - return (false); -} - -static bool -malloc_init_hard_finish(tsdn_t *tsdn) -{ - - if (malloc_mutex_boot()) - return (true); - - if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. 
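/*
 * Editorial sketch of the JEMALLOC_THREADED_INIT busy-wait above: a
 * non-initializing thread repeatedly drops init_lock, backs off, and
 * re-checks until the initializer finishes. sched_yield() stands in
 * for spin_adaptive(); init_done and init_lock are illustrative.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

extern atomic_bool init_done;
extern pthread_mutex_t init_lock;

static void
wait_for_initializer(void)	/* called with init_lock held */
{
	do {
		pthread_mutex_unlock(&init_lock);
		sched_yield();
		pthread_mutex_lock(&init_lock);
	} while (!atomic_load(&init_done));
}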
- */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; - } - narenas_auto = opt_narenas; - /* - * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). - */ - if (narenas_auto > MALLOCX_ARENA_MAX) { - narenas_auto = MALLOCX_ARENA_MAX; - malloc_printf(": Reducing narenas to limit (%d)\n", - narenas_auto); - } - narenas_total_set(narenas_auto); - - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * - (MALLOCX_ARENA_MAX+1)); - if (arenas == NULL) - return (true); - /* Copy the pointer to the one arena that was already initialized. */ - arena_set(0, a0); - - malloc_init_state = malloc_init_initialized; - malloc_slow_flag_init(); - - return (false); -} - -static bool -malloc_init_hard(void) -{ - tsd_t *tsd; - -#if defined(_WIN32) && _WIN32_WINNT < 0x0600 - _init_init_lock(); -#endif - malloc_mutex_lock(TSDN_NULL, &init_lock); - if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (false); - } - - if (malloc_init_state != malloc_init_a0_initialized && - malloc_init_hard_a0_locked()) { - malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (true); - } - - malloc_mutex_unlock(TSDN_NULL, &init_lock); - /* Recursive allocation relies on functional tsd. */ - tsd = malloc_tsd_boot0(); - if (tsd == NULL) - return (true); - if (malloc_init_hard_recursible()) - return (true); - malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); - - if (config_prof && prof_boot2(tsd)) { - malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - return (true); - } - - if (malloc_init_hard_finish(tsd_tsdn(tsd))) { - malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - return (true); - } - - malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - malloc_tsd_boot1(); - return (false); -} - -/* - * End initialization functions. - */ -/******************************************************************************/ -/* - * Begin malloc(3)-compatible functions. - */ - -static void * -ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, - prof_tctx_t *tctx, bool slow_path) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - szind_t ind_large = size2index(LARGE_MINCLASS); - p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); - if (p == NULL) - return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), p, usize); - } else - p = ialloc(tsd, usize, ind, zero, slow_path); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); - else - p = ialloc(tsd, usize, ind, zero, slow_path); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(tsd_tsdn(tsd), p, usize, tctx); - - return (p); -} - -/* - * ialloc_body() is inlined so that fast and slow paths are generated separately - * with statically known slow_path. - * - * This function guarantees that *tsdn is non-NULL on success. 
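/*
 * Editorial restatement of the sizing rule in malloc_init_hard_finish():
 * four arenas per CPU on SMP to dilute lock contention, one otherwise,
 * clamped to the MALLOCX_ARENA() index range (arena_max stands in for
 * MALLOCX_ARENA_MAX).
 */
static unsigned
default_narenas(unsigned ncpus, unsigned arena_max)
{
	unsigned n = (ncpus > 1) ? ncpus << 2 : 1;

	return ((n > arena_max) ? arena_max : n);
}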
- */ -JEMALLOC_ALWAYS_INLINE_C void * -ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, - bool slow_path) -{ - tsd_t *tsd; - szind_t ind; - - if (slow_path && unlikely(malloc_init())) { - *tsdn = NULL; - return (NULL); - } - - tsd = tsd_fetch(); - *tsdn = tsd_tsdn(tsd); - witness_assert_lockless(tsd_tsdn(tsd)); - - ind = size2index(size); - if (unlikely(ind >= NSIZES)) - return (NULL); - - if (config_stats || (config_prof && opt_prof) || (slow_path && - config_valgrind && unlikely(in_valgrind))) { - *usize = index2size(ind); - assert(*usize > 0 && *usize <= HUGE_MAXCLASS); - } - - if (config_prof && opt_prof) - return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); - - return (ialloc(tsd, size, ind, zero, slow_path)); -} - -JEMALLOC_ALWAYS_INLINE_C void -ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, - bool update_errno, bool slow_path) -{ - - assert(!tsdn_null(tsdn) || ret == NULL); - - if (unlikely(ret == NULL)) { - if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { - malloc_printf(": Error in %s(): out of " - "memory\n", func); - abort(); - } - if (update_errno) - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(tsdn, ret, config_prof)); - *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; - } - witness_assert_lockless(tsdn); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_malloc(size_t size) -{ - void *ret; - tsdn_t *tsdn; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) - size = 1; - - if (likely(!malloc_slow)) { - ret = ialloc_body(size, false, &tsdn, &usize, false); - ialloc_post_check(ret, tsdn, usize, "malloc", true, false); - } else { - ret = ialloc_body(size, false, &tsdn, &usize, true); - ialloc_post_check(ret, tsdn, usize, "malloc", true, true); - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); - } - - return (ret); -} - -static void * -imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); - p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), p, usize); - } else - p = ipalloc(tsd, usize, alignment, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imemalign_prof_sample(tsd, alignment, usize, tctx); - else - p = ipalloc(tsd, usize, alignment, false); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(tsd_tsdn(tsd), p, usize, tctx); - - return (p); -} - -JEMALLOC_ATTR(nonnull(1)) -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - tsd_t *tsd; - size_t usize; - void *result; - - assert(min_alignment != 0); - - if (unlikely(malloc_init())) { - tsd = NULL; - result = NULL; - goto label_oom; - } - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - if (size == 0) - size = 1; - - /* Make sure that alignment is a large enough power of 2. 
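/*
 * Editorial restatement of the alignment test imemalign() performs just
 * below: a usable alignment has exactly one bit set (so (a - 1) & a is
 * zero) and is no smaller than min_alignment, which is sizeof(void *)
 * for posix_memalign() and 1 for aligned_alloc().
 */
#include <stdbool.h>
#include <stddef.h>

static bool
alignment_ok(size_t a, size_t min_alignment)
{
	return (a >= min_alignment && ((a - 1) & a) == 0);
}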
*/ - if (unlikely(((alignment - 1) & alignment) != 0 - || (alignment < min_alignment))) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; - } - - usize = sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { - result = NULL; - goto label_oom; - } - - if (config_prof && opt_prof) - result = imemalign_prof(tsd, alignment, usize); - else - result = ipalloc(tsd, usize, alignment, false); - if (unlikely(result == NULL)) - goto label_oom; - assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); - - *memptr = result; - ret = 0; -label_return: - if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, size, result); - JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, - false); - witness_assert_lockless(tsd_tsdn(tsd)); - return (ret); -label_oom: - assert(result == NULL); - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error allocating aligned memory: " - "out of memory\n"); - abort(); - } - ret = ENOMEM; - witness_assert_lockless(tsd_tsdn(tsd)); - goto label_return; -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -JEMALLOC_ATTR(nonnull(1)) -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret; - - ret = imemalign(memptr, alignment, size, sizeof(void *)); - - return (ret); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int err; - - if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { - ret = NULL; - set_errno(err); - } - - return (ret); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t size) -{ - void *ret; - tsdn_t *tsdn; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - num_size = num * size; - if (unlikely(num_size == 0)) { - if (num == 0 || size == 0) - num_size = 1; - else - num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << - 2))) && (num_size / size != num))) - num_size = HUGE_MAXCLASS + 1; /* size_t overflow. 
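/*
 * Editorial restatement of je_calloc()'s overflow screen above: if
 * neither operand uses any of the most significant half of size_t's
 * bits, the product cannot overflow and the division is skipped;
 * otherwise divide to confirm.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
mul_overflows(size_t num, size_t size)
{
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) << 2);

	if (((num | size) & high_bits) == 0)
		return (false);	/* both operands fit in the low half */
	return (size != 0 && num * size / size != num);
}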
*/ - - if (likely(!malloc_slow)) { - ret = ialloc_body(num_size, true, &tsdn, &usize, false); - ialloc_post_check(ret, tsdn, usize, "calloc", true, false); - } else { - ret = ialloc_body(num_size, true, &tsdn, &usize, true); - ialloc_post_check(ret, tsdn, usize, "calloc", true, true); - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); - } - - return (ret); -} - -static void * -irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), p, usize); - } else - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) -{ - void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); - tctx = prof_alloc_prep(tsd, usize, prof_active, true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); - else - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, - old_tctx); - - return (p); -} - -JEMALLOC_INLINE_C void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) -{ - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - witness_assert_lockless(tsd_tsdn(tsd)); - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - if (config_prof && opt_prof) { - usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - prof_free(tsd, ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - if (config_stats) - *tsd_thread_deallocatedp_get(tsd) += usize; - - if (likely(!slow_path)) - iqalloc(tsd, ptr, tcache, false); - else { - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(tsd_tsdn(tsd), ptr); - iqalloc(tsd, ptr, tcache, true); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); - } -} - -JEMALLOC_INLINE_C void -isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) -{ - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - witness_assert_lockless(tsd_tsdn(tsd)); - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - if (config_prof && opt_prof) - prof_free(tsd, ptr, usize); - if (config_stats) - *tsd_thread_deallocatedp_get(tsd) += usize; - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(tsd_tsdn(tsd), ptr); - isqalloc(tsd, ptr, usize, tcache, slow_path); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) -{ - void *ret; - tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - - if (unlikely(size == 0)) { - if (ptr != NULL) { - tsd_t *tsd; - - /* realloc(ptr, 0) is equivalent to free(ptr). 
*/ - UTRACE(ptr, 0, 0); - tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false), true); - return (NULL); - } - size = 1; - } - - if (likely(ptr != NULL)) { - tsd_t *tsd; - - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - - witness_assert_lockless(tsd_tsdn(tsd)); - - old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) { - old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : - u2rz(old_usize); - } - - if (config_prof && opt_prof) { - usize = s2u(size); - ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? - NULL : irealloc_prof(tsd, ptr, old_usize, usize); - } else { - if (config_stats || (config_valgrind && - unlikely(in_valgrind))) - usize = s2u(size); - ret = iralloc(tsd, ptr, old_usize, size, 0, false); - } - tsdn = tsd_tsdn(tsd); - } else { - /* realloc(NULL, size) is equivalent to malloc(size). */ - if (likely(!malloc_slow)) - ret = ialloc_body(size, false, &tsdn, &usize, false); - else - ret = ialloc_body(size, false, &tsdn, &usize, true); - assert(!tsdn_null(tsdn) || ret == NULL); - } - - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - tsd_t *tsd; - - assert(usize == isalloc(tsdn, ret, config_prof)); - tsd = tsdn_tsd(tsdn); - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, - old_usize, old_rzsize, maybe, false); - witness_assert_lockless(tsdn); - return (ret); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_free(void *ptr) -{ - - UTRACE(ptr, 0, 0); - if (likely(ptr != NULL)) { - tsd_t *tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - if (likely(!malloc_slow)) - ifree(tsd, ptr, tcache_get(tsd, false), false); - else - ifree(tsd, ptr, tcache_get(tsd, false), true); - witness_assert_lockless(tsd_tsdn(tsd)); - } -} - -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard override functions. - */ - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) - ret = NULL; - return (ret); -} -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) - ret = NULL; - return (ret); -} -#endif - -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) -/* - * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible - * to inconsistently reference libc's malloc(3)-compatible functions - * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). - * - * These definitions interpose hooks in glibc. 
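/*
 * Editorial demonstration of the two edge cases je_realloc() handles
 * above: realloc(NULL, n) behaves as malloc(n), and realloc(p, 0) is
 * treated as free(p), returning NULL. (C leaves the zero-size case
 * implementation-defined; freeing and returning NULL is this
 * allocator's choice, so the second assert holds when jemalloc is the
 * linked allocator.)
 */
#include <assert.h>
#include <stdlib.h>

int
main(void)
{
	void *p = realloc(NULL, 64);	/* equivalent to malloc(64) */

	assert(p != NULL);
	p = realloc(p, 0);		/* equivalent to free(p) here */
	assert(p == NULL);
	return (0);
}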
The functions are actually - * passed an extra argument for the caller return address, which will be - * ignored. - */ -JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; -# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK -JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = - je_memalign; -# endif - -#ifdef CPU_COUNT -/* - * To enable static linking with glibc, the libc specific malloc interface must - * be implemented also, so none of glibc's malloc.o functions are added to the - * link. - */ -#define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) -/* To force macro expansion of je_ prefix before stringification. */ -#define PREALIAS(je_fn) ALIAS(je_fn) -void *__libc_malloc(size_t size) PREALIAS(je_malloc); -void __libc_free(void* ptr) PREALIAS(je_free); -void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); -void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); -void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); -void *__libc_valloc(size_t size) PREALIAS(je_valloc); -int __posix_memalign(void** r, size_t a, size_t s) - PREALIAS(je_posix_memalign); -#undef PREALIAS -#undef ALIAS - -#endif - -#endif - -/* - * End non-standard override functions. - */ -/******************************************************************************/ -/* - * Begin non-standard functions. - */ - -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { - *alignment = 0; - *usize = s2u(size); - } else { - *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); - *usize = sa2u(size, *alignment); - } - if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) - return (true); - *zero = MALLOCX_ZERO_GET(flags); - if ((flags & MALLOCX_TCACHE_MASK) != 0) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - *tcache = NULL; - else - *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - *tcache = tcache_get(tsd, true); - if ((flags & MALLOCX_ARENA_MASK) != 0) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); - if (unlikely(*arena == NULL)) - return (true); - } else - *arena = NULL; - return (false); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena, bool slow_path) -{ - szind_t ind; - - if (unlikely(alignment != 0)) - return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); - ind = size2index(usize); - assert(ind < NSIZES); - return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, - slow_path)); -} - -static void * -imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena, bool slow_path) -{ - void *p; - - if (usize <= SMALL_MAXCLASS) { - assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : - sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, - tcache, arena, slow_path); - if (p == NULL) - return (NULL); - arena_prof_promoted(tsdn, p, usize); - } else { - p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, - slow_path); - } - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; - prof_tctx_t *tctx; - - if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, - &zero, &tcache, &arena))) - return (NULL); - tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) { - p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, - tcache, arena, slow_path); - } else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, - tcache, arena, slow_path); - } else - p = NULL; - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); - - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, - bool slow_path) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; - - if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, - &zero, &tcache, &arena))) - return (NULL); - p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, - arena, slow_path); - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} - -/* This function guarantees that *tsdn is non-NULL on success. 
*/ -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, - bool slow_path) -{ - tsd_t *tsd; - - if (slow_path && unlikely(malloc_init())) { - *tsdn = NULL; - return (NULL); - } - - tsd = tsd_fetch(); - *tsdn = tsd_tsdn(tsd); - witness_assert_lockless(tsd_tsdn(tsd)); - - if (likely(flags == 0)) { - szind_t ind = size2index(size); - if (unlikely(ind >= NSIZES)) - return (NULL); - if (config_stats || (config_prof && opt_prof) || (slow_path && - config_valgrind && unlikely(in_valgrind))) { - *usize = index2size(ind); - assert(*usize > 0 && *usize <= HUGE_MAXCLASS); - } - - if (config_prof && opt_prof) { - return (ialloc_prof(tsd, *usize, ind, false, - slow_path)); - } - - return (ialloc(tsd, size, ind, false, slow_path)); - } - - if (config_prof && opt_prof) - return (imallocx_prof(tsd, size, flags, usize, slow_path)); - - return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_mallocx(size_t size, int flags) -{ - tsdn_t *tsdn; - void *p; - size_t usize; - - assert(size != 0); - - if (likely(!malloc_slow)) { - p = imallocx_body(size, flags, &tsdn, &usize, false); - ialloc_post_check(p, tsdn, usize, "mallocx", false, false); - } else { - p = imallocx_body(size, flags, &tsdn, &usize, true); - ialloc_post_check(p, tsdn, usize, "mallocx", false, true); - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, - MALLOCX_ZERO_GET(flags)); - } - - return (p); -} - -static void * -irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, - size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, - zero, tcache, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), p, usize); - } else { - p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, - tcache, arena); - } - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, - size_t alignment, size_t *usize, bool zero, tcache_t *tcache, - arena_t *arena) -{ - void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); - tctx = prof_alloc_prep(tsd, *usize, prof_active, false); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, - alignment, zero, tcache, arena, tctx); - } else { - p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, - tcache, arena); - } - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, false); - return (NULL); - } - - if (p == old_ptr && alignment != 0) { - /* - * The allocation did not move, so it is possible that the size - * class is smaller than would guarantee the requested - * alignment, and that the alignment constraint was - * serendipitously satisfied. Additionally, old_usize may not - * be the same as the current usize because of in-place large - * reallocation. Therefore, query the actual value of usize. 
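/*
 * Usage sketch for the flag decoding above, using the public MALLOCX_*
 * macros from <jemalloc/jemalloc.h> (an unprefixed build is assumed;
 * prefixed builds expose je_mallocx): a 4 KiB-aligned, zeroed
 * allocation that bypasses the thread cache.
 */
#include <jemalloc/jemalloc.h>

void *
aligned_zeroed_nocache(size_t size)
{
	return (mallocx(size, MALLOCX_ALIGN(4096) | MALLOCX_ZERO |
	    MALLOCX_TCACHE_NONE));
}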
- */ - *usize = isalloc(tsd_tsdn(tsd), p, config_prof); - } - prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, - old_usize, old_tctx); - - return (p); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) -{ - void *p; - tsd_t *tsd; - size_t usize; - size_t old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - arena_t *arena; - tcache_t *tcache; - - assert(ptr != NULL); - assert(size != 0); - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - - if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd_tsdn(tsd), arena_ind, true); - if (unlikely(arena == NULL)) - goto label_oom; - } else - arena = NULL; - - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, true); - - old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) - goto label_oom; - p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, - zero, tcache, arena); - if (unlikely(p == NULL)) - goto label_oom; - } else { - p = iralloct(tsd, ptr, old_usize, size, alignment, zero, - tcache, arena); - if (unlikely(p == NULL)) - goto label_oom; - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(tsd_tsdn(tsd), p, config_prof); - } - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr, - old_usize, old_rzsize, no, zero); - witness_assert_lockless(tsd_tsdn(tsd)); - return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in rallocx(): out of memory\n"); - abort(); - } - UTRACE(ptr, size, 0); - witness_assert_lockless(tsd_tsdn(tsd)); - return (NULL); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero) -{ - size_t usize; - - if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(tsdn, ptr, config_prof); - - return (usize); -} - -static size_t -ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) -{ - size_t usize; - - if (tctx == NULL) - return (old_usize); - usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, - zero); - - return (usize); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero) -{ - size_t usize_max, usize; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); - /* - * usize isn't knowable before ixalloc() returns when 
extra is non-zero. - * Therefore, compute its maximum possible value and use that in - * prof_alloc_prep() to decide whether to capture a backtrace. - * prof_realloc() will use the actual usize to decide whether to sample. - */ - if (alignment == 0) { - usize_max = s2u(size+extra); - assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); - } else { - usize_max = sa2u(size+extra, alignment); - if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) { - /* - * usize_max is out of range, and chances are that - * allocation will fail, but use the maximum possible - * value and carry on with prof_alloc_prep(), just in - * case allocation succeeds. - */ - usize_max = HUGE_MAXCLASS; - } - } - tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); - - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, - size, extra, alignment, zero, tctx); - } else { - usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, - extra, alignment, zero); - } - if (usize == old_usize) { - prof_alloc_rollback(tsd, tctx, false); - return (usize); - } - prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, - old_tctx); - - return (usize); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ - tsd_t *tsd; - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - - assert(ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - - old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - - /* - * The API explicitly absolves itself of protecting against (size + - * extra) numerical overflow, but we may need to clamp extra to avoid - * exceeding HUGE_MAXCLASS. - * - * Ordinarily, size limit checking is handled deeper down, but here we - * have to check as part of (size + extra) clamping, since we need the - * clamped value in the above helper functions. 
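/*
 * Usage note for the clamping logic above (unprefixed public API
 * assumed): xallocx() only ever resizes in place and returns the
 * resulting usable size, so callers compare against the requested
 * minimum to learn whether the resize took effect.
 */
#include <jemalloc/jemalloc.h>

static int
try_grow_in_place(void *ptr, size_t want, size_t headroom)
{
	return (xallocx(ptr, want, headroom, 0) >= want);
}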
- */ - if (unlikely(size > HUGE_MAXCLASS)) { - usize = old_usize; - goto label_not_resized; - } - if (unlikely(HUGE_MAXCLASS - size < extra)) - extra = HUGE_MAXCLASS - size; - - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, - alignment, zero); - } else { - usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, - extra, alignment, zero); - } - if (unlikely(usize == old_usize)) - goto label_not_resized; - - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr, - old_usize, old_rzsize, no, zero); -label_not_resized: - UTRACE(ptr, size, ptr); - witness_assert_lockless(tsd_tsdn(tsd)); - return (usize); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, int flags) -{ - size_t usize; - tsdn_t *tsdn; - - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - - tsdn = tsdn_fetch(); - witness_assert_lockless(tsdn); - - if (config_ivsalloc) - usize = ivsalloc(tsdn, ptr, config_prof); - else - usize = isalloc(tsdn, ptr, config_prof); - - witness_assert_lockless(tsdn); - return (usize); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_dallocx(void *ptr, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); - - UTRACE(ptr, 0, 0); - if (likely(!malloc_slow)) - ifree(tsd, ptr, tcache, false); - else - ifree(tsd, ptr, tcache, true); - witness_assert_lockless(tsd_tsdn(tsd)); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(tsdn_t *tsdn, size_t size, int flags) -{ - size_t usize; - - witness_assert_lockless(tsdn); - - if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) - usize = s2u(size); - else - usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - witness_assert_lockless(tsdn); - return (usize); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_sdallocx(void *ptr, size_t size, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; - size_t usize; - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - tsd = tsd_fetch(); - usize = inallocx(tsd_tsdn(tsd), size, flags); - assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); - - witness_assert_lockless(tsd_tsdn(tsd)); - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); - - UTRACE(ptr, 0, 0); - if (likely(!malloc_slow)) - isfree(tsd, ptr, usize, tcache, false); - else - isfree(tsd, ptr, usize, tcache, true); - witness_assert_lockless(tsd_tsdn(tsd)); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_nallocx(size_t size, int flags) -{ - size_t usize; - tsdn_t *tsdn; - - assert(size != 0); - - if (unlikely(malloc_init())) - return (0); - - tsdn = tsdn_fetch(); - witness_assert_lockless(tsdn); - - usize = inallocx(tsdn, size, flags); - if (unlikely(usize > HUGE_MAXCLASS)) - return (0); - - witness_assert_lockless(tsdn); - return (usize); -} - 
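/*
 * Editorial sketch pairing the non-standard calls above (unprefixed
 * API assumed): nallocx() predicts the usable size without allocating,
 * and sdallocx() lets the deallocation path skip the size lookup that
 * free() would need.
 */
#include <assert.h>
#include <jemalloc/jemalloc.h>

static void
sized_roundtrip(size_t n)
{
	size_t usize = nallocx(n, 0);	/* size class n rounds up to */
	void *p = mallocx(n, 0);

	if (p != NULL) {
		assert(sallocx(p, 0) == usize);
		sdallocx(p, n, 0);	/* size must match the request */
	}
}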
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - int ret; - tsd_t *tsd; - - if (unlikely(malloc_init())) - return (EAGAIN); - - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); - witness_assert_lockless(tsd_tsdn(tsd)); - return (ret); -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - int ret; - tsdn_t *tsdn; - - if (unlikely(malloc_init())) - return (EAGAIN); - - tsdn = tsdn_fetch(); - witness_assert_lockless(tsdn); - ret = ctl_nametomib(tsdn, name, mibp, miblenp); - witness_assert_lockless(tsdn); - return (ret); -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - tsd_t *tsd; - - if (unlikely(malloc_init())) - return (EAGAIN); - - tsd = tsd_fetch(); - witness_assert_lockless(tsd_tsdn(tsd)); - ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); - witness_assert_lockless(tsd_tsdn(tsd)); - return (ret); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - tsdn_t *tsdn; - - tsdn = tsdn_fetch(); - witness_assert_lockless(tsdn); - stats_print(write_cb, cbopaque, opts); - witness_assert_lockless(tsdn); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ - size_t ret; - tsdn_t *tsdn; - - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - - tsdn = tsdn_fetch(); - witness_assert_lockless(tsdn); - - if (config_ivsalloc) - ret = ivsalloc(tsdn, ptr, config_prof); - else - ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof); - - witness_assert_lockless(tsdn); - return (ret); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). - */ - -/* - * If an application creates a thread before doing any allocation in the main - * thread, then calls fork(2) in the main thread followed by memory allocation - * in the child process, a race can occur that results in deadlock within the - * child: the main thread may have forked while the created thread had - * partially initialized the allocator. Ordinarily jemalloc prevents - * fork/malloc races via the following functions it registers during - * initialization using pthread_atfork(), but of course that does no good if - * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still be possible - * to trigger the deadlock described above, but doing so would involve forking - * via a library constructor that runs before jemalloc's runs. - */ -#ifndef JEMALLOC_JET -JEMALLOC_ATTR(constructor) -static void -jemalloc_constructor(void) -{ - - malloc_init(); -} -#endif - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_prefork(void) -#else -JEMALLOC_EXPORT void -_malloc_prefork(void) -#endif -{ - tsd_t *tsd; - unsigned i, j, narenas; - arena_t *arena; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) - return; -#endif - assert(malloc_initialized()); - - tsd = tsd_fetch(); - - narenas = narenas_total_get(); - - witness_prefork(tsd); - /* Acquire all mutexes in a safe order. 
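/*
 * Usage sketch for the control surface above (unprefixed API assumed):
 * statistics are refreshed by writing "epoch", then read with
 * mallctl(); hot paths would translate the name once via
 * mallctlnametomib() and reuse the MIB with mallctlbymib().
 */
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static size_t
allocated_bytes(void)
{
	uint64_t epoch = 1;
	size_t allocated, sz;

	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return (0);
	return (allocated);
}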
*/
- ctl_prefork(tsd_tsdn(tsd));
- tcache_prefork(tsd_tsdn(tsd));
- malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
- prof_prefork0(tsd_tsdn(tsd));
- for (i = 0; i < 3; i++) {
- for (j = 0; j < narenas; j++) {
- if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
- NULL) {
- switch (i) {
- case 0:
- arena_prefork0(tsd_tsdn(tsd), arena);
- break;
- case 1:
- arena_prefork1(tsd_tsdn(tsd), arena);
- break;
- case 2:
- arena_prefork2(tsd_tsdn(tsd), arena);
- break;
- default: not_reached();
- }
- }
- }
- }
- base_prefork(tsd_tsdn(tsd));
- for (i = 0; i < narenas; i++) {
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_prefork3(tsd_tsdn(tsd), arena);
- }
- prof_prefork1(tsd_tsdn(tsd));
-}
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_postfork_parent(void)
-#else
-JEMALLOC_EXPORT void
-_malloc_postfork(void)
-#endif
-{
- tsd_t *tsd;
- unsigned i, narenas;
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized())
- return;
-#endif
- assert(malloc_initialized());
-
- tsd = tsd_fetch();
-
- witness_postfork_parent(tsd);
- /* Release all mutexes, now that fork() has completed. */
- base_postfork_parent(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_parent(tsd_tsdn(tsd), arena);
- }
- prof_postfork_parent(tsd_tsdn(tsd));
- malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
- tcache_postfork_parent(tsd_tsdn(tsd));
- ctl_postfork_parent(tsd_tsdn(tsd));
-}
-
-void
-jemalloc_postfork_child(void)
-{
- tsd_t *tsd;
- unsigned i, narenas;
-
- assert(malloc_initialized());
-
- tsd = tsd_fetch();
-
- witness_postfork_child(tsd);
- /* Release all mutexes, now that fork() has completed. */
- base_postfork_child(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_child(tsd_tsdn(tsd), arena);
- }
- prof_postfork_child(tsd_tsdn(tsd));
- malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
- tcache_postfork_child(tsd_tsdn(tsd));
- ctl_postfork_child(tsd_tsdn(tsd));
-}
-
-/******************************************************************************/
diff --git a/sources/jemalloc/src/je_mb.c b/sources/jemalloc/src/je_mb.c
deleted file mode 100755
index dc2c0a25..00000000
--- a/sources/jemalloc/src/je_mb.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_MB_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/sources/jemalloc/src/je_mutex.c b/sources/jemalloc/src/je_mutex.c
deleted file mode 100755
index 6333e73d..00000000
--- a/sources/jemalloc/src/je_mutex.c
+++ /dev/null
@@ -1,158 +0,0 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
-
-#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifdef JEMALLOC_LAZY_LOCK
-bool isthreaded = false;
-#endif
-#ifdef JEMALLOC_MUTEX_INIT_CB
-static bool postpone_init = true;
-static malloc_mutex_t *postponed_mutexes = NULL;
-#endif
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void pthread_create_once(void);
-#endif
-
-/******************************************************************************/
-/*
- * We intercept pthread_create() calls in order to toggle isthreaded if the
- * process goes multi-threaded.
- */
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
- if (pthread_create_fptr == NULL) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- }
-
- isthreaded = true;
-}
-
-JEMALLOC_EXPORT int
-pthread_create(pthread_t *__restrict thread,
- const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg)
-{
- static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
- pthread_once(&once_control, pthread_create_once);
-
- return (pthread_create_fptr(thread, attr, start_routine, arg));
-}
-#endif
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t));
-#endif
-
-bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
-{
-
-#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
- InitializeSRWLock(&mutex->lock);
-# else
- if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT))
- return (true);
-# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- mutex->lock = OS_UNFAIR_LOCK_INIT;
-#elif (defined(JEMALLOC_OSSPIN))
- mutex->lock = 0;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- if (postpone_init) {
- mutex->postponed_next = postponed_mutexes;
- postponed_mutexes = mutex;
- } else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock,
- bootstrap_calloc) != 0)
- return (true);
- }
-#else
- pthread_mutexattr_t attr;
-
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
- pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
- if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
- pthread_mutexattr_destroy(&attr);
- return (true);
- }
- pthread_mutexattr_destroy(&attr);
-#endif
- if (config_debug)
- witness_init(&mutex->witness, name, rank, NULL);
- return (false);
-}
-
-void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- malloc_mutex_lock(tsdn, mutex);
-}
-
-void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- malloc_mutex_unlock(tsdn, mutex);
-}
-
-void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(tsdn, mutex);
-#else
- if (malloc_mutex_init(mutex, mutex->witness.name,
- mutex->witness.rank)) {
- malloc_printf("<jemalloc>: Error re-initializing mutex in "
- "child\n");
- if (opt_abort)
- abort();
- }
-#endif
-}
-
-bool
-malloc_mutex_boot(void)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- postpone_init = false;
- while (postponed_mutexes != NULL) {
- if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- bootstrap_calloc) != 0)
- return (true);
- postponed_mutexes = postponed_mutexes->postponed_next;
- }
-#endif
- return (false);
-}
diff --git a/sources/jemalloc/src/je_nstime.c b/sources/jemalloc/src/je_nstime.c
deleted file mode 100755
index 0948e29f..00000000
--- a/sources/jemalloc/src/je_nstime.c
+++ /dev/null
@@ -1,194 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define BILLION UINT64_C(1000000000)
-
-void
-nstime_init(nstime_t *time, uint64_t ns)
-{
-
- time->ns = ns;
-}
-
-void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
-
- time->ns = sec * BILLION + nsec;
-}
-
-uint64_t
-nstime_ns(const nstime_t *time)
-{
-
- return (time->ns);
-}
-
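The malloc_mutex_prefork()/malloc_mutex_postfork_*() trio deleted above is the per-mutex building block for the fork protection described earlier: during initialization jemalloc registers the three phases with pthread_atfork(3). A stripped-down sketch of that registration pattern, with a hypothetical single lock standing in for jemalloc's full lock set (this is not jemalloc's actual code):

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Quiesce: acquire every lock so none is held mid-operation at fork(). */
static void prefork(void) { pthread_mutex_lock(&state_lock); }

/* Parent: simply release; the lock state is still valid. */
static void postfork_parent(void) { pthread_mutex_unlock(&state_lock); }

/*
 * Child: the forking thread is the only survivor and holds the lock, so it
 * may release it; jemalloc instead re-initializes mutexes here on platforms
 * without an unlock-in-child guarantee, as malloc_mutex_postfork_child()
 * above shows.
 */
static void postfork_child(void) { pthread_mutex_unlock(&state_lock); }

static void install_fork_handlers(void) {
	/* Registered once, e.g. from a library constructor. */
	pthread_atfork(prefork, postfork_parent, postfork_child);
}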
-uint64_t -nstime_sec(const nstime_t *time) -{ - - return (time->ns / BILLION); -} - -uint64_t -nstime_nsec(const nstime_t *time) -{ - - return (time->ns % BILLION); -} - -void -nstime_copy(nstime_t *time, const nstime_t *source) -{ - - *time = *source; -} - -int -nstime_compare(const nstime_t *a, const nstime_t *b) -{ - - return ((a->ns > b->ns) - (a->ns < b->ns)); -} - -void -nstime_add(nstime_t *time, const nstime_t *addend) -{ - - assert(UINT64_MAX - time->ns >= addend->ns); - - time->ns += addend->ns; -} - -void -nstime_subtract(nstime_t *time, const nstime_t *subtrahend) -{ - - assert(nstime_compare(time, subtrahend) >= 0); - - time->ns -= subtrahend->ns; -} - -void -nstime_imultiply(nstime_t *time, uint64_t multiplier) -{ - - assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << - 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); - - time->ns *= multiplier; -} - -void -nstime_idivide(nstime_t *time, uint64_t divisor) -{ - - assert(divisor != 0); - - time->ns /= divisor; -} - -uint64_t -nstime_divide(const nstime_t *time, const nstime_t *divisor) -{ - - assert(divisor->ns != 0); - - return (time->ns / divisor->ns); -} - -#ifdef _WIN32 -# define NSTIME_MONOTONIC true -static void -nstime_get(nstime_t *time) -{ - FILETIME ft; - uint64_t ticks_100ns; - - GetSystemTimeAsFileTime(&ft); - ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - - nstime_init(time, ticks_100ns * 100); -} -#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE -# define NSTIME_MONOTONIC true -static void -nstime_get(nstime_t *time) -{ - struct timespec ts; - - clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); - nstime_init2(time, ts.tv_sec, ts.tv_nsec); -} -#elif JEMALLOC_HAVE_CLOCK_MONOTONIC -# define NSTIME_MONOTONIC true -static void -nstime_get(nstime_t *time) -{ - struct timespec ts; - - clock_gettime(CLOCK_MONOTONIC, &ts); - nstime_init2(time, ts.tv_sec, ts.tv_nsec); -} -#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME -# define NSTIME_MONOTONIC true -static void -nstime_get(nstime_t *time) -{ - - nstime_init(time, mach_absolute_time()); -} -#else -# define NSTIME_MONOTONIC false -static void -nstime_get(nstime_t *time) -{ - struct timeval tv; - - gettimeofday(&tv, NULL); - nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); -} -#endif - -#ifdef JEMALLOC_JET -#undef nstime_monotonic -#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic) -#endif -bool -nstime_monotonic(void) -{ - - return (NSTIME_MONOTONIC); -#undef NSTIME_MONOTONIC -} -#ifdef JEMALLOC_JET -#undef nstime_monotonic -#define nstime_monotonic JEMALLOC_N(nstime_monotonic) -nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic); -#endif - -#ifdef JEMALLOC_JET -#undef nstime_update -#define nstime_update JEMALLOC_N(n_nstime_update) -#endif -bool -nstime_update(nstime_t *time) -{ - nstime_t old_time; - - nstime_copy(&old_time, time); - nstime_get(time); - - /* Handle non-monotonic clocks. 
*/
- if (unlikely(nstime_compare(&old_time, time) > 0)) {
- nstime_copy(time, &old_time);
- return (true);
- }
-
- return (false);
-}
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
-#endif
diff --git a/sources/jemalloc/src/je_pages.c b/sources/jemalloc/src/je_pages.c
deleted file mode 100755
index 7698e49b..00000000
--- a/sources/jemalloc/src/je_pages.c
+++ /dev/null
@@ -1,302 +0,0 @@
-#define JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#include <sys/sysctl.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifndef _WIN32
-# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
-# define PAGES_PROT_DECOMMIT (PROT_NONE)
-static int mmap_flags;
-#endif
-static bool os_overcommits;
-
-/******************************************************************************/
-
-void *
-pages_map(void *addr, size_t size, bool *commit)
-{
- void *ret;
-
- assert(size != 0);
-
- if (os_overcommits)
- *commit = true;
-
-#ifdef _WIN32
- /*
- * If VirtualAlloc can't allocate at the given address when one is
- * given, it fails and returns NULL.
- */
- ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
- PAGE_READWRITE);
-#else
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
- {
- int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
- }
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- pages_unmap(ret, size);
- ret = NULL;
- }
-#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
- if (munmap(addr, size) == -1)
-#endif
- {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
- "VirtualFree"
-#else
- "munmap"
-#endif
- "(): %s\n", buf);
- if (opt_abort)
- abort();
- }
-}
-
-void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit)
-{
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size, commit);
- if (new_addr == ret)
- return (ret);
- if (new_addr)
- pages_unmap(new_addr, size);
- return (NULL);
- }
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0)
- pages_unmap(addr, leadsize);
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- return (ret);
- }
-#endif
-}
-
-static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
-
- if (os_overcommits)
- return (true);
-
-#ifdef _WIN32
- return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
- PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
-#else
- {
- int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
- if (result == MAP_FAILED)
- return (true);
- if (result != addr) {
- /*
- * We succeeded in mapping memory, but not in the right
- * place.
- */ - pages_unmap(result, size); - return (true); - } - return (false); - } -#endif -} - -bool -pages_commit(void *addr, size_t size) -{ - - return (pages_commit_impl(addr, size, true)); -} - -bool -pages_decommit(void *addr, size_t size) -{ - - return (pages_commit_impl(addr, size, false)); -} - -bool -pages_purge(void *addr, size_t size) -{ - bool unzeroed; - -#ifdef _WIN32 - VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); - unzeroed = true; -#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \ - defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) -# if defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE -# define JEMALLOC_MADV_ZEROS false -# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# else -# error No madvise(2) flag defined for purging unused dirty pages -# endif - int err = madvise(addr, size, JEMALLOC_MADV_PURGE); - unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); -# undef JEMALLOC_MADV_PURGE -# undef JEMALLOC_MADV_ZEROS -#else - /* Last resort no-op. */ - unzeroed = true; -#endif - return (unzeroed); -} - -bool -pages_huge(void *addr, size_t size) -{ - - assert(PAGE_ADDR2BASE(addr) == addr); - assert(PAGE_CEILING(size) == size); - -#ifdef JEMALLOC_HAVE_MADVISE_HUGE - return (madvise(addr, size, MADV_HUGEPAGE) != 0); -#else - return (false); -#endif -} - -bool -pages_nohuge(void *addr, size_t size) -{ - - assert(PAGE_ADDR2BASE(addr) == addr); - assert(PAGE_CEILING(size) == size); - -#ifdef JEMALLOC_HAVE_MADVISE_HUGE - return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); -#else - return (false); -#endif -} - -#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT -static bool -os_overcommits_sysctl(void) -{ - int vm_overcommit; - size_t sz; - - sz = sizeof(vm_overcommit); - if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) - return (false); /* Error. */ - - return ((vm_overcommit & 0x3) == 0); -} -#endif - -#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY -/* - * Use syscall(2) rather than {open,read,close}(2) when possible to avoid - * reentry during bootstrapping if another library has interposed system call - * wrappers. - */ -static bool -os_overcommits_proc(void) -{ - int fd; - char buf[1]; - ssize_t nread; - -#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) - fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); -#else - fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); -#endif - if (fd == -1) - return (false); /* Error. */ - -#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) - nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); -#else - nread = read(fd, &buf, sizeof(buf)); -#endif - -#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) - syscall(SYS_close, fd); -#else - close(fd); -#endif - - if (nread < 1) - return (false); /* Error. */ - /* - * /proc/sys/vm/overcommit_memory meanings: - * 0: Heuristic overcommit. - * 1: Always overcommit. - * 2: Never overcommit. 
- */
- return (buf[0] == '0' || buf[0] == '1');
-}
-#endif
-
-void
-pages_boot(void)
-{
-
-#ifndef _WIN32
- mmap_flags = MAP_PRIVATE | MAP_ANON;
-#endif
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
- os_overcommits = os_overcommits_sysctl();
-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
- os_overcommits = os_overcommits_proc();
-# ifdef MAP_NORESERVE
- if (os_overcommits)
- mmap_flags |= MAP_NORESERVE;
-# endif
-#else
- os_overcommits = false;
-#endif
-}
diff --git a/sources/jemalloc/src/je_prng.c b/sources/jemalloc/src/je_prng.c
deleted file mode 100755
index 76646a2a..00000000
--- a/sources/jemalloc/src/je_prng.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/sources/jemalloc/src/je_prof.c b/sources/jemalloc/src/je_prof.c
deleted file mode 100755
index c89dade1..00000000
--- a/sources/jemalloc/src/je_prof.c
+++ /dev/null
@@ -1,2355 +0,0 @@
-#define JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
-#include <libunwind.h>
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-#include <unwind.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-bool opt_prof = false;
-bool opt_prof_active = true;
-bool opt_prof_thread_active_init = true;
-size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool opt_prof_gdump = false;
-bool opt_prof_final = false;
-bool opt_prof_leak = false;
-bool opt_prof_accum = false;
-char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/*
- * Initialized as opt_prof_active, and accessed via
- * prof_active_[gs]et{_unlocked,}().
- */
-bool prof_active;
-static malloc_mutex_t prof_active_mtx;
-
-/*
- * Initialized as opt_prof_thread_active_init, and accessed via
- * prof_thread_active_init_[gs]et().
- */
-static bool prof_thread_active_init;
-static malloc_mutex_t prof_thread_active_init_mtx;
-
-/*
- * Initialized as opt_prof_gdump, and accessed via
- * prof_gdump_[gs]et{_unlocked,}().
- */
-bool prof_gdump_val;
-static malloc_mutex_t prof_gdump_mtx;
-
-uint64_t prof_interval = 0;
-
-size_t lg_prof_sample;
-
-/*
- * Table of mutexes that are shared among gctx's. These are leaf locks, so
- * there is no problem with using them for more than one gctx at the same time.
- * The primary motivation for this sharing though is that gctx's are ephemeral,
- * and destroying mutexes causes complications for systems that allocate when
- * creating/destroying mutexes.
- */
-static malloc_mutex_t *gctx_locks;
-static unsigned cum_gctxs; /* Atomic counter. */
-
-/*
- * Table of mutexes that are shared among tdata's. No operations require
- * holding multiple tdata locks, so there is no problem with using them for more
- * than one tdata at the same time, even though a gctx lock may be acquired
- * while holding a tdata lock.
- */
-static malloc_mutex_t *tdata_locks;
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
- * structure that knows about all backtraces currently captured.
- */
-static ckh_t bt2gctx;
-static malloc_mutex_t bt2gctx_mtx;
-
-/*
- * Tree of all extant prof_tdata_t structures, regardless of state,
- * {attached,detached,expired}.
- */ -static prof_tdata_tree_t tdatas; -static malloc_mutex_t tdatas_mtx; - -static uint64_t next_thr_uid; -static malloc_mutex_t next_thr_uid_mtx; - -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; -static size_t prof_dump_buf_end; -static int prof_dump_fd; - -/* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); -static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached); -static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached); -static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); - -/******************************************************************************/ -/* Red-black trees. */ - -JEMALLOC_INLINE_C int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) -{ - uint64_t a_thr_uid = a->thr_uid; - uint64_t b_thr_uid = b->thr_uid; - int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); - if (ret == 0) { - uint64_t a_thr_discrim = a->thr_discrim; - uint64_t b_thr_discrim = b->thr_discrim; - ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < - b_thr_discrim); - if (ret == 0) { - uint64_t a_tctx_uid = a->tctx_uid; - uint64_t b_tctx_uid = b->tctx_uid; - ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < - b_tctx_uid); - } - } - return (ret); -} - -rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, - tctx_link, prof_tctx_comp) - -JEMALLOC_INLINE_C int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) -{ - unsigned a_len = a->bt.len; - unsigned b_len = b->bt.len; - unsigned comp_len = (a_len < b_len) ? a_len : b_len; - int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) - ret = (a_len > b_len) - (a_len < b_len); - return (ret); -} - -rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, - prof_gctx_comp) - -JEMALLOC_INLINE_C int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) -{ - int ret; - uint64_t a_uid = a->thr_uid; - uint64_t b_uid = b->thr_uid; - - ret = ((a_uid > b_uid) - (a_uid < b_uid)); - if (ret == 0) { - uint64_t a_discrim = a->thr_discrim; - uint64_t b_discrim = b->thr_discrim; - - ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); - } - return (ret); -} - -rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, - prof_tdata_comp) - -/******************************************************************************/ - -void -prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - if (updated) { - /* - * Compute a new sample threshold. This isn't very important in - * practice, because this function is rarely executed, so the - * potential for sample bias is minimal except in contrived - * programs. 
- */ - tdata = prof_tdata_get(tsd, true); - if (tdata != NULL) - prof_sample_threshold_update(tdata); - } - - if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); - tctx->prepared = false; - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) - prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); - } -} - -void -prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx) -{ - - prof_tctx_set(tsdn, ptr, usize, tctx); - - malloc_mutex_lock(tsdn, tctx->tdata->lock); - tctx->cnts.curobjs++; - tctx->cnts.curbytes += usize; - if (opt_prof_accum) { - tctx->cnts.accumobjs++; - tctx->cnts.accumbytes += usize; - } - tctx->prepared = false; - malloc_mutex_unlock(tsdn, tctx->tdata->lock); -} - -void -prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - - malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); - assert(tctx->cnts.curobjs > 0); - assert(tctx->cnts.curbytes >= usize); - tctx->cnts.curobjs--; - tctx->cnts.curbytes -= usize; - - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) - prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); -} - -void -bt_init(prof_bt_t *bt, void **vec) -{ - - cassert(config_prof); - - bt->vec = vec; - bt->len = 0; -} - -JEMALLOC_INLINE_C void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) -{ - - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - if (tdata != NULL) { - assert(!tdata->enq); - tdata->enq = true; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - -JEMALLOC_INLINE_C void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) -{ - - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - if (tdata != NULL) { - bool idump, gdump; - - assert(tdata->enq); - tdata->enq = false; - idump = tdata->enq_idump; - tdata->enq_idump = false; - gdump = tdata->enq_gdump; - tdata->enq_gdump = false; - - if (idump) - prof_idump(tsd_tsdn(tsd)); - if (gdump) - prof_gdump(tsd_tsdn(tsd)); - } -} - -#ifdef JEMALLOC_PROF_LIBUNWIND -void -prof_backtrace(prof_bt_t *bt) -{ - int nframes; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - nframes = unw_backtrace(bt->vec, PROF_BT_MAX); - if (nframes <= 0) - return; - bt->len = nframes; -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - - cassert(config_prof); - - return (_URC_NO_REASON); -} - -static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ - prof_unwind_data_t *data = (prof_unwind_data_t *)arg; - void *ip; - - cassert(config_prof); - - ip = (void *)_Unwind_GetIP(context); - if (ip == NULL) - return (_URC_END_OF_STACK); - data->bt->vec[data->bt->len] = ip; - data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); - - return (_URC_NO_REASON); -} - -void -prof_backtrace(prof_bt_t *bt) -{ - prof_unwind_data_t data = {bt, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void -prof_backtrace(prof_bt_t *bt) -{ -#define BT_FRAME(i) \ - if ((i) < PROF_BT_MAX) { \ - void *p; \ - if (__builtin_frame_address(i) == 0) \ - return; \ - p = __builtin_return_address(i); \ - if (p == NULL) \ - return; \ - bt->vec[(i)] = p; \ - bt->len = (i) + 1; \ - } else \ - return; - - cassert(config_prof); - - BT_FRAME(0) - 
BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) - BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) -#undef BT_FRAME -} -#else -void -prof_backtrace(prof_bt_t *bt) -{ - - cassert(config_prof); - not_reached(); -} -#endif - -static malloc_mutex_t * -prof_gctx_mutex_choose(void) -{ - unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); - - return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); -} - -static malloc_mutex_t * -prof_tdata_mutex_choose(uint64_t thr_uid) -{ - - return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); -} - -static prof_gctx_t * -prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) -{ - /* - * Create a single allocation that has space for vec of length bt->len. - */ - size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, - size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), - true); - if (gctx == NULL) - return (NULL); - gctx->lock = prof_gctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - gctx->nlimbo = 1; - tctx_tree_new(&gctx->tctxs); - /* Duplicate bt. */ - memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); - gctx->bt.vec = gctx->vec; - gctx->bt.len = bt->len; - return (gctx); -} - -static void -prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) -{ - - cassert(config_prof); - - /* - * Check that gctx is still unused by any thread cache before destroying - * it. 
prof_lookup() increments gctx->nlimbo in order to avoid a race - * condition with this function, as does prof_tctx_destroy() in order to - * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. - */ - prof_enter(tsd, tdata_self); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - assert(gctx->nlimbo != 0); - if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { - /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) - not_reached(); - prof_leave(tsd, tdata_self); - /* Destroy gctx. */ - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true); - } else { - /* - * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). - */ - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_leave(tsd, tdata_self); - } -} - -static bool -prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) -{ - - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - if (opt_prof_accum) - return (false); - if (tctx->cnts.curobjs != 0) - return (false); - if (tctx->prepared) - return (false); - return (true); -} - -static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) -{ - - if (opt_prof_accum) - return (false); - if (!tctx_tree_empty(&gctx->tctxs)) - return (false); - if (gctx->nlimbo != 0) - return (false); - return (true); -} - -static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) -{ - prof_tdata_t *tdata = tctx->tdata; - prof_gctx_t *gctx = tctx->gctx; - bool destroy_tdata, destroy_tctx, destroy_gctx; - - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - assert(tctx->cnts.curobjs == 0); - assert(tctx->cnts.curbytes == 0); - assert(!opt_prof_accum); - assert(tctx->cnts.accumobjs == 0); - assert(tctx->cnts.accumbytes == 0); - - ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - switch (tctx->state) { - case prof_tctx_state_nominal: - tctx_tree_remove(&gctx->tctxs, tctx); - destroy_tctx = true; - if (prof_gctx_should_destroy(gctx)) { - /* - * Increment gctx->nlimbo in order to keep another - * thread from winning the race to destroy gctx while - * this one has gctx->lock dropped. Without this, it - * would be possible for another thread to: - * - * 1) Sample an allocation associated with gctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_gctx_try_destroy(gctx). - * - * The result would be that gctx no longer exists by the - * time this thread accesses it in - * prof_gctx_try_destroy(). - */ - gctx->nlimbo++; - destroy_gctx = true; - } else - destroy_gctx = false; - break; - case prof_tctx_state_dumping: - /* - * A dumping thread needs tctx to remain valid until dumping - * has finished. Change state such that the dumping thread will - * complete destruction during a late dump iteration phase. 
- */ - tctx->state = prof_tctx_state_purgatory; - destroy_tctx = false; - destroy_gctx = false; - break; - default: - not_reached(); - destroy_tctx = false; - destroy_gctx = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - if (destroy_gctx) { - prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, - tdata); - } - - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - if (destroy_tdata) - prof_tdata_destroy(tsd, tdata, false); - - if (destroy_tctx) - idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true); -} - -static bool -prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) -{ - union { - prof_gctx_t *p; - void *v; - } gctx; - union { - prof_bt_t *p; - void *v; - } btkey; - bool new_gctx; - - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ - gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); - if (gctx.v == NULL) { - prof_leave(tsd, tdata); - return (true); - } - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. */ - prof_leave(tsd, tdata); - idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true); - return (true); - } - new_gctx = true; - } else { - /* - * Increment nlimbo, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); - gctx.p->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); - new_gctx = false; - } - prof_leave(tsd, tdata); - - *p_btkey = btkey.v; - *p_gctx = gctx.p; - *p_new_gctx = new_gctx; - return (false); -} - -prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) -{ - union { - prof_tctx_t *p; - void *v; - } ret; - prof_tdata_t *tdata; - bool not_found; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return (NULL); - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) /* Note double negative! */ - ret.p->prepared = true; - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (not_found) { - void *btkey; - prof_gctx_t *gctx; - bool new_gctx, error; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ - if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) - return (NULL); - - /* Link a prof_tctx_t into gctx for this thread. 
*/ - ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), - size2index(sizeof(prof_tctx_t)), false, NULL, true, - arena_ichoose(tsd, NULL), true); - if (ret.p == NULL) { - if (new_gctx) - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - return (NULL); - } - ret.p->tdata = tdata; - ret.p->thr_uid = tdata->thr_uid; - ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - ret.p->gctx = gctx; - ret.p->tctx_uid = tdata->tctx_uid_next++; - ret.p->prepared = true; - ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (error) { - if (new_gctx) - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true); - return (NULL); - } - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - ret.p->state = prof_tctx_state_nominal; - tctx_tree_insert(&gctx->tctxs, ret.p); - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - - return (ret.p); -} - -/* - * The bodies of this function and prof_leakcheck() are compiled out unless heap - * profiling is enabled, so that it is possible to compile jemalloc with - * floating point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a workaround for - * versions of glibc that don't properly save/restore floating point registers - * during dynamic lazy symbol loading (which internally calls into whatever - * malloc implementation happens to be integrated into the application). Note - * that some compilers (e.g. gcc 4.8) may use floating point registers for fast - * memory moves, so jemalloc must be compiled with such optimizations disabled - * (e.g. - * -mno-sse) in order for the workaround to be complete. - */ -void -prof_sample_threshold_update(prof_tdata_t *tdata) -{ -#ifdef JEMALLOC_PROF - uint64_t r; - double u; - - if (!config_prof) - return; - - if (lg_prof_sample == 0) { - tdata->bytes_until_sample = 0; - return; - } - - /* - * Compute sample interval as a geometrically distributed random - * variable with mean (2^lg_prof_sample). 
- *
- *                              __        __
- *                             |  log(u)  |                     1
- * tdata->bytes_until_sample = | -------- |, where p = ---------------
- *                             | log(1-p) |             lg_prof_sample
- *                                                     2
- *
- * For more information on the math, see:
- *
- *   Non-Uniform Random Variate Generation
- *   Luc Devroye
- *   Springer-Verlag, New York, 1986
- *   pp 500
- *   (http://luc.devroye.org/rnbookindex.html)
- */
- r = prng_lg_range_u64(&tdata->prng_state, 53);
- u = (double)r * (1.0/9007199254740992.0L);
- tdata->bytes_until_sample = (uint64_t)(log(u) /
- log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
- + (uint64_t)1U;
-#endif
-}
-
-#ifdef JEMALLOC_JET
-static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
- size_t *tdata_count = (size_t *)arg;
-
- (*tdata_count)++;
-
- return (NULL);
-}
-
-size_t
-prof_tdata_count(void)
-{
- size_t tdata_count = 0;
- tsdn_t *tsdn;
-
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
- (void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
-
- return (tdata_count);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-size_t
-prof_bt_count(void)
-{
- size_t bt_count;
- tsd_t *tsd;
- prof_tdata_t *tdata;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
- return (0);
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
- bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
- return (bt_count);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
-#endif
-static int
-prof_dump_open(bool propagate_err, const char *filename)
-{
- int fd;
-
- fd = creat(filename, 0644);
- if (fd == -1 && !propagate_err) {
- malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
- filename);
- if (opt_abort)
- abort();
- }
-
- return (fd);
-}
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
-prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
-#endif
-
-static bool
-prof_dump_flush(bool propagate_err)
-{
- bool ret = false;
- ssize_t err;
-
- cassert(config_prof);
-
- err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
- if (err == -1) {
- if (!propagate_err) {
- malloc_write("<jemalloc>: write() failed during heap "
- "profile flush\n");
- if (opt_abort)
- abort();
- }
- ret = true;
- }
- prof_dump_buf_end = 0;
-
- return (ret);
-}
-
-static bool
-prof_dump_close(bool propagate_err)
-{
- bool ret;
-
- assert(prof_dump_fd != -1);
- ret = prof_dump_flush(propagate_err);
- close(prof_dump_fd);
- prof_dump_fd = -1;
-
- return (ret);
-}
-
-static bool
-prof_dump_write(bool propagate_err, const char *s)
-{
- size_t i, slen, n;
-
- cassert(config_prof);
-
- i = 0;
- slen = strlen(s);
- while (i < slen) {
- /* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_dump_flush(propagate_err) && propagate_err)
- return (true);
-
- if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
- /* Finish writing. */
- n = slen - i;
- } else {
- /* Write as much of s as will fit. */
- n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
- }
- memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
- prof_dump_buf_end += n;
- i += n;
- }
-
- return (false);
-}
-
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static bool
-prof_dump_printf(bool propagate_err, const char *format, ...)
-{ - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - - return (ret); -} - -static void -prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) -{ - - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - malloc_mutex_lock(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - return; - case prof_tctx_state_nominal: - tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - - memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); - - tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - tdata->cnt_summed.accumobjs += - tctx->dump_cnts.accumobjs; - tdata->cnt_summed.accumbytes += - tctx->dump_cnts.accumbytes; - } - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - not_reached(); - } -} - -static void -prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) -{ - - malloc_mutex_assert_owner(tsdn, gctx->lock); - - gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; - gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; - } -} - -static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ - tsdn_t *tsdn = (tsdn_t *)arg; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); - break; - default: - not_reached(); - } - - return (NULL); -} - -struct prof_tctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) -{ - struct prof_tctx_dump_iter_arg_s *arg = - (struct prof_tctx_dump_iter_arg_s *)opaque; - - malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - case prof_tctx_state_nominal: - /* Not captured by this dump. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - if (prof_dump_printf(arg->propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " - "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, - tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) - return (tctx); - break; - default: - not_reached(); - } - return (NULL); -} - -static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ - tsdn_t *tsdn = (tsdn_t *)arg; - prof_tctx_t *ret; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. 
*/ - break; - case prof_tctx_state_dumping: - tctx->state = prof_tctx_state_nominal; - break; - case prof_tctx_state_purgatory: - ret = tctx; - goto label_return; - default: - not_reached(); - } - - ret = NULL; -label_return: - return (ret); -} - -static void -prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) -{ - - cassert(config_prof); - - malloc_mutex_lock(tsdn, gctx->lock); - - /* - * Increment nlimbo so that gctx won't go away before dump. - * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ - gctx->nlimbo++; - gctx_tree_insert(gctxs, gctx); - - memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - - malloc_mutex_unlock(tsdn, gctx->lock); -} - -struct prof_gctx_merge_iter_arg_s { - tsdn_t *tsdn; - size_t leak_ngctx; -}; - -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) -{ - struct prof_gctx_merge_iter_arg_s *arg = - (struct prof_gctx_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, - (void *)arg->tsdn); - if (gctx->cnt_summed.curobjs != 0) - arg->leak_ngctx++; - malloc_mutex_unlock(arg->tsdn, gctx->lock); - - return (NULL); -} - -static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) -{ - prof_tdata_t *tdata = prof_tdata_get(tsd, false); - prof_gctx_t *gctx; - - /* - * Standard tree iteration won't work here, because as soon as we - * decrement gctx->nlimbo and unlock gctx, another thread can - * concurrently destroy it, which will corrupt the tree. Therefore, - * tear down the tree one node at a time during iteration. - */ - while ((gctx = gctx_tree_first(gctxs)) != NULL) { - gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - { - prof_tctx_t *next; - - next = NULL; - do { - prof_tctx_t *to_destroy = - tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, - (void *)tsd_tsdn(tsd)); - if (to_destroy != NULL) { - next = tctx_tree_next(&gctx->tctxs, - to_destroy); - tctx_tree_remove(&gctx->tctxs, - to_destroy); - idalloctm(tsd_tsdn(tsd), to_destroy, - NULL, true, true); - } else - next = NULL; - } while (next != NULL); - } - gctx->nlimbo--; - if (prof_gctx_should_destroy(gctx)) { - gctx->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } -} - -struct prof_tdata_merge_iter_arg_s { - tsdn_t *tsdn; - prof_cnt_t cnt_all; -}; - -static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *opaque) -{ - struct prof_tdata_merge_iter_arg_s *arg = - (struct prof_tdata_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, tdata->lock); - if (!tdata->expired) { - size_t tabind; - union { - prof_tctx_t *p; - void *v; - } tctx; - - tdata->dumping = true; - memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); - for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) - prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - - arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; - arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; - if (opt_prof_accum) { - arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; - arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; - } - } else - tdata->dumping = false; - malloc_mutex_unlock(arg->tsdn, tdata->lock); - - return (NULL); -} - -static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, 
void *arg) -{ - bool propagate_err = *(bool *)arg; - - if (!tdata->dumping) - return (NULL); - - if (prof_dump_printf(propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", - tdata->thr_uid, tdata->cnt_summed.curobjs, - tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, - tdata->cnt_summed.accumbytes, - (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) - return (tdata); - return (NULL); -} - -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) -#endif -static bool -prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) -{ - bool ret; - - if (prof_dump_printf(propagate_err, - "heap_v2/%"FMTu64"\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); - - malloc_mutex_lock(tsdn, &tdatas_mtx); - ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, - (void *)&propagate_err) != NULL); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - return (ret); -} -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header) -prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); -#endif - -static bool -prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, - const prof_bt_t *bt, prof_gctx_tree_t *gctxs) -{ - bool ret; - unsigned i; - struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); - malloc_mutex_assert_owner(tsdn, gctx->lock); - - /* Avoid dumping such gctx's that have no useful data. */ - if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { - assert(gctx->cnt_summed.curobjs == 0); - assert(gctx->cnt_summed.curbytes == 0); - assert(gctx->cnt_summed.accumobjs == 0); - assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - - if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - - if (prof_dump_printf(propagate_err, - "\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, - gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - - prof_tctx_dump_iter_arg.tsdn = tsdn; - prof_tctx_dump_iter_arg.propagate_err = propagate_err; - if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&prof_tctx_dump_iter_arg) != NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return (ret); -} - -#ifndef _WIN32 -JEMALLOC_FORMAT_PRINTF(1, 2) -static int -prof_open_maps(const char *format, ...) 
-{
- int mfd;
- va_list ap;
- char filename[PATH_MAX + 1];
-
- va_start(ap, format);
- malloc_vsnprintf(filename, sizeof(filename), format, ap);
- va_end(ap);
- mfd = open(filename, O_RDONLY);
-
- return (mfd);
-}
-#endif
-
-static int
-prof_getpid(void)
-{
-
-#ifdef _WIN32
- return (GetCurrentProcessId());
-#else
- return (getpid());
-#endif
-}
-
-static bool
-prof_dump_maps(bool propagate_err)
-{
- bool ret;
- int mfd;
-
- cassert(config_prof);
-#ifdef __FreeBSD__
- mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
- mfd = -1; // Not implemented
-#else
- {
- int pid = prof_getpid();
-
- mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1)
- mfd = prof_open_maps("/proc/%d/maps", pid);
- }
-#endif
- if (mfd != -1) {
- ssize_t nread;
-
- if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- nread = 0;
- do {
- prof_dump_buf_end += nread;
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- /* Make space in prof_dump_buf before read(). */
- if (prof_dump_flush(propagate_err) &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- }
- nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
- PROF_DUMP_BUFSIZE - prof_dump_buf_end);
- } while (nread > 0);
- } else {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- if (mfd != -1)
- close(mfd);
- return (ret);
-}
-
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
-static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename)
-{
-
-#ifdef JEMALLOC_PROF
- /*
- * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
- if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ?
"s" : ""); - malloc_printf( - ": Run jeprof on \"%s\" for leak detail\n", - filename); - } -#endif -} - -struct prof_gctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) -{ - prof_gctx_t *ret; - struct prof_gctx_dump_iter_arg_s *arg = - (struct prof_gctx_dump_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - - if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, - gctxs)) { - ret = gctx; - goto label_return; - } - - ret = NULL; -label_return: - malloc_mutex_unlock(arg->tsdn, gctx->lock); - return (ret); -} - -static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *tdata; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - size_t tabind; - union { - prof_gctx_t *p; - void *v; - } gctx; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; - prof_gctx_tree_t gctxs; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - prof_enter(tsd, tdata); - - /* - * Put gctx's in limbo and clear their counters in preparation for - * summing. - */ - gctx_tree_new(&gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) - prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs); - - /* - * Iterate over tdatas, and for the non-expired ones snapshot their tctx - * stats and merge them into the associated gctx's. - */ - prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd); - memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, - (void *)&prof_tdata_merge_iter_arg); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - /* Merge tctx stats into gctx's. */ - prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd); - prof_gctx_merge_iter_arg.leak_ngctx = 0; - gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, - (void *)&prof_gctx_merge_iter_arg); - - prof_leave(tsd, tdata); - - /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; - - /* Dump profile header. */ - if (prof_dump_header(tsd_tsdn(tsd), propagate_err, - &prof_tdata_merge_iter_arg.cnt_all)) - goto label_write_error; - - /* Dump per gctx profile stats. */ - prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd); - prof_gctx_dump_iter_arg.propagate_err = propagate_err; - if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, - (void *)&prof_gctx_dump_iter_arg) != NULL) - goto label_write_error; - - /* Dump /proc//maps if possible. 
*/
- if (prof_dump_maps(propagate_err))
- goto label_write_error;
-
- if (prof_dump_close(propagate_err))
- goto label_open_close_error;
-
- prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
- return (false);
-label_write_error:
- prof_dump_close(propagate_err);
-label_open_close_error:
- prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
- return (true);
-}
-
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
-static void
-prof_dump_filename(char *filename, char v, uint64_t vseq)
-{
-
- cassert(config_prof);
-
- if (vseq != VSEQ_INVALID) {
- /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
- } else {
- /* "<prefix>.<pid>.<seq>.<v>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
- }
- prof_dump_seq++;
-}
-
-static void
-prof_fdump(void)
-{
- tsd_t *tsd;
- char filename[DUMP_FILENAME_BUFSIZE];
-
- cassert(config_prof);
- assert(opt_prof_final);
- assert(opt_prof_prefix[0] != '\0');
-
- if (!prof_booted)
- return;
- tsd = tsd_fetch();
-
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, opt_prof_leak);
-}
-
-void
-prof_idump(tsdn_t *tsdn)
-{
- tsd_t *tsd;
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- if (!prof_booted || tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
- return;
- if (tdata->enq) {
- tdata->enq_idump = true;
- return;
- }
-
- if (opt_prof_prefix[0] != '\0') {
- char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'i', prof_dump_iseq);
- prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
- }
-}
-
-bool
-prof_mdump(tsd_t *tsd, const char *filename)
-{
- char filename_buf[DUMP_FILENAME_BUFSIZE];
-
- cassert(config_prof);
-
- if (!opt_prof || !prof_booted)
- return (true);
-
- if (filename == NULL) {
- /* No filename specified, so automatically generate one.
*/ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - filename = filename_buf; - } - return (prof_dump(tsd, true, filename, false)); -} - -void -prof_gdump(tsdn_t *tsdn) -{ - tsd_t *tsd; - prof_tdata_t *tdata; - - cassert(config_prof); - - if (!prof_booted || tsdn_null(tsdn)) - return; - tsd = tsdn_tsd(tsdn); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return; - if (tdata->enq) { - tdata->enq_gdump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; - malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); - prof_dump(tsd, false, filename, false); - } -} - -static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) - return (false); - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - -JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(tsdn_t *tsdn) -{ - uint64_t thr_uid; - - malloc_mutex_lock(tsdn, &next_thr_uid_mtx); - thr_uid = next_thr_uid; - next_thr_uid++; - malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - - return (thr_uid); -} - -static prof_tdata_t * -prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) -{ - prof_tdata_t *tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. 
*/ - tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), - size2index(sizeof(prof_tdata_t)), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (tdata == NULL) - return (NULL); - - tdata->lock = prof_tdata_mutex_choose(thr_uid); - tdata->thr_uid = thr_uid; - tdata->thr_discrim = thr_discrim; - tdata->thread_name = thread_name; - tdata->attached = true; - tdata->expired = false; - tdata->tctx_uid_next = 0; - - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { - idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); - return (NULL); - } - - tdata->prng_state = (uint64_t)(uintptr_t)tdata; - prof_sample_threshold_update(tdata); - - tdata->enq = false; - tdata->enq_idump = false; - tdata->enq_gdump = false; - - tdata->dumping = false; - tdata->active = active; - - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - return (tdata); -} - -prof_tdata_t * -prof_tdata_init(tsd_t *tsd) -{ - - return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, - NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); -} - -static bool -prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) -{ - - if (tdata->attached && !even_if_attached) - return (false); - if (ckh_count(&tdata->bt2tctx) != 0) - return (false); - return (true); -} - -static bool -prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) -{ - - malloc_mutex_assert_owner(tsdn, tdata->lock); - - return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); -} - -static void -prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) -{ - - malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - - tdata_tree_remove(&tdatas, tdata); - - assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); - - if (tdata->thread_name != NULL) - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); - ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); -} - -static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) -{ - - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); -} - -static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) -{ - bool destroy_tdata; - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, - true); - /* - * Only detach if !destroy_tdata, because detaching would allow - * another thread to win the race to destroy tdata. - */ - if (!destroy_tdata) - tdata->attached = false; - tsd_prof_tdata_set(tsd, NULL); - } else - destroy_tdata = false; - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (destroy_tdata) - prof_tdata_destroy(tsd, tdata, true); -} - -prof_tdata_t * -prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) -{ - uint64_t thr_uid = tdata->thr_uid; - uint64_t thr_discrim = tdata->thr_discrim + 1; - char *thread_name = (tdata->thread_name != NULL) ? 
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; - bool active = tdata->active; - - prof_tdata_detach(tsd, tdata); - return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active)); -} - -static bool -prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) -{ - bool destroy_tdata; - - malloc_mutex_lock(tsdn, tdata->lock); - if (!tdata->expired) { - tdata->expired = true; - destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tsdn, tdata, false); - } else - destroy_tdata = false; - malloc_mutex_unlock(tsdn, tdata->lock); - - return (destroy_tdata); -} - -static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ - tsdn_t *tsdn = (tsdn_t *)arg; - - return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); -} - -void -prof_reset(tsd_t *tsd, size_t lg_sample) -{ - prof_tdata_t *next; - - assert(lg_sample < (sizeof(uint64_t) << 3)); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - - lg_prof_sample = lg_sample; - - next = NULL; - do { - prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, (void *)tsd); - if (to_destroy != NULL) { - next = tdata_tree_next(&tdatas, to_destroy); - prof_tdata_destroy_locked(tsd, to_destroy, false); - } else - next = NULL; - } while (next != NULL); - - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); -} - -void -prof_tdata_cleanup(tsd_t *tsd) -{ - prof_tdata_t *tdata; - - if (!config_prof) - return; - - tdata = tsd_prof_tdata_get(tsd); - if (tdata != NULL) - prof_tdata_detach(tsd, tdata); -} - -bool -prof_active_get(tsdn_t *tsdn) -{ - bool prof_active_current; - - malloc_mutex_lock(tsdn, &prof_active_mtx); - prof_active_current = prof_active; - malloc_mutex_unlock(tsdn, &prof_active_mtx); - return (prof_active_current); -} - -bool -prof_active_set(tsdn_t *tsdn, bool active) -{ - bool prof_active_old; - - malloc_mutex_lock(tsdn, &prof_active_mtx); - prof_active_old = prof_active; - prof_active = active; - malloc_mutex_unlock(tsdn, &prof_active_mtx); - return (prof_active_old); -} - -const char * -prof_thread_name_get(tsd_t *tsd) -{ - prof_tdata_t *tdata; - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (""); - return (tdata->thread_name != NULL ? tdata->thread_name : ""); -} - -static char * -prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) -{ - char *ret; - size_t size; - - if (thread_name == NULL) - return (NULL); - - size = strlen(thread_name) + 1; - if (size == 1) - return (""); - - ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (ret == NULL) - return (NULL); - memcpy(ret, thread_name, size); - return (ret); -} - -int -prof_thread_name_set(tsd_t *tsd, const char *thread_name) -{ - prof_tdata_t *tdata; - unsigned i; - char *s; - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (EAGAIN); - - /* Validate input. 
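prof_thread_name_set() below is what the documented "thread.prof.name" mallctl invokes, and the validation loop that follows rejects any name that is not printable. A usage sketch (unprefixed public API assumed):

	#include <jemalloc/jemalloc.h>

	static void
	label_this_thread(void)
	{
		/* Must satisfy the isgraph()/isblank() check below;
		 * the setter returns EFAULT otherwise. */
		const char *name = "worker-0";
		mallctl("thread.prof.name", NULL, NULL, (void *)&name,
		    sizeof(name));
	}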
*/ - if (thread_name == NULL) - return (EFAULT); - for (i = 0; thread_name[i] != '\0'; i++) { - char c = thread_name[i]; - if (!isgraph(c) && !isblank(c)) - return (EFAULT); - } - - s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); - if (s == NULL) - return (EAGAIN); - - if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); - tdata->thread_name = NULL; - } - if (strlen(s) > 0) - tdata->thread_name = s; - return (0); -} - -bool -prof_thread_active_get(tsd_t *tsd) -{ - prof_tdata_t *tdata; - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (false); - return (tdata->active); -} - -bool -prof_thread_active_set(tsd_t *tsd, bool active) -{ - prof_tdata_t *tdata; - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); - tdata->active = active; - return (false); -} - -bool -prof_thread_active_init_get(tsdn_t *tsdn) -{ - bool active_init; - - malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); - active_init = prof_thread_active_init; - malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); - return (active_init); -} - -bool -prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) -{ - bool active_init_old; - - malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); - active_init_old = prof_thread_active_init; - prof_thread_active_init = active_init; - malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); - return (active_init_old); -} - -bool -prof_gdump_get(tsdn_t *tsdn) -{ - bool prof_gdump_current; - - malloc_mutex_lock(tsdn, &prof_gdump_mtx); - prof_gdump_current = prof_gdump_val; - malloc_mutex_unlock(tsdn, &prof_gdump_mtx); - return (prof_gdump_current); -} - -bool -prof_gdump_set(tsdn_t *tsdn, bool gdump) -{ - bool prof_gdump_old; - - malloc_mutex_lock(tsdn, &prof_gdump_mtx); - prof_gdump_old = prof_gdump_val; - prof_gdump_val = gdump; - malloc_mutex_unlock(tsdn, &prof_gdump_mtx); - return (prof_gdump_old); -} - -void -prof_boot0(void) -{ - - cassert(config_prof); - - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); -} - -void -prof_boot1(void) -{ - - cassert(config_prof); - - /* - * opt_prof must be in its final state before any arenas are - * initialized, so this function must be executed early. - */ - - if (opt_prof_leak && !opt_prof) { - /* - * Enable opt_prof, but in such a way that profiles are never - * automatically dumped. 
- */ - opt_prof = true; - opt_prof_gdump = false; - } else if (opt_prof) { - if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); - } - } -} - -bool -prof_boot2(tsd_t *tsd) -{ - - cassert(config_prof); - - if (opt_prof) { - unsigned i; - - lg_prof_sample = opt_lg_prof_sample; - - prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx, "prof_active", - WITNESS_RANK_PROF_ACTIVE)) - return (true); - - prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", - WITNESS_RANK_PROF_GDUMP)) - return (true); - - prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx, - "prof_thread_active_init", - WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) - return (true); - - if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", - WITNESS_RANK_PROF_BT2GCTX)) - return (true); - - tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", - WITNESS_RANK_PROF_TDATAS)) - return (true); - - next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", - WITNESS_RANK_PROF_NEXT_THR_UID)) - return (true); - - if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", - WITNESS_RANK_PROF_DUMP_SEQ)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", - WITNESS_RANK_PROF_DUMP)) - return (true); - - if (opt_prof_final && opt_prof_prefix[0] != '\0' && - atexit(prof_fdump) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) - abort(); - } - - gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), - PROF_NCTX_LOCKS * sizeof(malloc_mutex_t)); - if (gctx_locks == NULL) - return (true); - for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", - WITNESS_RANK_PROF_GCTX)) - return (true); - } - - tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), - PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t)); - if (tdata_locks == NULL) - return (true); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) { - if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", - WITNESS_RANK_PROF_TDATA)) - return (true); - } - } - -#ifdef JEMALLOC_PROF_LIBGCC - /* - * Cause the backtracing machinery to allocate its internal state - * before enabling profiling. 
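The atexit(prof_fdump) registration above only happens when opt_prof_final is set and a dump prefix is configured. One documented way to arrange that is the application-provided malloc_conf symbol, which jemalloc reads at bootstrap; a sketch assuming an unprefixed build (option values illustrative):

	#include <stdlib.h>

	/* Picked up by jemalloc before main() runs. */
	const char *malloc_conf = "prof:true,prof_final:true,prof_leak:true";

	int
	main(void)
	{
		void *p = malloc(1 << 20);	/* large enough to be sampled */
		free(p);
		/* Process exit triggers prof_fdump(), which writes
		 * "<prefix>.<pid>.<seq>.f.heap" per prof_dump_filename(). */
		return (0);
	}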
- */ - _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif - - prof_booted = true; - - return (false); -} - -void -prof_prefork0(tsdn_t *tsdn) -{ - - if (opt_prof) { - unsigned i; - - malloc_mutex_prefork(tsdn, &prof_dump_mtx); - malloc_mutex_prefork(tsdn, &bt2gctx_mtx); - malloc_mutex_prefork(tsdn, &tdatas_mtx); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_prefork(tsdn, &tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_prefork(tsdn, &gctx_locks[i]); - } -} - -void -prof_prefork1(tsdn_t *tsdn) -{ - - if (opt_prof) { - malloc_mutex_prefork(tsdn, &prof_active_mtx); - malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); - malloc_mutex_prefork(tsdn, &prof_gdump_mtx); - malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); - malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); - } -} - -void -prof_postfork_parent(tsdn_t *tsdn) -{ - - if (opt_prof) { - unsigned i; - - malloc_mutex_postfork_parent(tsdn, - &prof_thread_active_init_mtx); - malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); - malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); - malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); - malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); - malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); - malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); - malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); - } -} - -void -prof_postfork_child(tsdn_t *tsdn) -{ - - if (opt_prof) { - unsigned i; - - malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); - malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); - malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); - malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); - malloc_mutex_postfork_child(tsdn, &prof_active_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); - malloc_mutex_postfork_child(tsdn, &tdatas_mtx); - malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); - malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); - } -} - -/******************************************************************************/ diff --git a/sources/jemalloc/src/je_quarantine.c b/sources/jemalloc/src/je_quarantine.c deleted file mode 100755 index 18903fb5..00000000 --- a/sources/jemalloc/src/je_quarantine.c +++ /dev/null @@ -1,183 +0,0 @@ -#define JEMALLOC_QUARANTINE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/* - * Quarantine pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) -#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) -#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
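The prefork/postfork trio above is hooked to fork(2) with the standard pthread_atfork() discipline: acquire every profiler mutex (in witness rank order) before the fork and release it on both sides afterward, so the child never inherits a lock held by a thread that no longer exists. An illustrative pattern, not jemalloc's code (jemalloc's postfork_child additionally reinitializes each mutex):

	#include <pthread.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

	static void prefork(void)         { pthread_mutex_lock(&big_lock); }
	static void postfork_parent(void) { pthread_mutex_unlock(&big_lock); }
	static void postfork_child(void)  { pthread_mutex_unlock(&big_lock); }

	static void
	install_fork_hooks(void)
	{
		pthread_atfork(prefork, postfork_parent, postfork_child);
	}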
*/ - -static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine); -static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, - size_t upper_bound); - -/******************************************************************************/ - -static quarantine_t * -quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs) -{ - quarantine_t *quarantine; - size_t size; - - size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * - sizeof(quarantine_obj_t)); - quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size), - false, NULL, true, arena_get(TSDN_NULL, 0, true), true); - if (quarantine == NULL) - return (NULL); - quarantine->curbytes = 0; - quarantine->curobjs = 0; - quarantine->first = 0; - quarantine->lg_maxobjs = lg_maxobjs; - - return (quarantine); -} - -void -quarantine_alloc_hook_work(tsd_t *tsd) -{ - quarantine_t *quarantine; - - if (!tsd_nominal(tsd)) - return; - - quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT); - /* - * Check again whether quarantine has been initialized, because - * quarantine_init() may have triggered recursive initialization. - */ - if (tsd_quarantine_get(tsd) == NULL) - tsd_quarantine_set(tsd, quarantine); - else - idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); -} - -static quarantine_t * -quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) -{ - quarantine_t *ret; - - ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1); - if (ret == NULL) { - quarantine_drain_one(tsd_tsdn(tsd), quarantine); - return (quarantine); - } - - ret->curbytes = quarantine->curbytes; - ret->curobjs = quarantine->curobjs; - if (quarantine->first + quarantine->curobjs <= (ZU(1) << - quarantine->lg_maxobjs)) { - /* objs ring buffer data are contiguous. */ - memcpy(ret->objs, &quarantine->objs[quarantine->first], - quarantine->curobjs * sizeof(quarantine_obj_t)); - } else { - /* objs ring buffer data wrap around. */ - size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - - quarantine->first; - size_t ncopy_b = quarantine->curobjs - ncopy_a; - - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a - * sizeof(quarantine_obj_t)); - memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * - sizeof(quarantine_obj_t)); - } - idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); - - tsd_quarantine_set(tsd, ret); - return (ret); -} - -static void -quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) -{ - quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof)); - idalloctm(tsdn, obj->ptr, NULL, false, true); - quarantine->curbytes -= obj->usize; - quarantine->curobjs--; - quarantine->first = (quarantine->first + 1) & ((ZU(1) << - quarantine->lg_maxobjs) - 1); -} - -static void -quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound) -{ - - while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(tsdn, quarantine); -} - -void -quarantine(tsd_t *tsd, void *ptr) -{ - quarantine_t *quarantine; - size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); - - cassert(config_fill); - assert(opt_quarantine); - - if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { - idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); - return; - } - /* - * Drain one or more objects if the quarantine size limit would be - * exceeded by appending ptr. 
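quarantine_grow() above doubles the ring buffer and must distinguish a contiguous live region from one that wraps past the end of the old array. The same copy-on-grow logic in isolation, as a self-contained sketch:

	#include <stdlib.h>
	#include <string.h>

	typedef struct {
		size_t first;	/* index of oldest element */
		size_t count;	/* live elements */
		size_t cap;	/* capacity, a power of two */
		void **slots;
	} ring_t;

	static int
	ring_grow(ring_t *r)
	{
		size_t newcap = r->cap << 1;
		void **ns = malloc(newcap * sizeof(void *));

		if (ns == NULL)
			return (1);
		if (r->first + r->count <= r->cap) {
			/* Live region is contiguous. */
			memcpy(ns, &r->slots[r->first],
			    r->count * sizeof(void *));
		} else {
			/* Live region wraps: copy tail piece, then head. */
			size_t ncopy_a = r->cap - r->first;
			memcpy(ns, &r->slots[r->first],
			    ncopy_a * sizeof(void *));
			memcpy(&ns[ncopy_a], r->slots,
			    (r->count - ncopy_a) * sizeof(void *));
		}
		free(r->slots);
		r->slots = ns;
		r->cap = newcap;
		r->first = 0;
		return (0);
	}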
- */ - if (quarantine->curbytes + usize > opt_quarantine) { - size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - - usize : 0; - quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound); - } - /* Grow the quarantine ring buffer if it's full. */ - if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) - quarantine = quarantine_grow(tsd, quarantine); - /* quarantine_grow() must free a slot if it fails to grow. */ - assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); - /* Append ptr if its size doesn't exceed the quarantine size. */ - if (quarantine->curbytes + usize <= opt_quarantine) { - size_t offset = (quarantine->first + quarantine->curobjs) & - ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine_obj_t *obj = &quarantine->objs[offset]; - obj->ptr = ptr; - obj->usize = usize; - quarantine->curbytes += usize; - quarantine->curobjs++; - if (config_fill && unlikely(opt_junk_free)) { - /* - * Only do redzone validation if Valgrind isn't in - * operation. - */ - if ((!config_valgrind || likely(!in_valgrind)) - && usize <= SMALL_MAXCLASS) - arena_quarantine_junk_small(ptr, usize); - else - memset(ptr, JEMALLOC_FREE_JUNK, usize); - } - } else { - assert(quarantine->curbytes == 0); - idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); - } -} - -void -quarantine_cleanup(tsd_t *tsd) -{ - quarantine_t *quarantine; - - if (!config_fill) - return; - - quarantine = tsd_quarantine_get(tsd); - if (quarantine != NULL) { - quarantine_drain(tsd_tsdn(tsd), quarantine, 0); - idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); - tsd_quarantine_set(tsd, NULL); - } -} diff --git a/sources/jemalloc/src/je_rtree.c b/sources/jemalloc/src/je_rtree.c deleted file mode 100755 index f2e2997d..00000000 --- a/sources/jemalloc/src/je_rtree.c +++ /dev/null @@ -1,132 +0,0 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -static unsigned -hmin(unsigned ha, unsigned hb) -{ - - return (ha < hb ? ha : hb); -} - -/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ -bool -rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc) -{ - unsigned bits_in_leaf, height, i; - - assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / - RTREE_BITS_PER_LEVEL)); - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL - : (bits % RTREE_BITS_PER_LEVEL); - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; - if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) - height++; - } else - height = 1; - assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); - - rtree->alloc = alloc; - rtree->dalloc = dalloc; - rtree->height = height; - - /* Root level. */ - rtree->levels[0].subtree = NULL; - rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : - bits_in_leaf; - rtree->levels[0].cumbits = rtree->levels[0].bits; - /* Interior levels. */ - for (i = 1; i < height-1; i++) { - rtree->levels[i].subtree = NULL; - rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; - rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + - RTREE_BITS_PER_LEVEL; - } - /* Leaf level. */ - if (height > 1) { - rtree->levels[height-1].subtree = NULL; - rtree->levels[height-1].bits = bits_in_leaf; - rtree->levels[height-1].cumbits = bits; - } - - /* Compute lookup table to be used by rtree_start_level(). 
*/ - for (i = 0; i < RTREE_HEIGHT_MAX; i++) { - rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - - 1); - } - - return (false); -} - -static void -rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) -{ - - if (level + 1 < rtree->height) { - size_t nchildren, i; - - nchildren = ZU(1) << rtree->levels[level].bits; - for (i = 0; i < nchildren; i++) { - rtree_node_elm_t *child = node[i].child; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); - } - } - rtree->dalloc(node); -} - -void -rtree_delete(rtree_t *rtree) -{ - unsigned i; - - for (i = 0; i < rtree->height; i++) { - rtree_node_elm_t *subtree = rtree->levels[i].subtree; - if (subtree != NULL) - rtree_delete_subtree(rtree, subtree, i); - } -} - -static rtree_node_elm_t * -rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) -{ - rtree_node_elm_t *node; - - if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { - spin_t spinner; - - /* - * Another thread is already in the process of initializing. - * Spin-wait until initialization is complete. - */ - spin_init(&spinner); - do { - spin_adaptive(&spinner); - node = atomic_read_p((void **)elmp); - } while (node == RTREE_NODE_INITIALIZING); - } else { - node = rtree->alloc(ZU(1) << rtree->levels[level].bits); - if (node == NULL) - return (NULL); - atomic_write_p((void **)elmp, node); - } - - return (node); -} - -rtree_node_elm_t * -rtree_subtree_read_hard(rtree_t *rtree, unsigned level) -{ - - return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); -} - -rtree_node_elm_t * -rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) -{ - - return (rtree_node_init(rtree, level+1, &elm->child)); -} diff --git a/sources/jemalloc/src/je_spin.c b/sources/jemalloc/src/je_spin.c deleted file mode 100755 index 5242d95a..00000000 --- a/sources/jemalloc/src/je_spin.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_SPIN_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_stats.c b/sources/jemalloc/src/je_stats.c deleted file mode 100755 index b76afc5a..00000000 --- a/sources/jemalloc/src/je_stats.c +++ /dev/null @@ -1,1173 +0,0 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, (void *)v, &sz, NULL, 0); \ -} while (0) - -#define CTL_M2_GET(n, i, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = (i); \ - xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ -} while (0) - -#define CTL_M2_M4_GET(n, i, j, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = (i); \ - mib[4] = (j); \ - xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ -} while (0) - -/******************************************************************************/ -/* Data. 
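The CTL_M2_GET()/CTL_M2_M4_GET() macros above wrap jemalloc's documented MIB interface: translate the dotted name to a MIB once, patch in the arena or bin index, then query by MIB. Equivalent public-API usage (unprefixed names assumed, error handling elided):

	#include <jemalloc/jemalloc.h>

	static size_t
	arena_pactive(unsigned arena_ind)
	{
		size_t mib[6];
		size_t miblen = sizeof(mib) / sizeof(size_t);
		size_t v, sz = sizeof(v);

		mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);
		mib[2] = arena_ind;	/* replace the "0" component */
		mallctlbymib(mib, miblen, (void *)&v, &sz, NULL, 0);
		return (v);
	}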
*/ - -bool opt_stats_print = false; - -size_t stats_cactive = 0; - -/******************************************************************************/ - -static void -stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, bool large, bool huge, unsigned i) -{ - size_t page; - bool in_gap, in_gap_prev; - unsigned nbins, j; - - CTL_GET("arenas.page", &page, size_t); - - CTL_GET("arenas.nbins", &nbins, unsigned); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"bins\": [\n"); - } else { - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs" - " curruns regs pgs util nfills" - " nflushes newruns reruns\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs" - " curruns regs pgs util newruns" - " reruns\n"); - } - } - for (j = 0, in_gap = false; j < nbins; j++) { - uint64_t nruns; - size_t reg_size, run_size, curregs; - size_t curruns; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t nreruns; - - CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, - uint64_t); - in_gap_prev = in_gap; - in_gap = (nruns == 0); - - if (!json && in_gap_prev && !in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); - - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); - if (config_tcache) { - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, - &nfills, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, - &nflushes, uint64_t); - } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, - size_t); - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t{\n" - "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" - "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" - "\t\t\t\t\t\t\"curregs\": %zu,\n" - "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n", - nmalloc, - ndalloc, - curregs, - nrequests); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" - "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", - nfills, - nflushes); - } - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" - "\t\t\t\t\t\t\"curruns\": %zu\n" - "\t\t\t\t\t}%s\n", - nreruns, - curruns, - (j + 1 < nbins) ? "," : ""); - } else if (!in_gap) { - size_t availregs, milli; - char util[6]; /* "x.yyy". */ - - availregs = nregs * curruns; - milli = (availregs != 0) ? (1000 * curregs) / availregs - : 1000; - - if (milli > 1000) { - /* - * Race detected: the counters were read in - * separate mallctl calls and concurrent - * operations happened in between. In this case - * no meaningful utilization can be computed. 
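The utilization column below is rendered as a fixed-point "x.yyy" string from integer thousandths, with values over 1000 serving as the cross-mallctl race sentinel the comment above describes. The same arithmetic in isolation:

	#include <stdio.h>

	static void
	fmt_util(char out[6], size_t curregs, size_t availregs)
	{
		size_t milli = (availregs != 0) ?
		    (1000 * curregs) / availregs : 1000;

		if (milli > 1000)
			snprintf(out, 6, " race");	/* counters raced */
		else if (milli == 1000)
			snprintf(out, 6, "1");
		else
			snprintf(out, 6, "0.%03zu", milli);
	}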
- */ - malloc_snprintf(util, sizeof(util), " race"); - } else if (milli < 10) { - malloc_snprintf(util, sizeof(util), - "0.00%zu", milli); - } else if (milli < 100) { - malloc_snprintf(util, sizeof(util), "0.0%zu", - milli); - } else if (milli < 1000) { - malloc_snprintf(util, sizeof(util), "0.%zu", - milli); - } else { - assert(milli == 1000); - malloc_snprintf(util, sizeof(util), "1"); - } - - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nfills, nflushes, - nruns, nreruns); - } else { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nruns, nreruns); - } - } - } - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t]%s\n", (large || huge) ? "," : ""); - } else { - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - } -} - -static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, bool huge, unsigned i) -{ - unsigned nbins, nlruns, j; - bool in_gap, in_gap_prev; - - CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"lruns\": [\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "large: size ind allocated nmalloc" - " ndalloc nrequests curruns\n"); - } - for (j = 0, in_gap = false; j < nlruns; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; - - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, - &nrequests, uint64_t); - in_gap_prev = in_gap; - in_gap = (nrequests == 0); - - if (!json && in_gap_prev && !in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - - CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, - size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t{\n" - "\t\t\t\t\t\t\"curruns\": %zu\n" - "\t\t\t\t\t}%s\n", - curruns, - (j + 1 < nlruns) ? "," : ""); - } else if (!in_gap) { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - run_size, nbins + j, curruns * run_size, nmalloc, - ndalloc, nrequests, curruns); - } - } - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t]%s\n", huge ? 
"," : ""); - } else { - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - } -} - -static void -stats_arena_hchunks_print(void (*write_cb)(void *, const char *), - void *cbopaque, bool json, unsigned i) -{ - unsigned nbins, nlruns, nhchunks, j; - bool in_gap, in_gap_prev; - - CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - CTL_GET("arenas.nhchunks", &nhchunks, unsigned); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"hchunks\": [\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "huge: size ind allocated nmalloc" - " ndalloc nrequests curhchunks\n"); - } - for (j = 0, in_gap = false; j < nhchunks; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t hchunk_size, curhchunks; - - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, - &nrequests, uint64_t); - in_gap_prev = in_gap; - in_gap = (nrequests == 0); - - if (!json && in_gap_prev && !in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, - &curhchunks, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t{\n" - "\t\t\t\t\t\t\"curhchunks\": %zu\n" - "\t\t\t\t\t}%s\n", - curhchunks, - (j + 1 < nhchunks) ? "," : ""); - } else if (!in_gap) { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - hchunk_size, nbins + nlruns + j, - curhchunks * hchunk_size, nmalloc, ndalloc, - nrequests, curhchunks); - } - } - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t]\n"); - } else { - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } - } -} - -static void -stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, unsigned i, bool bins, bool large, bool huge) -{ - unsigned nthreads; - const char *dss; - ssize_t lg_dirty_mult, decay_time; - size_t page, pactive, pdirty, mapped, retained; - size_t metadata_mapped, metadata_allocated; - uint64_t npurge, nmadvise, purged; - size_t small_allocated; - uint64_t small_nmalloc, small_ndalloc, small_nrequests; - size_t large_allocated; - uint64_t large_nmalloc, large_ndalloc, large_nrequests; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; - - CTL_GET("arenas.page", &page, size_t); - - CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"nthreads\": %u,\n", nthreads); - } else { - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - } - - CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"dss\": \"%s\",\n", dss); - } else { - malloc_cprintf(write_cb, cbopaque, - "dss allocation precedence: %s\n", dss); - } - - CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); - } else { - if (opt_purge == purge_mode_ratio) { - if (lg_dirty_mult >= 0) { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: %u:1\n", - (1U << lg_dirty_mult)); - } else { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: N/A\n"); - } - } - } - - 
CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"decay_time\": %zd,\n", decay_time); - } else { - if (opt_purge == purge_mode_decay) { - if (decay_time >= 0) { - malloc_cprintf(write_cb, cbopaque, - "decay time: %zd\n", decay_time); - } else { - malloc_cprintf(write_cb, cbopaque, - "decay time: N/A\n"); - } - } - } - - CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); - CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); - CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); - CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); - CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"pactive\": %zu,\n", pactive); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"pdirty\": %zu,\n", pdirty); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"purged\": %"FMTu64",\n", purged); - } else { - malloc_cprintf(write_cb, cbopaque, - "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64 - ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); - } - - CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, - uint64_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"small\": {\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t},\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc" - " ndalloc nrequests\n"); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated, small_nmalloc, small_ndalloc, - small_nrequests); - } - - CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, - uint64_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"large\": {\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t},\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - large_allocated, large_nmalloc, large_ndalloc, - large_nrequests); - } - - CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); - CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, 
uint64_t); - CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, - uint64_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"huge\": {\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t},\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated + large_allocated + huge_allocated, - small_nmalloc + large_nmalloc + huge_nmalloc, - small_ndalloc + large_ndalloc + huge_ndalloc, - small_nrequests + large_nrequests + huge_nrequests); - } - if (!json) { - malloc_cprintf(write_cb, cbopaque, - "active: %12zu\n", pactive * page); - } - - CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"mapped\": %zu,\n", mapped); - } else { - malloc_cprintf(write_cb, cbopaque, - "mapped: %12zu\n", mapped); - } - - CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"retained\": %zu,\n", retained); - } else { - malloc_cprintf(write_cb, cbopaque, - "retained: %12zu\n", retained); - } - - CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, - size_t); - CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, - size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\"metadata\": {\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t}%s\n", (bins || large || huge) ? "," : ""); - } else { - malloc_cprintf(write_cb, cbopaque, - "metadata: mapped: %zu, allocated: %zu\n", - metadata_mapped, metadata_allocated); - } - - if (bins) { - stats_arena_bins_print(write_cb, cbopaque, json, large, huge, - i); - } - if (large) - stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); - if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, json, i); -} - -static void -stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, bool more) -{ - const char *cpv; - bool bv; - unsigned uv; - uint32_t u32v; - uint64_t u64v; - ssize_t ssv; - size_t sv, bsz, usz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - usz = sizeof(unsigned); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"version\": \"%s\",\n", cpv); - } else - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - - /* config. */ -#define CONFIG_WRITE_BOOL_JSON(n, c) \ - if (json) { \ - CTL_GET("config."#n, &bv, bool); \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %s%s\n", bv ? 
"true" : "false", \ - (c)); \ - } - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"config\": {\n"); - } - - CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") - - CTL_GET("config.debug", &bv, bool); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); - } else { - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - } - - CONFIG_WRITE_BOOL_JSON(fill, ",") - CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"malloc_conf\": \"%s\",\n", - config_malloc_conf); - } else { - malloc_cprintf(write_cb, cbopaque, - "config.malloc_conf: \"%s\"\n", config_malloc_conf); - } - - CONFIG_WRITE_BOOL_JSON(munmap, ",") - CONFIG_WRITE_BOOL_JSON(prof, ",") - CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") - CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") - CONFIG_WRITE_BOOL_JSON(stats, ",") - CONFIG_WRITE_BOOL_JSON(tcache, ",") - CONFIG_WRITE_BOOL_JSON(tls, ",") - CONFIG_WRITE_BOOL_JSON(utrace, ",") - CONFIG_WRITE_BOOL_JSON(valgrind, ",") - CONFIG_WRITE_BOOL_JSON(xmalloc, "") - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t},\n"); - } -#undef CONFIG_WRITE_BOOL_JSON - - /* opt. */ -#define OPT_WRITE_BOOL(n, c) \ - if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ - "false", (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } \ - } -#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ - bool bv2; \ - if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ - "false", (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s ("#m": %s)\n", bv ? "true" \ - : "false", bv2 ? 
"true" : "false"); \ - } \ - } \ -} -#define OPT_WRITE_UNSIGNED(n, c) \ - if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %u\n", uv); \ - } \ - } -#define OPT_WRITE_SIZE_T(n, c) \ - if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } \ - } -#define OPT_WRITE_SSIZE_T(n, c) \ - if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } \ - } -#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ - ssize_t ssv2; \ - if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd ("#m": %zd)\n", \ - ssv, ssv2); \ - } \ - } \ -} -#define OPT_WRITE_CHAR_P(n, c) \ - if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ - if (json) { \ - malloc_cprintf(write_cb, cbopaque, \ - "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ - } else { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } \ - } - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"opt\": {\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - } - OPT_WRITE_BOOL(abort, ",") - OPT_WRITE_SIZE_T(lg_chunk, ",") - OPT_WRITE_CHAR_P(dss, ",") - OPT_WRITE_UNSIGNED(narenas, ",") - OPT_WRITE_CHAR_P(purge, ",") - if (json || opt_purge == purge_mode_ratio) { - OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, - arenas.lg_dirty_mult, ",") - } - if (json || opt_purge == purge_mode_decay) { - OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") - } - OPT_WRITE_CHAR_P(junk, ",") - OPT_WRITE_SIZE_T(quarantine, ",") - OPT_WRITE_BOOL(redzone, ",") - OPT_WRITE_BOOL(zero, ",") - OPT_WRITE_BOOL(utrace, ",") - OPT_WRITE_BOOL(xmalloc, ",") - OPT_WRITE_BOOL(tcache, ",") - OPT_WRITE_SSIZE_T(lg_tcache_max, ",") - OPT_WRITE_BOOL(thp, ",") - OPT_WRITE_BOOL(prof, ",") - OPT_WRITE_CHAR_P(prof_prefix, ",") - OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") - OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, - ",") - OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") - OPT_WRITE_BOOL(prof_accum, ",") - OPT_WRITE_SSIZE_T(lg_prof_interval, ",") - OPT_WRITE_BOOL(prof_gdump, ",") - OPT_WRITE_BOOL(prof_final, ",") - OPT_WRITE_BOOL(prof_leak, ",") - /* - * stats_print is always emitted, so as long as stats_print comes last - * it's safe to unconditionally omit the comma here (rather than having - * to conditionally omit it elsewhere depending on configuration). - */ - OPT_WRITE_BOOL(stats_print, "") - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t},\n"); - } - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_BOOL_MUTABLE -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - /* arenas. 
*/ - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"arenas\": {\n"); - } - - CTL_GET("arenas.narenas", &uv, unsigned); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"narenas\": %u,\n", uv); - } else - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv); - } else if (opt_purge == purge_mode_ratio) { - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: " - "%u:1\n", (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: " - "N/A\n"); - } - } - CTL_GET("arenas.decay_time", &ssv, ssize_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"decay_time\": %zd,\n", ssv); - } else if (opt_purge == purge_mode_decay) { - malloc_cprintf(write_cb, cbopaque, - "Unused dirty page decay time: %zd%s\n", - ssv, (ssv < 0) ? " (no decay)" : ""); - } - - CTL_GET("arenas.quantum", &sv, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"quantum\": %zu,\n", sv); - } else - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); - - CTL_GET("arenas.page", &sv, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"page\": %zu,\n", sv); - } else - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - - if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"tcache_max\": %zu,\n", sv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - } - - if (json) { - unsigned nbins, nlruns, nhchunks, i; - - CTL_GET("arenas.nbins", &nbins, unsigned); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"nbins\": %u,\n", nbins); - - if (config_tcache) { - CTL_GET("arenas.nhbins", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"nhbins\": %u,\n", uv); - } - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"bin\": [\n"); - for (i = 0; i < nbins; i++) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t{\n"); - - CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"size\": %zu,\n", sv); - - CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); - - CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"run_size\": %zu\n", sv); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); - } - malloc_cprintf(write_cb, cbopaque, - "\t\t\t],\n"); - - CTL_GET("arenas.nlruns", &nlruns, unsigned); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"nlruns\": %u,\n", nlruns); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"lrun\": [\n"); - for (i = 0; i < nlruns; i++) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t{\n"); - - CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"size\": %zu\n", sv); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t}%s\n", (i + 1 < nlruns) ? 
"," : ""); - } - malloc_cprintf(write_cb, cbopaque, - "\t\t\t],\n"); - - CTL_GET("arenas.nhchunks", &nhchunks, unsigned); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"nhchunks\": %u,\n", nhchunks); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"hchunk\": [\n"); - for (i = 0; i < nhchunks; i++) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t{\n"); - - CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t\t\"size\": %zu\n", sv); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); - } - malloc_cprintf(write_cb, cbopaque, - "\t\t\t]\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t}%s\n", (config_prof || more) ? "," : ""); - } - - /* prof. */ - if (config_prof && json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"prof\": {\n"); - - CTL_GET("prof.thread_active_init", &bv, bool); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : - "false"); - - CTL_GET("prof.active", &bv, bool); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); - - CTL_GET("prof.gdump", &bv, bool); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); - - CTL_GET("prof.interval", &u64v, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"interval\": %"FMTu64",\n", u64v); - - CTL_GET("prof.lg_sample", &ssv, ssize_t); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"lg_sample\": %zd\n", ssv); - - malloc_cprintf(write_cb, cbopaque, - "\t\t}%s\n", more ? "," : ""); - } -} - -static void -stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, bool merged, bool unmerged, bool bins, bool large, bool huge) -{ - size_t *cactive; - size_t allocated, active, metadata, resident, mapped, retained; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.metadata", &metadata, size_t); - CTL_GET("stats.resident", &resident, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - CTL_GET("stats.retained", &retained, size_t); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"stats\": {\n"); - - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive)); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"allocated\": %zu,\n", allocated); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"active\": %zu,\n", active); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"metadata\": %zu,\n", metadata); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"resident\": %zu,\n", resident); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"mapped\": %zu,\n", mapped); - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"retained\": %zu\n", retained); - - malloc_cprintf(write_cb, cbopaque, - "\t\t}%s\n", (merged || unmerged) ? 
"," : ""); - } else { - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, metadata: %zu," - " resident: %zu, mapped: %zu, retained: %zu\n", - allocated, active, metadata, resident, mapped, retained); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", - atomic_read_z(cactive)); - } - - if (merged || unmerged) { - unsigned narenas; - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\"stats.arenas\": {\n"); - } - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, j, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", (void *)initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - /* Merged stats. */ - if (merged && (ninitialized > 1 || !unmerged)) { - /* Print merged arena stats. */ - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t\"merged\": {\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - } - stats_arena_print(write_cb, cbopaque, json, - narenas, bins, large, huge); - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t\t}%s\n", unmerged ? "," : - ""); - } - } - - /* Unmerged stats. */ - if (unmerged) { - for (i = j = 0; i < narenas; i++) { - if (initialized[i]) { - if (json) { - j++; - malloc_cprintf(write_cb, - cbopaque, - "\t\t\t\"%u\": {\n", - i); - } else { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", - i); - } - stats_arena_print(write_cb, - cbopaque, json, i, bins, - large, huge); - if (json) { - malloc_cprintf(write_cb, - cbopaque, - "\t\t\t}%s\n", (j < - ninitialized) ? "," - : ""); - } - } - } - } - } - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t\t}\n"); - } - } -} - -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool json = false; - bool general = true; - bool merged = config_stats; - bool unmerged = config_stats; - bool bins = true; - bool large = true; - bool huge = true; - - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, - sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write("<jemalloc>: Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; - } - malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); - } - - if (opts != NULL) { - unsigned i; - - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'J': - json = true; - break; - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - case 'h': - huge = false; - break; - default:; - } - } - } - - if (json) { - malloc_cprintf(write_cb, cbopaque, - "{\n" - "\t\"jemalloc\": {\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - } - - if (general) { - bool more = (merged || unmerged); - stats_general_print(write_cb, cbopaque, json, more); - } - if (config_stats) { - stats_print_helper(write_cb, cbopaque, json, merged, unmerged, - bins, large, huge); - } - if (json) { - malloc_cprintf(write_cb, cbopaque, - "\t}\n" - "}\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "--- End jemalloc statistics ---\n"); - } -} diff --git a/sources/jemalloc/src/je_tcache.c b/sources/jemalloc/src/je_tcache.c deleted file mode 100755 index e3b04be6..00000000 --- a/sources/jemalloc/src/je_tcache.c +++ /dev/null @@ -1,619 +0,0 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; - -tcache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ - -unsigned nhbins; -size_t tcache_maxclass; - -tcaches_t *tcaches; - -/* Index of first element within tcaches that has never been used. */ -static unsigned tcaches_past; - -/* Head of singly linked list tracking available tcaches elements. */ -static tcaches_t *tcaches_avail; - -/* Protects tcaches{,_past,_avail}. */ -static malloc_mutex_t tcaches_mtx; - -/******************************************************************************/ - -size_t -tcache_salloc(tsdn_t *tsdn, const void *ptr) -{ - - return (arena_salloc(tsdn, ptr, false)); -} - -void -tcache_event_hard(tsd_t *tsd, tcache_t *tcache) -{ - szind_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ - if (binind < NBINS) { - tcache_bin_flush_small(tsd, tcache, tbin, binind, - tbin->ncached - tbin->low_water + (tbin->low_water - >> 2)); - } else { - tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; - } else if (tbin->low_water < 0) { - /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. 
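Looking back at the option switch in stats_print() above: each letter in the opts string suppresses one section ('g' general, 'm' merged, 'a' per-arena, 'b' bins, 'l' large, 'h' huge) and 'J' selects JSON output. Through the public entry point, that is:

	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		/* JSON, merged totals only: drop general info ('g')
		 * and the per-arena breakdown ('a'). */
		malloc_stats_print(NULL, NULL, "Jga");
		return (0);
	}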
- */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) - tcache->next_gc_bin = 0; -} - -void * -tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind, bool *tcache_success) -{ - void *ret; - - arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? - tcache->prof_accumbytes : 0); - if (config_prof) - tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin, tcache_success); - - return (ret); -} - -void -tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < NBINS); - assert(rem <= tbin->ncached); - - arena = arena_choose(tsd, NULL); - assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - *(tbin->avail - 1)); - arena_t *bin_arena = extent_node_arena_get(&chunk->node); - arena_bin_t *bin = &bin_arena->bins[binind]; - - if (config_prof && bin_arena == arena) { - if (arena_prof_accum(tsd_tsdn(tsd), arena, - tcache->prof_accumbytes)) - prof_idump(tsd_tsdn(tsd)); - tcache->prof_accumbytes = 0; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - if (config_stats && bin_arena == arena) { - assert(!merged_stats); - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = *(tbin->avail - 1 - i); - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == bin_arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_bits_t *bitselm = - arena_bitselm_get_mutable(chunk, pageind); - arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), - bin_arena, chunk, ptr, bitselm); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ - *(tbin->avail - 1 - ndeferred) = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); - } - if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_bin_t *bin = &arena->bins[binind]; - malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - } - - memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * - sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < nhbins); - assert(rem <= tbin->ncached); - - arena = arena_choose(tsd, NULL); - assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena associated with the first object. 
*/ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - *(tbin->avail - 1)); - arena_t *locked_arena = extent_node_arena_get(&chunk->node); - UNUSED bool idump; - - if (config_prof) - idump = false; - malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); - if ((config_prof || config_stats) && locked_arena == arena) { - if (config_prof) { - idump = arena_prof_accum_locked(arena, - tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; - } - if (config_stats) { - merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = *(tbin->avail - 1 - i); - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == - locked_arena) { - arena_dalloc_large_junked_locked(tsd_tsdn(tsd), - locked_arena, chunk, ptr); - } else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. - */ - *(tbin->avail - 1 - ndeferred) = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); - if (config_prof && idump) - prof_idump(tsd_tsdn(tsd)); - arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - - ndeferred); - } - if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); - } - - memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * - sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -static void -tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Link into list of extant tcaches. */ - malloc_mutex_lock(tsdn, &arena->lock); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(tsdn, &arena->lock); - } -} - -static void -tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(tsdn, &arena->lock); - if (config_debug) { - bool in_ql = false; - tcache_t *iter; - ql_foreach(iter, &arena->tcache_ql, link) { - if (iter == tcache) { - in_ql = true; - break; - } - } - assert(in_ql); - } - ql_remove(&arena->tcache_ql, tcache, link); - tcache_stats_merge(tsdn, tcache, arena); - malloc_mutex_unlock(tsdn, &arena->lock); - } -} - -void -tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, - arena_t *newarena) -{ - - tcache_arena_dissociate(tsdn, tcache, oldarena); - tcache_arena_associate(tsdn, tcache, newarena); -} - -tcache_t * -tcache_get_hard(tsd_t *tsd) -{ - arena_t *arena; - - if (!tcache_enabled_get()) { - if (tsd_nominal(tsd)) - tcache_enabled_set(false); /* Memoize. 
*/ - return (NULL); - } - arena = arena_choose(tsd, NULL); - if (unlikely(arena == NULL)) - return (NULL); - return (tcache_create(tsd_tsdn(tsd), arena)); -} - -tcache_t * -tcache_create(tsdn_t *tsdn, arena_t *arena) -{ - tcache_t *tcache; - size_t size, stack_offset; - unsigned i; - - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); - /* Naturally align the pointer stacks. */ - size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); - /* Avoid false cacheline sharing. */ - size = sa2u(size, CACHELINE); - - tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, - arena_get(TSDN_NULL, 0, true)); - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tsdn, tcache, arena); - - ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); - - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - /* - * avail points past the available space. Allocations will - * access the slots toward higher addresses (for the benefit of - * prefetch). - */ - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - } - - return (tcache); -} - -static void -tcache_destroy(tsd_t *tsd, tcache_t *tcache) -{ - arena_t *arena; - unsigned i; - - arena = arena_choose(tsd, NULL); - tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena); - - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - } - } - - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); - } - } - - if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) - prof_idump(tsd_tsdn(tsd)); - - idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true); -} - -void -tcache_cleanup(tsd_t *tsd) -{ - tcache_t *tcache; - - if (!config_tcache) - return; - - if ((tcache = tsd_tcache_get(tsd)) != NULL) { - tcache_destroy(tsd, tcache); - tsd_tcache_set(tsd, NULL); - } -} - -void -tcache_enabled_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ - unsigned i; - - cassert(config_stats); - - malloc_mutex_assert_owner(tsdn, &arena->lock); - - /* Merge and reset tcache stats. 
*/ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(tsdn, &bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(tsdn, &bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } -} - -static bool -tcaches_create_prep(tsd_t *tsd) { - bool err; - - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); - - if (tcaches == NULL) { - tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) * - (MALLOCX_TCACHE_MAX+1)); - if (tcaches == NULL) { - err = true; - goto label_return; - } - } - - if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { - err = true; - goto label_return; - } - - err = false; -label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); - return err; -} - -bool -tcaches_create(tsd_t *tsd, unsigned *r_ind) { - bool err; - arena_t *arena; - tcache_t *tcache; - tcaches_t *elm; - - if (tcaches_create_prep(tsd)) { - err = true; - goto label_return; - } - - arena = arena_ichoose(tsd, NULL); - if (unlikely(arena == NULL)) { - err = true; - goto label_return; - } - tcache = tcache_create(tsd_tsdn(tsd), arena); - if (tcache == NULL) { - err = true; - goto label_return; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); - if (tcaches_avail != NULL) { - elm = tcaches_avail; - tcaches_avail = tcaches_avail->next; - elm->tcache = tcache; - *r_ind = (unsigned)(elm - tcaches); - } else { - elm = &tcaches[tcaches_past]; - elm->tcache = tcache; - *r_ind = tcaches_past; - tcaches_past++; - } - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); - - err = false; -label_return: - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &tcaches_mtx); - return err; -} - -static void -tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); - - if (elm->tcache == NULL) { - return; - } - tcache_destroy(tsd, elm->tcache); - elm->tcache = NULL; -} - -void -tcaches_flush(tsd_t *tsd, unsigned ind) { - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); - tcaches_elm_flush(tsd, &tcaches[ind]); - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); -} - -void -tcaches_destroy(tsd_t *tsd, unsigned ind) { - tcaches_t *elm; - - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); - elm = &tcaches[ind]; - tcaches_elm_flush(tsd, elm); - elm->next = tcaches_avail; - tcaches_avail = elm; - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); -} - -bool -tcache_boot(tsdn_t *tsdn) { - unsigned i; - - cassert(config_tcache); - - /* - * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is - * known. - */ - if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) - tcache_maxclass = SMALL_MAXCLASS; - else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass) - tcache_maxclass = large_maxclass; - else - tcache_maxclass = (ZU(1) << opt_lg_tcache_max); - - if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES)) { - return true; - } - - nhbins = size2index(tcache_maxclass) + 1; - - /* Initialize tcache_bin_info. 
*/ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); - stack_nelms = 0; - for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MIN; - } else if ((arena_bin_info[i].nregs << 1) <= - TCACHE_NSLOTS_SMALL_MAX) { - tcache_bin_info[i].ncached_max = - (arena_bin_info[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; - } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; - } - - return (false); -} - -void -tcache_prefork(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_prefork(tsdn, &tcaches_mtx); - } -} - -void -tcache_postfork_parent(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); - } -} - -void -tcache_postfork_child(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_postfork_child(tsdn, &tcaches_mtx); - } -} diff --git a/sources/jemalloc/src/je_ticker.c b/sources/jemalloc/src/je_ticker.c deleted file mode 100755 index db090240..00000000 --- a/sources/jemalloc/src/je_ticker.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_TICKER_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_tsd.c b/sources/jemalloc/src/je_tsd.c deleted file mode 100755 index ec69a51c..00000000 --- a/sources/jemalloc/src/je_tsd.c +++ /dev/null @@ -1,197 +0,0 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - -malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) - -/******************************************************************************/ - -void * -malloc_tsd_malloc(size_t size) -{ - - return (a0malloc(CACHELINE_CEILING(size))); -} - -void -malloc_tsd_dalloc(void *wrapper) -{ - - a0dalloc(wrapper); -} - -void -malloc_tsd_no_cleanup(void *arg) -{ - - not_reached(); -} - -#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -#ifndef _WIN32 -JEMALLOC_EXPORT -#endif -void -_malloc_thread_cleanup(void) -{ - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; - unsigned i; - - for (i = 0; i < ncleanups; i++) - pending[i] = true; - - do { - again = false; - for (i = 0; i < ncleanups; i++) { - if (pending[i]) { - pending[i] = cleanups[i](); - if (pending[i]) - again = true; - } - } - } while (again); -} -#endif - -void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - - assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); - cleanups[ncleanups] = f; - ncleanups++; -} - -void -tsd_cleanup(void *arg) -{ - tsd_t *tsd = (tsd_t *)arg; - - switch (tsd->state) { - case tsd_state_uninitialized: - /* Do nothing. */ - break; - case tsd_state_nominal: -#define O(n, t) \ - n##_cleanup(tsd); -MALLOC_TSD -#undef O - tsd->state = tsd_state_purgatory; - tsd_set(tsd); - break; - case tsd_state_purgatory: - /* - * The previous time this destructor was called, we set the - * state to tsd_state_purgatory so that other destructors - * wouldn't cause re-creation of the tsd. This time, do - * nothing, and do not request another callback. 
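[editor's note] The purgatory state handled above guards against a general POSIX TSD pitfall: a destructor pass that leaves (or recreates) a non-NULL value triggers another destructor pass. A generic sketch of the same guard using plain pthread keys rather than jemalloc's tsd_t (names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;
static char purgatory_sentinel;
#define PURGATORY ((void *)&purgatory_sentinel)

static void
dtor(void *arg)
{
	if (arg == PURGATORY)
		return;	/* Second pass: stay dead, request no callback. */
	free(arg);
	/* Leave a sentinel instead of NULL so destructors for other
	 * keys that touch this one see "dead", not "recreate me". */
	pthread_setspecific(key, PURGATORY);
}

int
main(void)
{
	pthread_key_create(&key, dtor);
	pthread_setspecific(key, malloc(16));
	pthread_exit(NULL);	/* Thread exit runs dtor, then the purgatory pass. */
}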
- */ - break; - case tsd_state_reincarnated: - /* - * Another destructor deallocated memory after this destructor - * was called. Reset state to tsd_state_purgatory and request - * another callback. - */ - tsd->state = tsd_state_purgatory; - tsd_set(tsd); - break; - default: - not_reached(); - } -} - -tsd_t * -malloc_tsd_boot0(void) -{ - tsd_t *tsd; - - ncleanups = 0; - if (tsd_boot0()) - return (NULL); - tsd = tsd_fetch(); - *tsd_arenas_tdata_bypassp_get(tsd) = true; - return (tsd); -} - -void -malloc_tsd_boot1(void) -{ - - tsd_boot1(); - *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; -} - -#ifdef _WIN32 -static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - - switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK - case DLL_THREAD_ATTACH: - isthreaded = true; - break; -#endif - case DLL_THREAD_DETACH: - _malloc_thread_cleanup(); - break; - default: - break; - } - return (true); -} - -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") -# pragma comment(linker, "/INCLUDE:_tls_callback") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") -# pragma comment(linker, "/INCLUDE:tls_callback") -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ - pthread_t self = pthread_self(); - tsd_init_block_t *iter; - - /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(TSDN_NULL, &head->lock); - ql_foreach(iter, &head->blocks, link) { - if (iter->thread == self) { - malloc_mutex_unlock(TSDN_NULL, &head->lock); - return (iter->data); - } - } - /* Insert block into list. */ - ql_elm_new(block, link); - block->thread = self; - ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(TSDN_NULL, &head->lock); - return (NULL); -} - -void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ - - malloc_mutex_lock(TSDN_NULL, &head->lock); - ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(TSDN_NULL, &head->lock); -} -#endif diff --git a/sources/jemalloc/src/je_util.c b/sources/jemalloc/src/je_util.c deleted file mode 100755 index dd8c2363..00000000 --- a/sources/jemalloc/src/je_util.c +++ /dev/null @@ -1,666 +0,0 @@ -/* - * Define simple versions of assertion macros that won't recurse in case - * of assertion failures in malloc_*printf(). - */ -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_write(": Failed assertion\n"); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - if (config_debug) { \ - malloc_write(": Unreachable code reached\n"); \ - abort(); \ - } \ - unreachable(); \ -} while (0) - -#define not_implemented() do { \ - if (config_debug) { \ - malloc_write(": Not implemented\n"); \ - abort(); \ - } \ -} while (0) - -#define JEMALLOC_UTIL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static void wrtmessage(void *cbopaque, const char *s); -#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, - size_t *slen_p); -#define D2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); -#define O2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); -#define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, - size_t *slen_p); - -/******************************************************************************/ - -/* malloc_message() setup. */ -static void -wrtmessage(void *cbopaque, const char *s) -{ - -#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) - /* - * Use syscall(2) rather than write(2) when possible in order to avoid - * the possibility of memory allocation within libc. This is necessary - * on FreeBSD; most operating systems do not have this problem though. - * - * syscall() returns long or int, depending on platform, so capture the - * unused result in the widest plausible type to avoid compiler - * warnings. - */ - UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); -#else - UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s)); -#endif -} - -JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); - -/* - * Wrapper around malloc_message() that avoids the need for - * je_malloc_message(...) throughout the code. - */ -void -malloc_write(const char *s) -{ - - if (je_malloc_message != NULL) - je_malloc_message(NULL, s); - else - wrtmessage(NULL, s); -} - -/* - * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so - * provide a wrapper. - */ -int -buferror(int err, char *buf, size_t buflen) -{ - -#ifdef _WIN32 - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, - (LPSTR)buf, (DWORD)buflen, NULL); - return (0); -#elif defined(__GLIBC__) && defined(_GNU_SOURCE) - char *b = strerror_r(err, buf, buflen); - if (b != buf) { - strncpy(buf, b, buflen); - buf[buflen-1] = '\0'; - } - return (0); -#else - return (strerror_r(err, buf, buflen)); -#endif -} - -uintmax_t -malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) -{ - uintmax_t ret, digit; - unsigned b; - bool neg; - const char *p, *ns; - - p = nptr; - if (base < 0 || base == 1 || base > 36) { - ns = p; - set_errno(EINVAL); - ret = UINTMAX_MAX; - goto label_return; - } - b = base; - - /* Swallow leading whitespace and get sign, if any. */ - neg = false; - while (true) { - switch (*p) { - case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': - p++; - break; - case '-': - neg = true; - /* Fall through. */ - case '+': - p++; - /* Fall through. */ - default: - goto label_prefix; - } - } - - /* Get prefix, if any. */ - label_prefix: - /* - * Note where the first non-whitespace/sign character is so that it is - * possible to tell whether any digits are consumed (e.g., " 0" vs. - * " -x"). 
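[editor's note] The ns bookkeeping described above mirrors the standard strtoumax() contract: when no digits are consumed, *endptr must point back at nptr itself, whitespace and sign included. A quick illustration of the " 0" vs. " -x" cases with the libc function:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	const char *a = " 0", *b = " -x";
	char *end;

	(void)strtoumax(a, &end, 10);
	printf("\" 0\":  digits consumed? %s\n", (end != a) ? "yes" : "no");
	(void)strtoumax(b, &end, 10);
	printf("\" -x\": digits consumed? %s\n", (end != b) ? "yes" : "no");
	return (0);
}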
- */ - ns = p; - if (*p == '0') { - switch (p[1]) { - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': - if (b == 0) - b = 8; - if (b == 8) - p++; - break; - case 'X': case 'x': - switch (p[2]) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'A': case 'B': case 'C': case 'D': case 'E': - case 'F': - case 'a': case 'b': case 'c': case 'd': case 'e': - case 'f': - if (b == 0) - b = 16; - if (b == 16) - p += 2; - break; - default: - break; - } - break; - default: - p++; - ret = 0; - goto label_return; - } - } - if (b == 0) - b = 10; - - /* Convert. */ - ret = 0; - while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) - || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) - || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { - uintmax_t pret = ret; - ret *= b; - ret += digit; - if (ret < pret) { - /* Overflow. */ - set_errno(ERANGE); - ret = UINTMAX_MAX; - goto label_return; - } - p++; - } - if (neg) - ret = (uintmax_t)(-((intmax_t)ret)); - - if (p == ns) { - /* No conversion performed. */ - set_errno(EINVAL); - ret = UINTMAX_MAX; - goto label_return; - } - -label_return: - if (endptr != NULL) { - if (p == ns) { - /* No characters were converted. */ - *endptr = (char *)nptr; - } else - *endptr = (char *)p; - } - return (ret); -} - -static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ - unsigned i; - - i = U2S_BUFSIZE - 1; - s[i] = '\0'; - switch (base) { - case 10: - do { - i--; - s[i] = "0123456789"[x % (uint64_t)10]; - x /= (uint64_t)10; - } while (x > 0); - break; - case 16: { - const char *digits = (uppercase) - ? "0123456789ABCDEF" - : "0123456789abcdef"; - - do { - i--; - s[i] = digits[x & 0xf]; - x >>= 4; - } while (x > 0); - break; - } default: { - const char *digits = (uppercase) - ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - : "0123456789abcdefghijklmnopqrstuvwxyz"; - - assert(base >= 2 && base <= 36); - do { - i--; - s[i] = digits[x % (uint64_t)base]; - x /= (uint64_t)base; - } while (x > 0); - }} - - *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); -} - -static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ - bool neg; - - if ((neg = (x < 0))) - x = -x; - s = u2s(x, 10, false, s, slen_p); - if (neg) - sign = '-'; - switch (sign) { - case '-': - if (!neg) - break; - /* Fall through. */ - case ' ': - case '+': - s--; - (*slen_p)++; - *s = sign; - break; - default: not_reached(); - } - return (s); -} - -static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ - - s = u2s(x, 8, false, s, slen_p); - if (alt_form && *s != '0') { - s--; - (*slen_p)++; - *s = '0'; - } - return (s); -} - -static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ - - s = u2s(x, 16, uppercase, s, slen_p); - if (alt_form) { - s -= 2; - (*slen_p) += 2; - memcpy(s, uppercase ? "0X" : "0x", 2); - } - return (s); -} - -size_t -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - size_t i; - const char *f; - -#define APPEND_C(c) do { \ - if (i < size) \ - str[i] = (c); \ - i++; \ -} while (0) -#define APPEND_S(s, slen) do { \ - if (i < size) { \ - size_t cpylen = (slen <= size - i) ? slen : size - i; \ - memcpy(&str[i], s, cpylen); \ - } \ - i += slen; \ -} while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ - /* Left padding. */ \ - size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? 
\ - (size_t)width - slen : 0); \ - if (!left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ - /* Value. */ \ - APPEND_S(s, slen); \ - /* Right padding. */ \ - if (left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ -} while (0) -#define GET_ARG_NUMERIC(val, len) do { \ - switch (len) { \ - case '?': \ - val = va_arg(ap, int); \ - break; \ - case '?' | 0x80: \ - val = va_arg(ap, unsigned int); \ - break; \ - case 'l': \ - val = va_arg(ap, long); \ - break; \ - case 'l' | 0x80: \ - val = va_arg(ap, unsigned long); \ - break; \ - case 'q': \ - val = va_arg(ap, long long); \ - break; \ - case 'q' | 0x80: \ - val = va_arg(ap, unsigned long long); \ - break; \ - case 'j': \ - val = va_arg(ap, intmax_t); \ - break; \ - case 'j' | 0x80: \ - val = va_arg(ap, uintmax_t); \ - break; \ - case 't': \ - val = va_arg(ap, ptrdiff_t); \ - break; \ - case 'z': \ - val = va_arg(ap, ssize_t); \ - break; \ - case 'z' | 0x80: \ - val = va_arg(ap, size_t); \ - break; \ - case 'p': /* Synthetic; used for %p. */ \ - val = va_arg(ap, uintptr_t); \ - break; \ - default: \ - not_reached(); \ - val = 0; \ - } \ -} while (0) - - i = 0; - f = format; - while (true) { - switch (*f) { - case '\0': goto label_out; - case '%': { - bool alt_form = false; - bool left_justify = false; - bool plus_space = false; - bool plus_plus = false; - int prec = -1; - int width = -1; - unsigned char len = '?'; - char *s; - size_t slen; - - f++; - /* Flags. */ - while (true) { - switch (*f) { - case '#': - assert(!alt_form); - alt_form = true; - break; - case '-': - assert(!left_justify); - left_justify = true; - break; - case ' ': - assert(!plus_space); - plus_space = true; - break; - case '+': - assert(!plus_plus); - plus_plus = true; - break; - default: goto label_width; - } - f++; - } - /* Width. */ - label_width: - switch (*f) { - case '*': - width = va_arg(ap, int); - f++; - if (width < 0) { - left_justify = true; - width = -width; - } - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uwidth; - set_errno(0); - uwidth = malloc_strtoumax(f, (char **)&f, 10); - assert(uwidth != UINTMAX_MAX || get_errno() != - ERANGE); - width = (int)uwidth; - break; - } default: - break; - } - /* Width/precision separator. */ - if (*f == '.') - f++; - else - goto label_length; - /* Precision. */ - switch (*f) { - case '*': - prec = va_arg(ap, int); - f++; - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uprec; - set_errno(0); - uprec = malloc_strtoumax(f, (char **)&f, 10); - assert(uprec != UINTMAX_MAX || get_errno() != - ERANGE); - prec = (int)uprec; - break; - } - default: break; - } - /* Length. */ - label_length: - switch (*f) { - case 'l': - f++; - if (*f == 'l') { - len = 'q'; - f++; - } else - len = 'l'; - break; - case 'q': case 'j': case 't': case 'z': - len = *f; - f++; - break; - default: break; - } - /* Conversion specifier. */ - switch (*f) { - case '%': - /* %% */ - APPEND_C(*f); - f++; - break; - case 'd': case 'i': { - intmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[D2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len); - s = d2s(val, (plus_plus ? '+' : (plus_space ? 
- ' ' : '-')), buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'o': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[O2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = o2s(val, alt_form, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'u': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[U2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = u2s(val, 10, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'x': case 'X': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = x2s(val, alt_form, *f == 'X', buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'c': { - unsigned char val; - char buf[2]; - - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - val = va_arg(ap, int); - buf[0] = val; - buf[1] = '\0'; - APPEND_PADDED_S(buf, 1, width, left_justify); - f++; - break; - } case 's': - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - s = va_arg(ap, char *); - slen = (prec < 0) ? strlen(s) : (size_t)prec; - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - case 'p': { - uintmax_t val; - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, 'p'); - s = x2s(val, true, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } default: not_reached(); - } - break; - } default: { - APPEND_C(*f); - f++; - break; - }} - } - label_out: - if (i < size) - str[i] = '\0'; - else - str[size - 1] = '\0'; - -#undef APPEND_C -#undef APPEND_S -#undef APPEND_PADDED_S -#undef GET_ARG_NUMERIC - return (i); -} - -JEMALLOC_FORMAT_PRINTF(3, 4) -size_t -malloc_snprintf(char *str, size_t size, const char *format, ...) -{ - size_t ret; - va_list ap; - - va_start(ap, format); - ret = malloc_vsnprintf(str, size, format, ap); - va_end(ap); - - return (ret); -} - -void -malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ - char buf[MALLOC_PRINTF_BUFSIZE]; - - if (write_cb == NULL) { - /* - * The caller did not provide an alternate write_cb callback - * function, so use the default one. malloc_write() is an - * inline function, so use malloc_message() directly here. - */ - write_cb = (je_malloc_message != NULL) ? je_malloc_message : - wrtmessage; - cbopaque = NULL; - } - - malloc_vsnprintf(buf, sizeof(buf), format, ap); - write_cb(cbopaque, buf); -} - -/* - * Print to a callback function in such a way as to (hopefully) avoid memory - * allocation. - */ -JEMALLOC_FORMAT_PRINTF(3, 4) -void -malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(write_cb, cbopaque, format, ap); - va_end(ap); -} - -/* Print to stderr in such a way as to avoid memory allocation. */ -JEMALLOC_FORMAT_PRINTF(1, 2) -void -malloc_printf(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); -} - -/* - * Restore normal assertion macros, in order to make it possible to compile all - * C files as a single concatenation. 
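[editor's note] The #undef/#include dance that follows pairs with the file-local assert()/not_reached() overrides at the top of je_util.c: the printing routines can run inside malloc itself, so their assertions must not allocate. The pattern in miniature (a sketch under those assumptions, not jemalloc's exact macros):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Non-allocating replacement, valid only within this region. */
#define assert(e) do {						\
	if (!(e)) {						\
		const char *msg = "assertion failure\n";	\
		/* write(2) cannot recurse into malloc. */	\
		write(STDERR_FILENO, msg, strlen(msg));		\
		abort();					\
	}							\
} while (0)

/* ... code that may execute inside the allocator ... */

#undef assert
#include <assert.h>	/* Restore the standard definition. */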
- */ -#undef assert -#undef not_reached -#undef not_implemented -#include "jemalloc/internal/assert.h" diff --git a/sources/jemalloc/src/je_valgrind.c b/sources/jemalloc/src/je_valgrind.c deleted file mode 100755 index 8e7ef3a2..00000000 --- a/sources/jemalloc/src/je_valgrind.c +++ /dev/null @@ -1,34 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal.h" -#ifndef JEMALLOC_VALGRIND -# error "This source file is for Valgrind integration." -#endif - -#include - -void -valgrind_make_mem_noaccess(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_NOACCESS(ptr, usize); -} - -void -valgrind_make_mem_undefined(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize); -} - -void -valgrind_make_mem_defined(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_DEFINED(ptr, usize); -} - -void -valgrind_freelike_block(void *ptr, size_t usize) -{ - - VALGRIND_FREELIKE_BLOCK(ptr, usize); -} diff --git a/sources/jemalloc/src/je_witness.c b/sources/jemalloc/src/je_witness.c deleted file mode 100755 index c3a65f7c..00000000 --- a/sources/jemalloc/src/je_witness.c +++ /dev/null @@ -1,136 +0,0 @@ -#define JEMALLOC_WITNESS_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -void -witness_init(witness_t *witness, const char *name, witness_rank_t rank, - witness_comp_t *comp) -{ - - witness->name = name; - witness->rank = rank; - witness->comp = comp; -} - -#ifdef JEMALLOC_JET -#undef witness_lock_error -#define witness_lock_error JEMALLOC_N(n_witness_lock_error) -#endif -void -witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) -{ - witness_t *w; - - malloc_printf(": Lock rank order reversal:"); - ql_foreach(w, witnesses, link) { - malloc_printf(" %s(%u)", w->name, w->rank); - } - malloc_printf(" %s(%u)\n", witness->name, witness->rank); - abort(); -} -#ifdef JEMALLOC_JET -#undef witness_lock_error -#define witness_lock_error JEMALLOC_N(witness_lock_error) -witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error); -#endif - -#ifdef JEMALLOC_JET -#undef witness_owner_error -#define witness_owner_error JEMALLOC_N(n_witness_owner_error) -#endif -void -witness_owner_error(const witness_t *witness) -{ - - malloc_printf(": Should own %s(%u)\n", witness->name, - witness->rank); - abort(); -} -#ifdef JEMALLOC_JET -#undef witness_owner_error -#define witness_owner_error JEMALLOC_N(witness_owner_error) -witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error); -#endif - -#ifdef JEMALLOC_JET -#undef witness_not_owner_error -#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error) -#endif -void -witness_not_owner_error(const witness_t *witness) -{ - - malloc_printf(": Should not own %s(%u)\n", witness->name, - witness->rank); - abort(); -} -#ifdef JEMALLOC_JET -#undef witness_not_owner_error -#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) -witness_not_owner_error_t *witness_not_owner_error = - JEMALLOC_N(n_witness_not_owner_error); -#endif - -#ifdef JEMALLOC_JET -#undef witness_depth_error -#define witness_depth_error JEMALLOC_N(n_witness_depth_error) -#endif -void -witness_depth_error(const witness_list_t *witnesses, - witness_rank_t rank_inclusive, unsigned depth) { - witness_t *w; - - malloc_printf(": Should own %u lock%s of rank >= %u:", depth, - (depth != 1) ? 
"s" : "", rank_inclusive); - ql_foreach(w, witnesses, link) { - malloc_printf(" %s(%u)", w->name, w->rank); - } - malloc_printf("\n"); - abort(); -} -#ifdef JEMALLOC_JET -#undef witness_depth_error -#define witness_depth_error JEMALLOC_N(witness_depth_error) -witness_depth_error_t *witness_depth_error = JEMALLOC_N(n_witness_depth_error); -#endif - -void -witnesses_cleanup(tsd_t *tsd) -{ - - witness_assert_lockless(tsd_tsdn(tsd)); - - /* Do nothing. */ -} - -void -witness_fork_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -witness_prefork(tsd_t *tsd) -{ - - tsd_witness_fork_set(tsd, true); -} - -void -witness_postfork_parent(tsd_t *tsd) -{ - - tsd_witness_fork_set(tsd, false); -} - -void -witness_postfork_child(tsd_t *tsd) -{ -#ifndef JEMALLOC_MUTEX_INIT_CB - witness_list_t *witnesses; - - witnesses = tsd_witnessesp_get(tsd); - ql_new(witnesses); -#endif - tsd_witness_fork_set(tsd, false); -} diff --git a/sources/jemalloc/src/je_zone.c b/sources/jemalloc/src/je_zone.c deleted file mode 100755 index 6215133f..00000000 --- a/sources/jemalloc/src/je_zone.c +++ /dev/null @@ -1,478 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal.h" -#ifndef JEMALLOC_ZONE -# error "This source file is for zones on Darwin (OS X)." -#endif - -/* Definitions of the following structs in malloc/malloc.h might be too old - * for the built binary to run on newer versions of OSX. So use the newest - * possible version of those structs. - */ -typedef struct _malloc_zone_t { - void *reserved1; - void *reserved2; - size_t (*size)(struct _malloc_zone_t *, const void *); - void *(*malloc)(struct _malloc_zone_t *, size_t); - void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); - void *(*valloc)(struct _malloc_zone_t *, size_t); - void (*free)(struct _malloc_zone_t *, void *); - void *(*realloc)(struct _malloc_zone_t *, void *, size_t); - void (*destroy)(struct _malloc_zone_t *); - const char *zone_name; - unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); - void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); - struct malloc_introspection_t *introspect; - unsigned version; - void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); - void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); - size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); -} malloc_zone_t; - -typedef struct { - vm_address_t address; - vm_size_t size; -} vm_range_t; - -typedef struct malloc_statistics_t { - unsigned blocks_in_use; - size_t size_in_use; - size_t max_size_in_use; - size_t size_allocated; -} malloc_statistics_t; - -typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); - -typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); - -typedef struct malloc_introspection_t { - kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); - size_t (*good_size)(malloc_zone_t *, size_t); - boolean_t (*check)(malloc_zone_t *); - void (*print)(malloc_zone_t *, boolean_t); - void (*log)(malloc_zone_t *, void *); - void (*force_lock)(malloc_zone_t *); - void (*force_unlock)(malloc_zone_t *); - void (*statistics)(malloc_zone_t *, malloc_statistics_t *); - boolean_t (*zone_locked)(malloc_zone_t *); - boolean_t (*enable_discharge_checking)(malloc_zone_t *); - boolean_t (*disable_discharge_checking)(malloc_zone_t *); - void (*discharge)(malloc_zone_t *, void *); -#ifdef __BLOCKS__ - void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); -#else - void 
*enumerate_unavailable_without_blocks; -#endif - void (*reinit_lock)(malloc_zone_t *); -} malloc_introspection_t; - -extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); - -extern malloc_zone_t *malloc_default_zone(void); - -extern void malloc_zone_register(malloc_zone_t *zone); - -extern void malloc_zone_unregister(malloc_zone_t *zone); - -/* - * The malloc_default_purgeable_zone() function is only available on >= 10.6. - * We need to check whether it is present at runtime, thus the weak_import. - */ -extern malloc_zone_t *malloc_default_purgeable_zone(void) -JEMALLOC_ATTR(weak_import); - -/******************************************************************************/ -/* Data. */ - -static malloc_zone_t *default_zone, *purgeable_zone; -static malloc_zone_t jemalloc_zone; -static struct malloc_introspection_t jemalloc_zone_introspect; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static size_t zone_size(malloc_zone_t *zone, const void *ptr); -static void *zone_malloc(malloc_zone_t *zone, size_t size); -static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); -static void *zone_valloc(malloc_zone_t *zone, size_t size); -static void zone_free(malloc_zone_t *zone, void *ptr); -static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -static void *zone_memalign(malloc_zone_t *zone, size_t alignment, - size_t size); -static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, - size_t size); -static void zone_destroy(malloc_zone_t *zone); -static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, - void **results, unsigned num_requested); -static void zone_batch_free(struct _malloc_zone_t *zone, - void **to_be_freed, unsigned num_to_be_freed); -static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); -static size_t zone_good_size(malloc_zone_t *zone, size_t size); -static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, - vm_address_t zone_address, memory_reader_t reader, - vm_range_recorder_t recorder); -static boolean_t zone_check(malloc_zone_t *zone); -static void zone_print(malloc_zone_t *zone, boolean_t verbose); -static void zone_log(malloc_zone_t *zone, void *address); -static void zone_force_lock(malloc_zone_t *zone); -static void zone_force_unlock(malloc_zone_t *zone); -static void zone_statistics(malloc_zone_t *zone, - malloc_statistics_t *stats); -static boolean_t zone_locked(malloc_zone_t *zone); -static void zone_reinit_lock(malloc_zone_t *zone); - -/******************************************************************************/ -/* - * Functions. - */ - -static size_t -zone_size(malloc_zone_t *zone, const void *ptr) -{ - - /* - * There appear to be places within Darwin (such as setenv(3)) that - * cause calls to this function with pointers that *no* zone owns. If - * we knew that all pointers were owned by *some* zone, we could split - * our zone into two parts, and use one as the default allocator and - * the other as the default deallocator/reallocator. Since that will - * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. 
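[editor's note] Returning 0 from size() for foreign pointers matters because Darwin's free(3) uses exactly that probe to locate a pointer's owning zone. A hypothetical dispatcher, not Apple's implementation, showing why a zone that over-claims pointers would free memory it does not manage:

#include <malloc/malloc.h>

static void
dispatch_free(malloc_zone_t **zones, unsigned nzones, void *ptr)
{
	for (unsigned i = 0; i < nzones; i++) {
		/* First zone whose size() is nonzero owns ptr. */
		if (zones[i]->size(zones[i], ptr) != 0) {
			zones[i]->free(zones[i], ptr);
			return;
		}
	}
	/* No owner found: the real libmalloc reports a bad free. */
}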
- */ - return (ivsalloc(tsdn_fetch(), ptr, config_prof)); -} - -static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); -} - -static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); -} - -static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, PAGE, size); - - return (ret); -} - -static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { - je_free(ptr); - return; - } - - free(ptr); -} - -static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) - return (je_realloc(ptr, size)); - - return (realloc(ptr, size)); -} - -static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, alignment, size); - - return (ret); -} - -static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - size_t alloc_size; - - alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof); - if (alloc_size != 0) { - assert(alloc_size == size); - je_free(ptr); - return; - } - - free(ptr); -} - -static void -zone_destroy(malloc_zone_t *zone) -{ - - /* This function should never be called. */ - not_reached(); -} - -static unsigned -zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, - unsigned num_requested) -{ - unsigned i; - - for (i = 0; i < num_requested; i++) { - results[i] = je_malloc(size); - if (!results[i]) - break; - } - - return i; -} - -static void -zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, - unsigned num_to_be_freed) -{ - unsigned i; - - for (i = 0; i < num_to_be_freed; i++) { - zone_free(zone, to_be_freed[i]); - to_be_freed[i] = NULL; - } -} - -static size_t -zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) -{ - return 0; -} - -static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ - - if (size == 0) - size = 1; - return (s2u(size)); -} - -static kern_return_t -zone_enumerator(task_t task, void *data, unsigned type_mask, - vm_address_t zone_address, memory_reader_t reader, - vm_range_recorder_t recorder) -{ - return KERN_SUCCESS; -} - -static boolean_t -zone_check(malloc_zone_t *zone) -{ - return true; -} - -static void -zone_print(malloc_zone_t *zone, boolean_t verbose) -{ -} - -static void -zone_log(malloc_zone_t *zone, void *address) -{ -} - -static void -zone_force_lock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_prefork(); -} - -static void -zone_force_unlock(malloc_zone_t *zone) -{ - - /* - * Call jemalloc_postfork_child() rather than - * jemalloc_postfork_parent(), because this function is executed by both - * parent and child. The parent can tolerate having state - * reinitialized, but the child cannot unlock mutexes that were locked - * by the parent. 
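[editor's note] The parent/child asymmetry called out above is the standard pthread_atfork(3) problem: after fork(2) only the forking thread survives in the child, and mutex bookkeeping (owner ids, waiter queues) may be stale, so the child reinitializes locks rather than unlocking them. A generic sketch with a single hypothetical lock:

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&big_lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&big_lock); }

static void
postfork_child(void)
{
	/* Unlocking in the child is unreliable; recreate the lock
	 * instead, accepting reinitialized state as described above. */
	pthread_mutex_init(&big_lock, NULL);
}

static void __attribute__((constructor))
install_fork_handlers(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
}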
- */ - if (isthreaded) - jemalloc_postfork_child(); -} - -static void -zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) -{ - /* We make no effort to actually fill the values */ - stats->blocks_in_use = 0; - stats->size_in_use = 0; - stats->max_size_in_use = 0; - stats->size_allocated = 0; -} - -static boolean_t -zone_locked(malloc_zone_t *zone) -{ - /* Pretend no lock is being held */ - return false; -} - -static void -zone_reinit_lock(malloc_zone_t *zone) -{ - /* As of OSX 10.12, this function is only used when force_unlock would - * be used if the zone version were < 9. So just use force_unlock. */ - zone_force_unlock(zone); -} - -static void -zone_init(void) -{ - - jemalloc_zone.size = zone_size; - jemalloc_zone.malloc = zone_malloc; - jemalloc_zone.calloc = zone_calloc; - jemalloc_zone.valloc = zone_valloc; - jemalloc_zone.free = zone_free; - jemalloc_zone.realloc = zone_realloc; - jemalloc_zone.destroy = zone_destroy; - jemalloc_zone.zone_name = "jemalloc_zone"; - jemalloc_zone.batch_malloc = zone_batch_malloc; - jemalloc_zone.batch_free = zone_batch_free; - jemalloc_zone.introspect = &jemalloc_zone_introspect; - jemalloc_zone.version = 9; - jemalloc_zone.memalign = zone_memalign; - jemalloc_zone.free_definite_size = zone_free_definite_size; - jemalloc_zone.pressure_relief = zone_pressure_relief; - - jemalloc_zone_introspect.enumerator = zone_enumerator; - jemalloc_zone_introspect.good_size = zone_good_size; - jemalloc_zone_introspect.check = zone_check; - jemalloc_zone_introspect.print = zone_print; - jemalloc_zone_introspect.log = zone_log; - jemalloc_zone_introspect.force_lock = zone_force_lock; - jemalloc_zone_introspect.force_unlock = zone_force_unlock; - jemalloc_zone_introspect.statistics = zone_statistics; - jemalloc_zone_introspect.zone_locked = zone_locked; - jemalloc_zone_introspect.enable_discharge_checking = NULL; - jemalloc_zone_introspect.disable_discharge_checking = NULL; - jemalloc_zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; -#else - jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif - jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; -} - -static malloc_zone_t * -zone_default_get(void) -{ - malloc_zone_t **zones = NULL; - unsigned int num_zones = 0; - - /* - * On OSX 10.12, malloc_default_zone returns a special zone that is not - * present in the list of registered zones. That zone uses a "lite zone" - * if one is present (apparently enabled when malloc stack logging is - * enabled), or the first registered zone otherwise. In practice this - * means unless malloc stack logging is enabled, the first registered - * zone is the default. So get the list of zones to get the first one, - * instead of relying on malloc_default_zone. - */ - if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, - (vm_address_t**)&zones, &num_zones)) { - /* - * Reset the value in case the failure happened after it was - * set. - */ - num_zones = 0; - } - - if (num_zones) - return (zones[0]); - - return (malloc_default_zone()); -} - -/* As written, this function can only promote jemalloc_zone. */ -static void -zone_promote(void) -{ - malloc_zone_t *zone; - - do { - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it - * at the location of the specified zone. Unregistering the - * default zone thus makes the last registered one the default. - * On OSX < 10.6, unregistering shifts all registered zones. 
- * The first registered zone then becomes the default. - */ - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - - /* - * On OSX 10.6, having the default purgeable zone appear before - * the default zone makes some things crash because it thinks it - * owns the default zone allocated pointers. We thus - * unregister/re-register it in order to ensure it's always - * after the default zone. On OSX < 10.6, there is no purgeable - * zone, so this does nothing. On OSX >= 10.6, unregistering - * replaces the purgeable zone with the last registered zone - * above, i.e. the default zone. Registering it again then puts - * it at the end, obviously after the default zone. - */ - if (purgeable_zone != NULL) { - malloc_zone_unregister(purgeable_zone); - malloc_zone_register(purgeable_zone); - } - - zone = zone_default_get(); - } while (zone != &jemalloc_zone); -} - -JEMALLOC_ATTR(constructor) -void -zone_register(void) -{ - - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. - */ - default_zone = zone_default_get(); - if (!default_zone->zone_name || strcmp(default_zone->zone_name, - "DefaultMallocZone") != 0) - return; - - /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone() is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. - */ - purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : - malloc_default_purgeable_zone(); - - /* Register the custom zone. At this point it won't be the default. */ - zone_init(); - malloc_zone_register(&jemalloc_zone); - - /* Promote the custom zone to be default. */ - zone_promote(); -} diff --git a/sources/nedmalloc/malloc.c.h b/sources/nedmalloc/malloc.c.h deleted file mode 100755 index 4803a782..00000000 --- a/sources/nedmalloc/malloc.c.h +++ /dev/null @@ -1,5709 +0,0 @@ -/* - This is a version (aka dlmalloc) of malloc/free/realloc written by - Doug Lea and released to the public domain, as explained at - http://creativecommons.org/licenses/publicdomain. Send questions, - comments, complaints, performance data, etc to dl@cs.oswego.edu - - ** Modified by OGRE to support MinGW 5 Jan 2009 - -* Version pre-2.8.4 Mon Nov 27 11:22:37 2006 (dl at gee) - - Note: There may be an updated version of this malloc obtainable at - ftp://gee.cs.oswego.edu/pub/misc/malloc.c - Check before installing! - -* Quickstart - - This library is all in one file to simplify the most common usage: - ftp it, compile it (-O3), and link it into another program. All of - the compile-time options default to reasonable values for use on - most platforms. You might later want to step through various - compile-time and dynamic tuning options. - - For convenience, an include file for code using this malloc is at: - ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h - You don't really need this .h file unless you call functions not - defined in your system include files. The .h file contains only the - excerpts from this file needed for using this malloc on ANSI C/C++ - systems, so long as you haven't changed compile-time options about - naming and tuning parameters. 
If you do, then you can create your - own malloc.h that does include all settings by cutting at the point - indicated below. Note that you may already by default be using a C - library containing a malloc that is based on some version of this - malloc (for example in linux). You might still want to use the one - in this file to customize settings or to avoid overheads associated - with library versions. - -* Vital statistics: - - Supported pointer/size_t representation: 4 or 8 bytes - size_t MUST be an unsigned type of the same width as - pointers. (If you are using an ancient system that declares - size_t as a signed type, or need it to be a different width - than pointers, you can use a previous release of this malloc - (e.g. 2.7.2) supporting these.) - - Alignment: 8 bytes (default) - This suffices for nearly all current machines and C compilers. - However, you can define MALLOC_ALIGNMENT to be wider than this - if necessary (up to 128bytes), at the expense of using more space. - - Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) - 8 or 16 bytes (if 8byte sizes) - Each malloced chunk has a hidden word of overhead holding size - and status information, and additional cross-check word - if FOOTERS is defined. - - Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) - 8-byte ptrs: 32 bytes (including overhead) - - Even a request for zero bytes (i.e., malloc(0)) returns a - pointer to something of the minimum allocatable size. - The maximum overhead wastage (i.e., number of extra bytes - allocated than were requested in malloc) is less than or equal - to the minimum size, except for requests >= mmap_threshold that - are serviced via mmap(), where the worst case wastage is about - 32 bytes plus the remainder from a system page (the minimal - mmap unit); typically 4096 or 8192 bytes. - - Security: static-safe; optionally more or less - The "security" of malloc refers to the ability of malicious - code to accentuate the effects of errors (for example, freeing - space that is not currently malloc'ed or overwriting past the - ends of chunks) in code that calls malloc. This malloc - guarantees not to modify any memory locations below the base of - heap, i.e., static variables, even in the presence of usage - errors. The routines additionally detect most improper frees - and reallocs. All this holds as long as the static bookkeeping - for malloc itself is not corrupted by some other means. This - is only one aspect of security -- these checks do not, and - cannot, detect all possible programming errors. - - If FOOTERS is defined nonzero, then each allocated chunk - carries an additional check word to verify that it was malloced - from its space. These check words are the same within each - execution of a program using malloc, but differ across - executions, so externally crafted fake chunks cannot be - freed. This improves security by rejecting frees/reallocs that - could corrupt heap memory, in addition to the checks preventing - writes to statics that are always on. This may further improve - security at the expense of time and space overhead. (Note that - FOOTERS may also be worth using with MSPACES.) - - By default detected errors cause the program to abort (calling - "abort()"). You can override this to instead proceed past - errors by defining PROCEED_ON_ERROR. In this case, a bad free - has no effect, and a malloc that encounters a bad address - caused by user overwrites will ignore the bad address by - dropping pointers and indices to all known memory. 
This may - be appropriate for programs that should continue if at all - possible in the face of programming errors, although they may - run out of memory because dropped memory is never reclaimed. - - If you don't like either of these options, you can define - CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything - else. And if if you are sure that your program using malloc has - no errors or vulnerabilities, you can define INSECURE to 1, - which might (or might not) provide a small performance improvement. - - Thread-safety: NOT thread-safe unless USE_LOCKS defined - When USE_LOCKS is defined, each public call to malloc, free, - etc is surrounded with either a pthread mutex or a win32 - spinlock (depending on WIN32). This is not especially fast, and - can be a major bottleneck. It is designed only to provide - minimal protection in concurrent environments, and to provide a - basis for extensions. If you are using malloc in a concurrent - program, consider instead using nedmalloc - (http://www.nedprod.com/programs/portable/nedmalloc/) or - ptmalloc (See http://www.malloc.de), which are derived - from versions of this malloc. - - System requirements: Any combination of MORECORE and/or MMAP/MUNMAP - This malloc can use unix sbrk or any emulation (invoked using - the CALL_MORECORE macro) and/or mmap/munmap or any emulation - (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system - memory. On most unix systems, it tends to work best if both - MORECORE and MMAP are enabled. On Win32, it uses emulations - based on VirtualAlloc. It also uses common C library functions - like memset. - - Compliance: I believe it is compliant with the Single Unix Specification - (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably - others as well. - -* Overview of algorithms - - This is not the fastest, most space-conserving, most portable, or - most tunable malloc ever written. However it is among the fastest - while also being among the most space-conserving, portable and - tunable. Consistent balance across these factors results in a good - general-purpose allocator for malloc-intensive programs. - - In most ways, this malloc is a best-fit allocator. Generally, it - chooses the best-fitting existing chunk for a request, with ties - broken in approximately least-recently-used order. (This strategy - normally maintains low fragmentation.) However, for requests less - than 256bytes, it deviates from best-fit when there is not an - exactly fitting available chunk by preferring to use space adjacent - to that used for the previous small request, as well as by breaking - ties in approximately most-recently-used order. (These enhance - locality of series of small allocations.) And for very large requests - (>= 256Kb by default), it relies on system memory mapping - facilities, if supported. (This helps avoid carrying around and - possibly fragmenting memory used only for large chunks.) - - All operations (except malloc_stats and mallinfo) have execution - times that are bounded by a constant factor of the number of bits in - a size_t, not counting any clearing in calloc or copying in realloc, - or actions surrounding MORECORE and MMAP that have times - proportional to the number of non-contiguous regions returned by - system allocation routines, which is often just 1. 
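[editor's note] A small sketch of the size-based split just described; the 256 KiB threshold is the stated dlmalloc default, and actual behavior depends on the compile-time options documented further down:

#include <stdlib.h>

int
main(void)
{
	/* >= 256 KiB: serviced via mmap() when available, so free()
	 * can hand it straight back to the system. */
	void *big = malloc(512 * 1024);

	/* Small requests are satisfied by best-fit reuse of heap
	 * chunks, with the locality tweaks described above. */
	void *a = malloc(64), *b = malloc(64);

	free(big);
	free(a);
	free(b);
	return (0);
}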
In real-time - applications, you can optionally suppress segment traversals using - NO_SEGMENT_TRAVERSAL, which assures bounded execution even when - system allocators return non-contiguous spaces, at the typical - expense of carrying around more memory and increased fragmentation. - - The implementation is not very modular and seriously overuses - macros. Perhaps someday all C compilers will do as good a job - inlining modular code as can now be done by brute-force expansion, - but now, enough of them seem not to. - - Some compilers issue a lot of warnings about code that is - dead/unreachable only on some platforms, and also about intentional - uses of negation on unsigned types. All known cases of each can be - ignored. - - For a longer but out of date high-level description, see - http://gee.cs.oswego.edu/dl/html/malloc.html - -* MSPACES - If MSPACES is defined, then in addition to malloc, free, etc., - this file also defines mspace_malloc, mspace_free, etc. These - are versions of malloc routines that take an "mspace" argument - obtained using create_mspace, to control all internal bookkeeping. - If ONLY_MSPACES is defined, only these versions are compiled. - So if you would like to use this allocator for only some allocations, - and your system malloc for others, you can compile with - ONLY_MSPACES and then do something like... - static mspace mymspace = create_mspace(0,0); // for example - #define mymalloc(bytes) mspace_malloc(mymspace, bytes) - - (Note: If you only need one instance of an mspace, you can instead - use "USE_DL_PREFIX" to relabel the global malloc.) - - You can similarly create thread-local allocators by storing - mspaces as thread-locals. For example: - static __thread mspace tlms = 0; - void* tlmalloc(size_t bytes) { - if (tlms == 0) tlms = create_mspace(0, 0); - return mspace_malloc(tlms, bytes); - } - void tlfree(void* mem) { mspace_free(tlms, mem); } - - Unless FOOTERS is defined, each mspace is completely independent. - You cannot allocate from one and free to another (although - conformance is only weakly checked, so usage errors are not always - caught). If FOOTERS is defined, then each chunk carries around a tag - indicating its originating mspace, and frees are directed to their - originating spaces. - - ------------------------- Compile-time options --------------------------- - -Be careful in setting #define values for numerical constants of type -size_t. On some systems, literal values are not automatically extended -to size_t precision unless they are explicitly casted. You can also -use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. - -WIN32 default: defined if _WIN32 defined - Defining WIN32 sets up defaults for MS environment and compilers. - Otherwise defaults are for unix. Beware that there seem to be some - cases where this malloc might not be a pure drop-in replacement for - Win32 malloc: Random-looking failures from Win32 GDI API's (eg; - SetDIBits()) may be due to bugs in some video driver implementations - when pixel buffers are malloc()ed, and the region spans more than - one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) - default granularity, pixel buffers may straddle virtual allocation - regions more often than when using the Microsoft allocator. You can - avoid this by using VirtualAlloc() and VirtualFree() for all pixel - buffers rather than using malloc(). If this is not possible, - recompile this malloc with a larger DEFAULT_GRANULARITY. 
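  As an illustrative sketch only (the wrapper file and the values below
  are hypothetical, not part of this source), any of the options
  described in this section can be injected by defining them before
  this file is compiled, for example from a small wrapper translation
  unit:

    #define DEFAULT_GRANULARITY ((size_t)4U * 1024U * 1024U) // e.g. 4MB units
    #define USE_DL_PREFIX   // export dlmalloc/dlfree rather than malloc/free
    #include "malloc.c"     // i.e., this file

  The same effect can be had with compiler -D flags; the essential
  point is that the #defines must be visible before the defaults below
  are chosen.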
-
-MALLOC_ALIGNMENT         default: (size_t)8
-  Controls the minimum alignment for malloc'ed chunks.  It must be a
-  power of two and at least 8, even on machines for which smaller
-  alignments would suffice. It may be defined as larger than this
-  though. Note however that code and data structures are optimized for
-  the case of 8-byte alignment.
-
-MSPACES                  default: 0 (false)
-  If true, compile in support for independent allocation spaces.
-  This is only supported if HAVE_MMAP is true.
-
-ONLY_MSPACES             default: 0 (false)
-  If true, only compile in mspace versions, not regular versions.
-
-USE_LOCKS                default: 0 (false)
-  Causes each call to each public routine to be surrounded with
-  pthread or WIN32 mutex lock/unlock. (If set true, this can be
-  overridden on a per-mspace basis for mspace versions.) If set to a
-  non-zero value other than 1, locks are used, but their
-  implementation is left out, so lock functions must be supplied manually.
-
-USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and on x86 using gcc or MSC
-  If true, uses custom spin locks for locking. This is currently
-  supported only for x86 platforms using gcc or recent MS compilers.
-  Otherwise, posix locks or win32 critical sections are used.
-
-FOOTERS                  default: 0
-  If true, provide extra checking and dispatching by placing
-  information in the footers of allocated chunks. This adds
-  space and time overhead.
-
-INSECURE                 default: 0
-  If true, omit checks for usage errors and heap space overwrites.
-
-USE_DL_PREFIX            default: NOT defined
-  Causes compiler to prefix all public routines with the string 'dl'.
-  This can be useful when you only want to use this malloc in one part
-  of a program, using your regular system malloc elsewhere.
-
-ABORT                    default: defined as abort()
-  Defines how to abort on failed checks.  On most systems, a failed
-  check cannot die with an "assert" or even print an informative
-  message, because the underlying print routines in turn call malloc,
-  which will fail again.  Generally, the best policy is to simply call
-  abort(). It's not very useful to do more than this because many
-  errors due to overwriting will show up as address faults (null, odd
-  addresses etc) rather than malloc-triggered checks, so will also
-  abort.  Also, most compilers know that abort() does not return, so
-  can better optimize code conditionally calling it.
-
-PROCEED_ON_ERROR         default: defined as 0 (false)
-  Controls whether detected bad addresses are bypassed rather than
-  causing an abort. If set, detected bad arguments to free and
-  realloc are ignored. And all bookkeeping information is zeroed out
-  upon a detected overwrite of freed heap space, thus losing the
-  ability to ever return it from malloc again, but enabling the
-  application to proceed. If PROCEED_ON_ERROR is defined, the
-  static variable malloc_corruption_error_count is compiled in
-  and can be examined to see if errors have occurred. This option
-  generates slower code than the default abort policy.
-
-DEBUG                    default: NOT defined
-  The DEBUG setting is mainly intended for people trying to modify
-  this code or diagnose problems when porting to new platforms.
-  However, it may also be able to better isolate user errors than just
-  using runtime checks.  The assertions in the check routines spell
-  out in more detail the assumptions and invariants underlying the
-  algorithms.  The checking is fairly extensive, and will slow down
-  execution noticeably.
-  Calling malloc_stats or mallinfo with DEBUG
-  set will attempt to check every non-mmapped allocated and free chunk
-  in the course of computing the summaries.
-
-ABORT_ON_ASSERT_FAILURE  default: defined as 1 (true)
-  Debugging assertion failures can be nearly impossible if your
-  version of the assert macro causes malloc to be called, which will
-  lead to a cascade of further failures, blowing the runtime stack.
-  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
-  which will usually make debugging easier.
-
-MALLOC_FAILURE_ACTION    default: sets errno to ENOMEM, or no-op on win32
-  The action to take before "return 0" when malloc fails because no
-  memory is available.
-
-HAVE_MORECORE            default: 1 (true) unless win32 or ONLY_MSPACES
-  True if this system supports sbrk or an emulation of it.
-
-MORECORE                 default: sbrk
-  The name of the sbrk-style system routine to call to obtain more
-  memory.  See below for guidance on writing custom MORECORE
-  functions. The type of the argument to sbrk/MORECORE varies across
-  systems.  It cannot be size_t, because it supports negative
-  arguments, so it is normally the signed type of the same width as
-  size_t (sometimes declared as "intptr_t").  It doesn't much matter
-  though. Internally, we only call it with arguments less than half
-  the max value of a size_t, which should work across all reasonable
-  possibilities, although sometimes generating compiler warnings.
-
-MORECORE_CONTIGUOUS      default: 1 (true) if HAVE_MORECORE
-  If true, take advantage of the fact that consecutive calls to MORECORE
-  with positive arguments always return contiguous increasing
-  addresses.  This is true of unix sbrk. It does not hurt too much to
-  set it true anyway, since malloc copes with non-contiguities.
-  Setting it false when definitely non-contiguous saves the time
-  and space it would otherwise take to discover this, though.
-
-MORECORE_CANNOT_TRIM     default: NOT defined
-  True if MORECORE cannot release space back to the system when given
-  negative arguments. This is generally necessary only if you are
-  using a hand-crafted MORECORE function that cannot handle negative
-  arguments.
-
-NO_SEGMENT_TRAVERSAL     default: 0
-  If non-zero, suppresses traversals of memory segments
-  returned by either MORECORE or CALL_MMAP. This disables
-  merging of segments that are contiguous, and selectively
-  releasing them to the OS if unused, but bounds execution times.
-
-HAVE_MMAP                default: 1 (true)
-  True if this system supports mmap or an emulation of it.  If so, and
-  HAVE_MORECORE is not true, MMAP is used for all system
-  allocation. If set and HAVE_MORECORE is true as well, MMAP is
-  primarily used to directly allocate very large blocks. It is also
-  used as a backup strategy in cases where MORECORE fails to provide
-  space from the system. Note: A single call to MUNMAP is assumed to be
-  able to unmap memory that may have been allocated using multiple calls
-  to MMAP, so long as they are adjacent.
-
-HAVE_MREMAP              default: 1 on linux, else 0
-  If true, realloc() uses mremap() to re-allocate large blocks and
-  extend or shrink allocation spaces.
-
-MMAP_CLEARS              default: 1 except on WINCE.
-  True if mmap clears memory so calloc doesn't need to. This is true
-  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
-
-USE_BUILTIN_FFS          default: 0 (i.e., not used)
-  Causes malloc to use the builtin ffs() function to compute indices.
-  Some compilers may recognize and intrinsify ffs to be faster than the
-  supplied C version.
Also, the case of x86 using gcc is special-cased - to an asm instruction, so is already as fast as it can be, and so - this setting has no effect. Similarly for Win32 under recent MS compilers. - (On most x86s, the asm version is only slightly faster than the C version.) - -malloc_getpagesize default: derive from system includes, or 4096. - The system page size. To the extent possible, this malloc manages - memory from the system in page-size units. This may be (and - usually is) a function rather than a constant. This is ignored - if WIN32, where page size is determined using getSystemInfo during - initialization. - -USE_DEV_RANDOM default: 0 (i.e., not used) - Causes malloc to use /dev/random to initialize secure magic seed for - stamping footers. Otherwise, the current time is used. - -NO_MALLINFO default: 0 - If defined, don't compile "mallinfo". This can be a simple way - of dealing with mismatches between system declarations and - those in this file. - -MALLINFO_FIELD_TYPE default: size_t - The type of the fields in the mallinfo struct. This was originally - defined as "int" in SVID etc, but is more usefully defined as - size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set - -REALLOC_ZERO_BYTES_FREES default: not defined - This should be set if a call to realloc with zero bytes should - be the same as a call to free. Some people think it should. Otherwise, - since this malloc returns a unique pointer for malloc(0), so does - realloc(p, 0). - -LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H -LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H -LACKS_STDLIB_H default: NOT defined unless on WIN32 - Define these if your system does not have these header files. - You might need to manually insert some of the declarations they provide. - -DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, - system_info.dwAllocationGranularity in WIN32, - otherwise 64K. - Also settable using mallopt(M_GRANULARITY, x) - The unit for allocating and deallocating memory from the system. On - most systems with contiguous MORECORE, there is no reason to - make this more than a page. However, systems with MMAP tend to - either require or encourage larger granularities. You can increase - this value to prevent system allocation functions to be called so - often, especially if they are slow. The value must be at least one - page and must be a power of two. Setting to 0 causes initialization - to either page size or win32 region size. (Note: In previous - versions of malloc, the equivalent of this option was called - "TOP_PAD") - -DEFAULT_TRIM_THRESHOLD default: 2MB - Also settable using mallopt(M_TRIM_THRESHOLD, x) - The maximum amount of unused top-most memory to keep before - releasing via malloc_trim in free(). Automatic trimming is mainly - useful in long-lived programs using contiguous MORECORE. Because - trimming via sbrk can be slow on some systems, and can sometimes be - wasteful (in cases where programs immediately afterward allocate - more large chunks) the value should be high enough so that your - overall system performance would improve by releasing this much - memory. As a rough guide, you might set to a value close to the - average size of a process (program) running on your system. - Releasing this much memory would allow such a process to run in - memory. 
Generally, it is worth tuning trim thresholds when a - program undergoes phases where several large chunks are allocated - and released in ways that can reuse each other's storage, perhaps - mixed with phases where there are no such chunks at all. The trim - value must be greater than page size to have any useful effect. To - disable trimming completely, you can set to MAX_SIZE_T. Note that the trick - some people use of mallocing a huge space and then freeing it at - program startup, in an attempt to reserve system memory, doesn't - have the intended effect under automatic trimming, since that memory - will immediately be returned to the system. - -DEFAULT_MMAP_THRESHOLD default: 256K - Also settable using mallopt(M_MMAP_THRESHOLD, x) - The request size threshold for using MMAP to directly service a - request. Requests of at least this size that cannot be allocated - using already-existing space will be serviced via mmap. (If enough - normal freed space already exists it is used instead.) Using mmap - segregates relatively large chunks of memory so that they can be - individually obtained and released from the host system. A request - serviced through mmap is never reused by any other request (at least - not directly; the system may just so happen to remap successive - requests to the same locations). Segregating space in this way has - the benefits that: Mmapped space can always be individually released - back to the system, which helps keep the system level memory demands - of a long-lived program low. Also, mapped memory doesn't become - `locked' between other chunks, as can happen with normally allocated - chunks, which means that even trimming via malloc_trim would not - release them. However, it has the disadvantage that the space - cannot be reclaimed, consolidated, and then used to service later - requests, as happens with normal chunks. The advantages of mmap - nearly always outweigh disadvantages for "large" chunks, but the - value of "large" may vary across systems. The default is an - empirically derived value that works well in most systems. You can - disable mmap by setting to MAX_SIZE_T. - -MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP - The number of consolidated frees between checks to release - unused segments when freeing. When using non-contiguous segments, - especially with multiple mspaces, checking only for topmost space - doesn't always suffice to trigger trimming. To compensate for this, - free() will, with a period of MAX_RELEASE_CHECK_RATE (or the - current number of segments, if greater) try to release unused - segments to the OS when freeing chunks that result in - consolidation. The best value for this parameter is a compromise - between slowing down frees with relatively costly checks that - rarely trigger versus holding on to unused memory. To effectively - disable, set to MAX_SIZE_T. This may lead to a very slight speed - improvement at the expense of carrying around more memory. 
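  For the runtime-tunable subset above, a minimal usage sketch (return
  values and parameter numbers as documented here; the chosen values
  are only examples):

    mallopt(M_TRIM_THRESHOLD, 2 * 1024 * 1024); // trim when 2MB of top space is unused
    mallopt(M_GRANULARITY,    64 * 1024);       // obtain system memory in 64K units
    mallopt(M_MMAP_THRESHOLD, 1024 * 1024);     // mmap requests of 1MB and larger

  Each call returns 1 if the value was accepted and 0 otherwise, and,
  because mallopt takes int arguments, the value -1 stands in for
  MAX_SIZE_T.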
-*/
-
-/* Version identifier to allow people to support multiple versions */
-#ifndef DLMALLOC_VERSION
-#define DLMALLOC_VERSION 20804
-#endif /* DLMALLOC_VERSION */
-
-#ifndef WIN32
-#ifdef _WIN32
-#define WIN32 1
-#endif  /* _WIN32 */
-#ifdef _WIN32_WCE
-#define LACKS_FCNTL_H
-#define WIN32 1
-#endif /* _WIN32_WCE */
-#endif  /* WIN32 */
-#ifdef WIN32
-#define WIN32_LEAN_AND_MEAN
-#if (_WIN32_WINNT < 0x403)
-#define _WIN32_WINNT 0x403
-#endif
-#include <windows.h>
-#define HAVE_MMAP 1
-#define HAVE_MORECORE 0
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_SYS_MMAN_H
-#define LACKS_STRING_H
-#define LACKS_STRINGS_H
-#define LACKS_SYS_TYPES_H
-#define LACKS_ERRNO_H
-#ifndef MALLOC_FAILURE_ACTION
-#define MALLOC_FAILURE_ACTION
-#endif /* MALLOC_FAILURE_ACTION */
-#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
-#define MMAP_CLEARS 0
-#else
-#define MMAP_CLEARS 1
-#endif /* _WIN32_WCE */
-#endif  /* WIN32 */
-
-#if defined(DARWIN) || defined(_DARWIN)
-/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
-#ifndef HAVE_MORECORE
-#define HAVE_MORECORE 0
-#define HAVE_MMAP 1
-/* OSX allocators provide 16 byte alignment */
-#ifndef MALLOC_ALIGNMENT
-#define MALLOC_ALIGNMENT ((size_t)16U)
-#endif
-#endif  /* HAVE_MORECORE */
-#endif  /* DARWIN */
-
-#ifndef LACKS_SYS_TYPES_H
-#include <sys/types.h>  /* For size_t */
-#endif  /* LACKS_SYS_TYPES_H */
-
-/* The maximum possible size_t value has all bits set */
-#define MAX_SIZE_T           (~(size_t)0)
-
-#ifndef ONLY_MSPACES
-#define ONLY_MSPACES 0     /* define to a value */
-#else
-#define ONLY_MSPACES 1
-#endif  /* ONLY_MSPACES */
-#ifndef MSPACES
-#if ONLY_MSPACES
-#define MSPACES 1
-#else   /* ONLY_MSPACES */
-#define MSPACES 0
-#endif  /* ONLY_MSPACES */
-#endif  /* MSPACES */
-#ifndef MALLOC_ALIGNMENT
-#define MALLOC_ALIGNMENT ((size_t)8U)
-#endif  /* MALLOC_ALIGNMENT */
-#ifndef FOOTERS
-#define FOOTERS 0
-#endif  /* FOOTERS */
-#ifndef ABORT
-#define ABORT  abort()
-#endif  /* ABORT */
-#ifndef ABORT_ON_ASSERT_FAILURE
-#define ABORT_ON_ASSERT_FAILURE 1
-#endif  /* ABORT_ON_ASSERT_FAILURE */
-#ifndef PROCEED_ON_ERROR
-#define PROCEED_ON_ERROR 0
-#endif  /* PROCEED_ON_ERROR */
-#ifndef USE_LOCKS
-#define USE_LOCKS 0
-#endif  /* USE_LOCKS */
-#ifndef USE_SPIN_LOCKS
-#if USE_LOCKS && ((defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(_MSC_VER) && _MSC_VER>=1310))
-#define USE_SPIN_LOCKS 1
-#else
-#define USE_SPIN_LOCKS 0
-#endif /* USE_LOCKS && ... */
-#endif /* USE_SPIN_LOCKS */
-#ifndef INSECURE
-#define INSECURE 0
-#endif  /* INSECURE */
-#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
-#endif  /* HAVE_MMAP */
-#ifndef MMAP_CLEARS
-#define MMAP_CLEARS 1
-#endif  /* MMAP_CLEARS */
-#ifndef HAVE_MREMAP
-#ifdef linux
-#define HAVE_MREMAP 1
-#else   /* linux */
-#define HAVE_MREMAP 0
-#endif  /* linux */
-#endif  /* HAVE_MREMAP */
-#ifndef MALLOC_FAILURE_ACTION
-#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
-#endif  /* MALLOC_FAILURE_ACTION */
-#ifndef HAVE_MORECORE
-#if ONLY_MSPACES
-#define HAVE_MORECORE 0
-#else   /* ONLY_MSPACES */
-#define HAVE_MORECORE 1
-#endif  /* ONLY_MSPACES */
-#endif  /* HAVE_MORECORE */
-#if !HAVE_MORECORE
-#define MORECORE_CONTIGUOUS 0
-#else   /* !HAVE_MORECORE */
-#define MORECORE_DEFAULT sbrk
-#ifndef MORECORE_CONTIGUOUS
-#define MORECORE_CONTIGUOUS 1
-#endif  /* MORECORE_CONTIGUOUS */
-#endif  /* HAVE_MORECORE */
-#ifndef DEFAULT_GRANULARITY
-#if (MORECORE_CONTIGUOUS || defined(WIN32))
-#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
-#else   /* MORECORE_CONTIGUOUS */
-#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
-#endif  /* MORECORE_CONTIGUOUS */
-#endif  /* DEFAULT_GRANULARITY */
-#ifndef DEFAULT_TRIM_THRESHOLD
-#ifndef MORECORE_CANNOT_TRIM
-#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
-#else   /* MORECORE_CANNOT_TRIM */
-#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
-#endif  /* MORECORE_CANNOT_TRIM */
-#endif  /* DEFAULT_TRIM_THRESHOLD */
-#ifndef DEFAULT_MMAP_THRESHOLD
-#if HAVE_MMAP
-#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
-#else   /* HAVE_MMAP */
-#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
-#endif  /* HAVE_MMAP */
-#endif  /* DEFAULT_MMAP_THRESHOLD */
-#ifndef MAX_RELEASE_CHECK_RATE
-#if HAVE_MMAP
-#define MAX_RELEASE_CHECK_RATE 4095
-#else
-#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
-#endif /* HAVE_MMAP */
-#endif /* MAX_RELEASE_CHECK_RATE */
-#ifndef USE_BUILTIN_FFS
-#define USE_BUILTIN_FFS 0
-#endif  /* USE_BUILTIN_FFS */
-#ifndef USE_DEV_RANDOM
-#define USE_DEV_RANDOM 0
-#endif  /* USE_DEV_RANDOM */
-#ifndef NO_MALLINFO
-#define NO_MALLINFO 0
-#endif  /* NO_MALLINFO */
-#ifndef MALLINFO_FIELD_TYPE
-#define MALLINFO_FIELD_TYPE size_t
-#endif  /* MALLINFO_FIELD_TYPE */
-#ifndef NO_SEGMENT_TRAVERSAL
-#define NO_SEGMENT_TRAVERSAL 0
-#endif /* NO_SEGMENT_TRAVERSAL */
-
-/*
-  mallopt tuning options.  SVID/XPG defines four standard parameter
-  numbers for mallopt, normally defined in malloc.h.  None of these
-  are used in this malloc, so setting them has no effect. But this
-  malloc does support the following options.
-*/
-
-#define M_TRIM_THRESHOLD     (-1)
-#define M_GRANULARITY        (-2)
-#define M_MMAP_THRESHOLD     (-3)
-
-/* ------------------------ Mallinfo declarations ------------------------ */
-
-#if !NO_MALLINFO
-/*
-  This version of malloc supports the standard SVID/XPG mallinfo
-  routine that returns a struct containing usage properties and
-  statistics. It should work on any system that has a
-  /usr/include/malloc.h defining struct mallinfo. The main
-  declaration needed is the mallinfo struct that is returned (by-copy)
-  by mallinfo(). The mallinfo struct contains a bunch of fields that
-  are not even meaningful in this version of malloc. These fields are
-  instead filled by mallinfo() with other numbers that might be of
-  interest.
-
-  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
-  /usr/include/malloc.h file that includes a declaration of struct
-  mallinfo. If so, it is included; else a compliant version is
-  declared below.
-  These must be precisely the same for mallinfo() to
-  work.  The original SVID version of this struct, defined on most
-  systems with mallinfo, declares all fields as ints. But some others
-  define as unsigned long. If your system defines the fields using a
-  type of different width than listed here, you MUST #include your
-  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
-*/
-
-/* #define HAVE_USR_INCLUDE_MALLOC_H */
-
-#ifdef HAVE_USR_INCLUDE_MALLOC_H
-#include "/usr/include/malloc.h"
-#elif defined(__ANDROID__) || defined(ANDROID)
-#include <malloc.h>
-#define STRUCT_MALLINFO_DECLARED 1
-#else /* HAVE_USR_INCLUDE_MALLOC_H */
-#ifndef STRUCT_MALLINFO_DECLARED
-#define STRUCT_MALLINFO_DECLARED 1
-struct mallinfo {
-  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
-  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
-  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
-  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
-  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
-  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
-  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
-  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
-  MALLINFO_FIELD_TYPE fordblks; /* total free space */
-  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
-};
-#endif /* STRUCT_MALLINFO_DECLARED */
-#endif /* HAVE_USR_INCLUDE_MALLOC_H */
-#endif /* NO_MALLINFO */
-
-/*
-  Try to persuade compilers to inline. The most critical functions for
-  inlining are defined as macros, so these aren't used for them.
-*/
-
-#ifndef FORCEINLINE
-  #if defined(__GNUC__)
-#define FORCEINLINE __inline __attribute__ ((always_inline))
-  #elif defined(_MSC_VER)
-    #define FORCEINLINE __forceinline
-  #endif
-#endif
-#ifndef NOINLINE
-  #if defined(__GNUC__)
-    #define NOINLINE __attribute__ ((noinline))
-  #elif defined(_MSC_VER)
-    #define NOINLINE __declspec(noinline)
-  #else
-    #define NOINLINE
-  #endif
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#ifndef FORCEINLINE
- #define FORCEINLINE inline
-#endif
-#endif /* __cplusplus */
-#ifndef FORCEINLINE
- #define FORCEINLINE
-#endif
-
-#if !ONLY_MSPACES
-
-/* ------------------- Declarations of public routines ------------------- */
-
-#ifndef USE_DL_PREFIX
-#define dlcalloc               calloc
-#define dlfree                 free
-#define dlmalloc               malloc
-#define dlmemalign             memalign
-#define dlrealloc              realloc
-#define dlvalloc               valloc
-#define dlpvalloc              pvalloc
-#define dlmallinfo             mallinfo
-#define dlmallopt              mallopt
-#define dlmalloc_trim          malloc_trim
-#define dlmalloc_stats         malloc_stats
-#define dlmalloc_usable_size   malloc_usable_size
-#define dlmalloc_footprint     malloc_footprint
-#define dlmalloc_max_footprint malloc_max_footprint
-#define dlindependent_calloc   independent_calloc
-#define dlindependent_comalloc independent_comalloc
-#endif /* USE_DL_PREFIX */
-
-
-/*
-  malloc(size_t n)
-  Returns a pointer to a newly allocated chunk of at least n bytes, or
-  null if no space is available, in which case errno is set to ENOMEM
-  on ANSI C systems.
-
-  If n is zero, malloc returns a minimum-sized chunk. (The minimum
-  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
-  systems.)  Note that size_t is an unsigned type, so calls with
-  arguments that would be negative if signed are interpreted as
-  requests for huge amounts of space, which will often fail. The
-  maximum supported value of n differs across systems, but is in all
-  cases less than the maximum representable value of a size_t.
-*/
-void* dlmalloc(size_t);
-
-/*
-  free(void* p)
-  Releases the chunk of memory pointed to by p, that had been previously
-  allocated using malloc or a related routine such as realloc.
-  It has no effect if p is null. If p was not malloced or already
-  freed, free(p) will by default cause the current program to abort.
-*/
-void  dlfree(void*);
-
-/*
-  calloc(size_t n_elements, size_t element_size);
-  Returns a pointer to n_elements * element_size bytes, with all locations
-  set to zero.
-*/
-void* dlcalloc(size_t, size_t);
-
-/*
-  realloc(void* p, size_t n)
-  Returns a pointer to a chunk of size n that contains the same data
-  as does chunk p up to the minimum of (n, p's size) bytes, or null
-  if no space is available.
-
-  The returned pointer may or may not be the same as p. The algorithm
-  prefers extending p in most cases when possible, otherwise it
-  employs the equivalent of a malloc-copy-free sequence.
-
-  If p is null, realloc is equivalent to malloc.
-
-  If space is not available, realloc returns null, errno is set (if on
-  ANSI) and p is NOT freed.
-
-  If n is for fewer bytes than already held by p, the newly unused
-  space is lopped off and freed if possible.  realloc with a size
-  argument of zero (re)allocates a minimum-sized chunk.
-
-  The old unix realloc convention of allowing the last-free'd chunk
-  to be used as an argument to realloc is not supported.
-*/
-
-void* dlrealloc(void*, size_t);
-
-/*
-  memalign(size_t alignment, size_t n);
-  Returns a pointer to a newly allocated chunk of n bytes, aligned
-  in accord with the alignment argument.
-
-  The alignment argument should be a power of two. If the argument is
-  not a power of two, the nearest greater power is used.
-  8-byte alignment is guaranteed by normal malloc calls, so don't
-  bother calling memalign with an argument of 8 or less.
-
-  Overreliance on memalign is a sure way to fragment space.
-*/
-void* dlmemalign(size_t, size_t);
-
-/*
-  valloc(size_t n);
-  Equivalent to memalign(pagesize, n), where pagesize is the page
-  size of the system. If the pagesize is unknown, 4096 is used.
-*/
-void* dlvalloc(size_t);
-
-/*
-  mallopt(int parameter_number, int parameter_value)
-  Sets tunable parameters. The format is to provide a
-  (parameter-number, parameter-value) pair.  mallopt then sets the
-  corresponding parameter to the argument value if it can (i.e., so
-  long as the value is meaningful), and returns 1 if successful else
-  0. To work around the fact that mallopt is specified to use int,
-  not size_t parameters, the value -1 is specially treated as the
-  maximum unsigned size_t value.
-
-  SVID/XPG/ANSI defines four standard param numbers for mallopt,
-  normally defined in malloc.h.  None of these are used in this malloc,
-  so setting them has no effect. But this malloc also supports other
-  options in mallopt. See below for details.  Briefly, supported
-  parameters are as follows (listed defaults are for "typical"
-  configurations).
-
-  Symbol            param #  default    allowed param values
-  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
-  M_GRANULARITY        -2     page size   any power of 2 >= page size
-  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
-*/
-int dlmallopt(int, int);
-
-/*
-  malloc_footprint();
-  Returns the number of bytes obtained from the system.  The total
-  number of bytes allocated by malloc, realloc etc., is less than this
-  value. Unlike mallinfo, this function returns only a precomputed
-  result, so can be called frequently to monitor memory consumption.
- Even if locks are otherwise defined, this function does not use them, - so results might not be up to date. -*/ -size_t dlmalloc_footprint(void); - -/* - malloc_max_footprint(); - Returns the maximum number of bytes obtained from the system. This - value will be greater than current footprint if deallocated space - has been reclaimed by the system. The peak number of bytes allocated - by malloc, realloc etc., is less than this value. Unlike mallinfo, - this function returns only a precomputed result, so can be called - frequently to monitor memory consumption. Even if locks are - otherwise defined, this function does not use them, so results might - not be up to date. -*/ -size_t dlmalloc_max_footprint(void); - -#if !NO_MALLINFO -/* - mallinfo() - Returns (by copy) a struct containing various summary statistics: - - arena: current total non-mmapped bytes allocated from system - ordblks: the number of free chunks - smblks: always zero. - hblks: current number of mmapped regions - hblkhd: total bytes held in mmapped regions - usmblks: the maximum total allocated space. This will be greater - than current total if trimming has occurred. - fsmblks: always zero - uordblks: current total allocated space (normal or mmapped) - fordblks: total free space - keepcost: the maximum number of bytes that could ideally be released - back to system via malloc_trim. ("ideally" means that - it ignores page restrictions etc.) - - Because these fields are ints, but internal bookkeeping may - be kept as longs, the reported values may wrap around zero and - thus be inaccurate. -*/ -struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ - -/* - independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); - - independent_calloc is similar to calloc, but instead of returning a - single cleared space, it returns an array of pointers to n_elements - independent elements that can hold contents of size elem_size, each - of which starts out cleared, and can be independently freed, - realloc'ed etc. The elements are guaranteed to be adjacently - allocated (this is not guaranteed to occur with multiple callocs or - mallocs), which may also improve cache locality in some - applications. - - The "chunks" argument is optional (i.e., may be null, which is - probably the most typical usage). If it is null, the returned array - is itself dynamically allocated and should also be freed when it is - no longer needed. Otherwise, the chunks array must be of at least - n_elements in length. It is filled in with the pointers to the - chunks. - - In either case, independent_calloc returns this pointer array, or - null if the allocation failed. If n_elements is zero and "chunks" - is null, it returns a chunk representing an array with zero elements - (which should be freed if not wanted). - - Each element must be individually freed when it is no longer - needed. If you'd like to instead be able to free all at once, you - should instead use regular calloc and assign pointers into this - space to represent elements. (In this case though, you cannot - independently free elements.) - - independent_calloc simplifies and speeds up implementations of many - kinds of pools. It may also be useful when constructing large data - structures that initially have a fixed number of fixed-sized nodes, - but the number is not known at compile time, and some of the nodes - may later need to be freed. 
-  For example:
-
-    struct Node { int item; struct Node* next; };
-
-    struct Node* build_list() {
-      struct Node** pool;
-      int n = read_number_of_nodes_needed();
-      int i;
-      if (n <= 0) return 0;
-      pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
-      if (pool == 0) die();
-      // organize into a linked list...
-      struct Node* first = pool[0];
-      for (i = 0; i < n-1; ++i)
-        pool[i]->next = pool[i+1];
-      free(pool);     // Can now free the array (or not, if it is needed later)
-      return first;
-    }
-*/
-void** dlindependent_calloc(size_t, size_t, void**);
-
-/*
-  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
-
-  independent_comalloc allocates, all at once, a set of n_elements
-  chunks with sizes indicated in the "sizes" array.  It returns
-  an array of pointers to these elements, each of which can be
-  independently freed, realloc'ed etc. The elements are guaranteed to
-  be adjacently allocated (this is not guaranteed to occur with
-  multiple callocs or mallocs), which may also improve cache locality
-  in some applications.
-
-  The "chunks" argument is optional (i.e., may be null). If it is null
-  the returned array is itself dynamically allocated and should also
-  be freed when it is no longer needed. Otherwise, the chunks array
-  must be of at least n_elements in length. It is filled in with the
-  pointers to the chunks.
-
-  In either case, independent_comalloc returns this pointer array, or
-  null if the allocation failed.  If n_elements is zero and chunks is
-  null, it returns a chunk representing an array with zero elements
-  (which should be freed if not wanted).
-
-  Each element must be individually freed when it is no longer
-  needed. If you'd like to instead be able to free all at once, you
-  should instead use a single regular malloc, and assign pointers at
-  particular offsets in the aggregate space. (In this case though, you
-  cannot independently free elements.)
-
-  independent_comalloc differs from independent_calloc in that each
-  element may have a different size, and also that it does not
-  automatically clear elements.
-
-  independent_comalloc can be used to speed up allocation in cases
-  where several structs or objects must always be allocated at the
-  same time.  For example:
-
-    struct Head { ... };
-    struct Foot { ... };
-
-    void send_message(char* msg) {
-      int msglen = strlen(msg);
-      size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
-      void* chunks[3];
-      if (independent_comalloc(3, sizes, chunks) == 0)
-        die();
-      struct Head* head = (struct Head*)(chunks[0]);
-      char*        body = (char*)(chunks[1]);
-      struct Foot* foot = (struct Foot*)(chunks[2]);
-      // ...
-    }
-
-  In general though, independent_comalloc is worth using only for
-  larger values of n_elements. For small values, you probably won't
-  detect enough difference from series of malloc calls to bother.
-
-  Overuse of independent_comalloc can increase overall memory usage,
-  since it cannot reuse existing noncontiguous small chunks that
-  might be available for some of the elements.
-*/
-void** dlindependent_comalloc(size_t, size_t*, void**);
-
-
-/*
-  pvalloc(size_t n);
-  Equivalent to valloc(minimum-page-that-holds(n)), that is,
-  round up n to nearest pagesize.
- */
-void*  dlpvalloc(size_t);
-
-/*
-  malloc_trim(size_t pad);
-
-  If possible, gives memory back to the system (via negative arguments
-  to sbrk) if there is unused memory at the `high' end of the malloc
-  pool or in unused MMAP segments.
You can call this after freeing - large blocks of memory to potentially reduce the system-level memory - requirements of a program. However, it cannot guarantee to reduce - memory. Under some allocation patterns, some large free blocks of - memory will be locked between two used chunks, so they cannot be - given back to the system. - - The `pad' argument to malloc_trim represents the amount of free - trailing space to leave untrimmed. If this argument is zero, only - the minimum amount of memory to maintain internal data structures - will be left. Non-zero arguments can be supplied to maintain enough - trailing space to service future expected allocations without having - to re-obtain memory from the system. - - Malloc_trim returns 1 if it actually released any memory, else 0. -*/ -int dlmalloc_trim(size_t); - -/* - malloc_stats(); - Prints on stderr the amount of space obtained from the system (both - via sbrk and mmap), the maximum amount (which may be more than - current if malloc_trim and/or munmap got called), and the current - number of bytes allocated via malloc (or realloc, etc) but not yet - freed. Note that this is the number of bytes allocated, not the - number requested. It will be larger than the number requested - because of alignment and bookkeeping overhead. Because it includes - alignment wastage as being in use, this figure may be greater than - zero even when no user-level chunks are allocated. - - The reported current and maximum system memory can be inaccurate if - a program makes other calls to system memory allocation functions - (normally sbrk) outside of malloc. - - malloc_stats prints only the most commonly interesting statistics. - More information can be obtained by calling mallinfo. -*/ -void dlmalloc_stats(void); - -#endif /* ONLY_MSPACES */ - -/* - malloc_usable_size(void* p); - - Returns the number of bytes you can actually use in - an allocated chunk, which may be more than you requested (although - often not) due to alignment and minimum size constraints. - You can use this many bytes without worrying about - overwriting other allocated objects. This is not a particularly great - programming practice. malloc_usable_size can be more useful in - debugging and assertions, for example: - - p = malloc(n); - assert(malloc_usable_size(p) >= 256); -*/ -size_t dlmalloc_usable_size(void*); - - -#if MSPACES - -/* - mspace is an opaque type representing an independent - region of space that supports mspace_malloc, etc. -*/ -typedef void* mspace; - -/* - create_mspace creates and returns a new independent space with the - given initial capacity, or, if 0, the default granularity size. It - returns null if there is no system memory available to create the - space. If argument locked is non-zero, the space uses a separate - lock to control access. The capacity of the space will grow - dynamically as needed to service mspace_malloc requests. You can - control the sizes of incremental increases of this space by - compiling with a different DEFAULT_GRANULARITY or dynamically - setting with mallopt(M_GRANULARITY, value). -*/ -mspace create_mspace(size_t capacity, int locked); - -/* - destroy_mspace destroys the given space, and attempts to return all - of its memory back to the system, returning the total number of - bytes freed. After destruction, the results of access to all memory - used by the space become undefined. -*/ -size_t destroy_mspace(mspace msp); - -/* - create_mspace_with_base uses the memory supplied as the initial base - of a new mspace. 
-  Part (less than 128*sizeof(size_t) bytes) of this
-  space is used for bookkeeping, so the capacity must be at least this
-  large. (Otherwise 0 is returned.) When this initial space is
-  exhausted, additional memory will be obtained from the system.
-  Destroying this space will deallocate all additionally allocated
-  space (if possible) but not the initial base.
-*/
-mspace create_mspace_with_base(void* base, size_t capacity, int locked);
-
-/*
-  mspace_mmap_large_chunks controls whether requests for large chunks
-  are allocated in their own mmapped regions, separate from others in
-  this mspace. By default this is enabled, which reduces
-  fragmentation. However, such chunks are not necessarily released to
-  the system upon destroy_mspace. Disabling by setting to false may
-  increase fragmentation, but avoids leakage when relying on
-  destroy_mspace to release all memory allocated using this space.
-*/
-int mspace_mmap_large_chunks(mspace msp, int enable);
-
-
-/*
-  mspace_malloc behaves as malloc, but operates within
-  the given space.
-*/
-void* mspace_malloc(mspace msp, size_t bytes);
-
-/*
-  mspace_free behaves as free, but operates within
-  the given space.
-
-  If compiled with FOOTERS==1, mspace_free is not actually needed.
-  free may be called instead of mspace_free because freed chunks from
-  any space are handled by their originating spaces.
-*/
-void mspace_free(mspace msp, void* mem);
-
-/*
-  mspace_realloc behaves as realloc, but operates within
-  the given space.
-
-  If compiled with FOOTERS==1, mspace_realloc is not actually
-  needed. realloc may be called instead of mspace_realloc because
-  realloced chunks from any space are handled by their originating
-  spaces.
-*/
-void* mspace_realloc(mspace msp, void* mem, size_t newsize);
-
-/*
-  mspace_calloc behaves as calloc, but operates within
-  the given space.
-*/
-void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
-
-/*
-  mspace_memalign behaves as memalign, but operates within
-  the given space.
-*/
-void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
-
-/*
-  mspace_independent_calloc behaves as independent_calloc, but
-  operates within the given space.
-*/
-void** mspace_independent_calloc(mspace msp, size_t n_elements,
-                                 size_t elem_size, void* chunks[]);
-
-/*
-  mspace_independent_comalloc behaves as independent_comalloc, but
-  operates within the given space.
-*/
-void** mspace_independent_comalloc(mspace msp, size_t n_elements,
-                                   size_t sizes[], void* chunks[]);
-
-/*
-  mspace_footprint() returns the number of bytes obtained from the
-  system for this space.
-*/
-size_t mspace_footprint(mspace msp);
-
-/*
-  mspace_max_footprint() returns the peak number of bytes obtained from the
-  system for this space.
-*/
-size_t mspace_max_footprint(mspace msp);
-
-
-#if !NO_MALLINFO
-/*
-  mspace_mallinfo behaves as mallinfo, but reports properties of
-  the given space.
-*/
-struct mallinfo mspace_mallinfo(mspace msp);
-#endif /* NO_MALLINFO */
-
-/*
-  mspace_usable_size(void* p) behaves the same as malloc_usable_size;
-*/
-size_t mspace_usable_size(void* mem);
-
-/*
-  mspace_malloc_stats behaves as malloc_stats, but reports
-  properties of the given space.
-*/
-void mspace_malloc_stats(mspace msp);
-
-/*
-  mspace_trim behaves as malloc_trim, but
-  operates within the given space.
-*/
-int mspace_trim(mspace msp, size_t pad);
-
-/*
-  An alias for mallopt.
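  Taken together, a minimal lifecycle sketch for the MSPACES API
  (error handling omitted; the capacities are illustrative):

    mspace arena = create_mspace(0, 0);      // default capacity, no locking
    void* p = mspace_malloc(arena, 128);
    mspace_free(arena, p);                   // optional if destroying anyway
    size_t released = destroy_mspace(arena); // bytes returned to the system

  Destroying the space releases everything allocated from it, so a
  per-task arena can be torn down in one call instead of freeing each
  chunk individually.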
-*/
-int mspace_mallopt(int, int);
-
-#endif /* MSPACES */
-
-#ifdef __cplusplus
-}  /* end of extern "C" */
-#endif /* __cplusplus */
-
-/*
-  ========================================================================
-  To make a fully customizable malloc.h header file, cut everything
-  above this line, put into file malloc.h, edit to suit, and #include it
-  on the next line, as well as in programs that use this malloc.
-  ========================================================================
-*/
-
-/* #include "malloc.h" */
-
-/*------------------------------ internal #includes ---------------------- */
-
-#ifdef WIN32
-#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
-#endif /* WIN32 */
-
-#include <stdio.h>       /* for printing in malloc_stats */
-
-#ifndef LACKS_ERRNO_H
-#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
-#endif /* LACKS_ERRNO_H */
-#if FOOTERS
-#include <time.h>        /* for magic initialization */
-#endif /* FOOTERS */
-#ifndef LACKS_STDLIB_H
-#include <stdlib.h>      /* for abort() */
-#endif /* LACKS_STDLIB_H */
-#ifdef DEBUG
-#if ABORT_ON_ASSERT_FAILURE
-#undef assert
-#define assert(x) if(!(x)) ABORT
-#else /* ABORT_ON_ASSERT_FAILURE */
-#include <assert.h>
-#endif /* ABORT_ON_ASSERT_FAILURE */
-#else  /* DEBUG */
-#ifndef assert
-#define assert(x)
-#endif
-#define DEBUG 0
-#endif /* DEBUG */
-#ifndef LACKS_STRING_H
-#include <string.h>      /* for memset etc */
-#endif  /* LACKS_STRING_H */
-#if USE_BUILTIN_FFS
-#ifndef LACKS_STRINGS_H
-#include <strings.h>     /* for ffs */
-#endif /* LACKS_STRINGS_H */
-#endif /* USE_BUILTIN_FFS */
-#if HAVE_MMAP
-#ifndef LACKS_SYS_MMAN_H
-#include <sys/mman.h>    /* for mmap */
-#endif /* LACKS_SYS_MMAN_H */
-#ifndef LACKS_FCNTL_H
-#include <fcntl.h>
-#endif /* LACKS_FCNTL_H */
-#endif /* HAVE_MMAP */
-#ifndef LACKS_UNISTD_H
-#include <unistd.h>      /* for sbrk, sysconf */
-#else /* LACKS_UNISTD_H */
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-extern void* sbrk(ptrdiff_t);
-#endif /* FreeBSD etc */
-#endif /* LACKS_UNISTD_H */
-
-/* Declarations for locking */
-#if USE_LOCKS
-#ifndef WIN32
-#include <pthread.h>
-#if defined (__SVR4) && defined (__sun)  /* solaris */
-#include <thread.h>
-#endif /* solaris */
-#else
-#ifndef _M_AMD64
-/* These are already defined on AMD64 builds */
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
-LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-#endif /* _M_AMD64 */
-#pragma intrinsic (_InterlockedCompareExchange)
-#pragma intrinsic (_InterlockedExchange)
-#define interlockedcompareexchange _InterlockedCompareExchange
-#define interlockedexchange _InterlockedExchange
-#endif /* Win32 */
-// --- BEGIN OGRE MODIFICATION ---
-// MinGW compatibility
-#ifdef __MINGW32__
-#define interlockedcompareexchange InterlockedCompareExchange
-#define interlockedexchange InterlockedExchange
-#endif /* __MINGW32__ */
-// --- END OGRE MODIFICATION ---
-#endif /* USE_LOCKS */
-
-/* Declarations for bit scanning on win32 */
-#if defined(_MSC_VER) && _MSC_VER>=1300
-#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
-unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#define BitScanForward _BitScanForward
-#define BitScanReverse _BitScanReverse
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-#endif /* BitScanForward */
-#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
-
-#ifndef WIN32
-#ifndef malloc_getpagesize
-#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
-#    ifndef _SC_PAGE_SIZE
-#      define _SC_PAGE_SIZE _SC_PAGESIZE
-#    endif
-#  endif
-#  ifdef _SC_PAGE_SIZE
-#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
-#  else
-#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
-       extern size_t getpagesize();
-#      define malloc_getpagesize getpagesize()
-#    else
-#      ifdef WIN32 /* use supplied emulation of getpagesize */
-#        define malloc_getpagesize getpagesize()
-#      else
-#        ifndef LACKS_SYS_PARAM_H
-#          include <sys/param.h>
-#        endif
-#        ifdef EXEC_PAGESIZE
-#          define malloc_getpagesize EXEC_PAGESIZE
-#        else
-#          ifdef NBPG
-#            ifndef CLSIZE
-#              define malloc_getpagesize NBPG
-#            else
-#              define malloc_getpagesize (NBPG * CLSIZE)
-#            endif
-#          else
-#            ifdef NBPC
-#              define malloc_getpagesize NBPC
-#            else
-#              ifdef PAGESIZE
-#                define malloc_getpagesize PAGESIZE
-#              else /* just guess */
-#                define malloc_getpagesize ((size_t)4096U)
-#              endif
-#            endif
-#          endif
-#        endif
-#      endif
-#    endif
-#  endif
-#endif
-#endif
-
-
-
-/* ------------------- size_t and alignment properties -------------------- */
-
-/* The byte and bit size of a size_t */
-#define SIZE_T_SIZE         (sizeof(size_t))
-#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
-
-/* Some constants coerced to size_t */
-/* Annoying but necessary to avoid errors on some platforms */
-#define SIZE_T_ZERO         ((size_t)0)
-#define SIZE_T_ONE          ((size_t)1)
-#define SIZE_T_TWO          ((size_t)2)
-#define SIZE_T_FOUR         ((size_t)4)
-#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
-#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
-#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
-#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
-
-/* The bit mask value corresponding to MALLOC_ALIGNMENT */
-#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
-
-/* True if address a has acceptable alignment */
-#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
-
-/* the number of bytes to offset an address to align it */
-#define align_offset(A)\
- ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
-  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
-
-/* -------------------------- MMAP preliminaries ------------------------- */
-
-/*
-   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
-   checks to fail so compiler optimizer can delete code rather than
-   using so many "#if"s.
-*/
-
-
-/* MORECORE and MMAP must return MFAIL on failure */
-#define MFAIL                ((void*)(MAX_SIZE_T))
-#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
-
-#if HAVE_MMAP
-
-#ifndef WIN32
-#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
-#define MMAP_PROT            (PROT_READ|PROT_WRITE)
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-#define MAP_ANONYMOUS        MAP_ANON
-#endif /* MAP_ANON */
-#ifdef MAP_ANONYMOUS
-#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
-#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
-#else /* MAP_ANONYMOUS */
-/*
-   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
-   is unlikely to be needed, but is supplied just in case.
-*/
-#define MMAP_FLAGS           (MAP_PRIVATE)
-static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
-           (dev_zero_fd = open("/dev/zero", O_RDWR), \
-            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
-            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
-#endif /* MAP_ANONYMOUS */
-
-#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
-
-#else /* WIN32 */
-
-/* Win32 MMAP via VirtualAlloc */
-static FORCEINLINE void* win32mmap(size_t size) {
-  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
-  return (ptr != 0)? ptr: MFAIL;
-}
-
-/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
-static FORCEINLINE void* win32direct_mmap(size_t size) {
-  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
-                           PAGE_READWRITE);
-  return (ptr != 0)? ptr: MFAIL;
-}
-
-/* This function supports releasing coalesced segments */
-static FORCEINLINE int win32munmap(void* ptr, size_t size) {
-  MEMORY_BASIC_INFORMATION minfo;
-  char* cptr = (char*)ptr;
-  while (size) {
-    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
-      return -1;
-    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
-        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
-      return -1;
-    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
-      return -1;
-    cptr += minfo.RegionSize;
-    size -= minfo.RegionSize;
-  }
-  return 0;
-}
-
-#define MMAP_DEFAULT(s)             win32mmap(s)
-#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
-#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
-#endif /* WIN32 */
-#endif /* HAVE_MMAP */
-
-#if HAVE_MREMAP
-#ifndef WIN32
-#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
-#endif /* WIN32 */
-#endif /* HAVE_MREMAP */
-
-
-/**
- * Define CALL_MORECORE
- */
-#if HAVE_MORECORE
-  #ifdef MORECORE
-    #define CALL_MORECORE(S)    MORECORE(S)
-  #else  /* MORECORE */
-    #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
-  #endif /* MORECORE */
-#else  /* HAVE_MORECORE */
-  #define CALL_MORECORE(S)      MFAIL
-#endif /* HAVE_MORECORE */
-
-/**
- * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
- */
-#if HAVE_MMAP
-  #define IS_MMAPPED_BIT        (SIZE_T_ONE)
-  #define USE_MMAP_BIT          (SIZE_T_ONE)
-
-  #ifdef MMAP
-    #define CALL_MMAP(s)        MMAP(s)
-  #else  /* MMAP */
-    #define CALL_MMAP(s)        MMAP_DEFAULT(s)
-  #endif /* MMAP */
-  #ifdef MUNMAP
-    #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
-  #else  /* MUNMAP */
-    #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
-  #endif /* MUNMAP */
-  #ifdef DIRECT_MMAP
-    #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
-  #else  /* DIRECT_MMAP */
-    #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
-  #endif /* DIRECT_MMAP */
-#else  /* HAVE_MMAP */
-  #define IS_MMAPPED_BIT        (SIZE_T_ZERO)
-  #define USE_MMAP_BIT          (SIZE_T_ZERO)
-
-  #define MMAP(s)               MFAIL
-  #define MUNMAP(a, s)          (-1)
-  #define DIRECT_MMAP(s)        MFAIL
-  #define CALL_DIRECT_MMAP(s)   DIRECT_MMAP(s)
-  #define CALL_MMAP(s)          MMAP(s)
-  #define CALL_MUNMAP(a, s)     MUNMAP((a), (s))
-#endif /* HAVE_MMAP */
-
-/**
- * Define CALL_MREMAP
- */
-#if HAVE_MMAP && HAVE_MREMAP
-  #ifdef MREMAP
-    #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
-  #else  /* MREMAP */
-    #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
-  #endif /* MREMAP */
-#else  /* HAVE_MMAP && HAVE_MREMAP */
-  #define CALL_MREMAP(addr, osz, nsz, mv)   MFAIL
-#endif /* HAVE_MMAP && HAVE_MREMAP */
-
-/* mstate bit set if contiguous morecore disabled or failed */
-#define USE_NONCONTIGUOUS_BIT (4U)
-
-/* segment bit set in create_mspace_with_base */
-#define EXTERN_BIT            (8U)
-
-
-/* --------------------------- Lock preliminaries ------------------------ */
-
-/*
-  When locks are defined, there is one global lock, plus
-  one per-mspace lock.
-
-  The global lock ensures that mparams.magic and other unique
-  mparams values are initialized only once. It also protects
-  sequences of calls to MORECORE.  In many cases sys_alloc requires
-  two calls that should not be interleaved with calls by other
-  threads.  This does not protect against direct calls to MORECORE
-  by other threads not using this lock, so there is still code to
-  cope as best we can with interference.
-
-  Per-mspace locks surround calls to malloc, free, etc.  To enable use
-  in layered extensions, per-mspace locks are reentrant.
-
-  Because lock-protected regions generally have bounded times, it is
-  OK to use the supplied simple spinlocks in the custom versions for
-  x86.
-
-  If USE_LOCKS is > 1, the definitions of lock routines here are
-  bypassed, in which case you will need to define at least
-  INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly TRY_LOCK
-  (which is not used in this malloc, but commonly needed in
-  extensions.)
-*/
-
-#if USE_LOCKS == 1
-
-#if USE_SPIN_LOCKS
-#ifndef WIN32
-
-/* Custom pthread-style spin locks on x86 and x64 for gcc */
-struct pthread_mlock_t {
-  volatile unsigned int l;
-  volatile unsigned int c;
-  volatile pthread_t threadid;
-};
-#define MLOCK_T               struct pthread_mlock_t
-#define CURRENT_THREAD        pthread_self()
-#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
-#define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
-#define RELEASE_LOCK(sl)      pthread_release_lock(sl)
-#define TRY_LOCK(sl)          pthread_try_lock(sl)
-#define SPINS_PER_YIELD       63
-
-static MLOCK_T malloc_global_mutex = { 0, 0, 0};
-
-static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
-  int spins = 0;
-  volatile unsigned int* lp = &sl->l;
-  for (;;) {
-    if (*lp != 0) {
-      if (sl->threadid == CURRENT_THREAD) {
-        ++sl->c;
-        return 0;
-      }
-    }
-    else {
-      /* place args to cmpxchgl in locals to evade oddities in some gccs */
-      int cmp = 0;
-      int val = 1;
-      int ret;
-      __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
-                             : "=a" (ret)
-                             : "r" (val), "m" (*(lp)), "0"(cmp)
-                             : "memory", "cc");
-      if (!ret) {
-        assert(!sl->threadid);
-        sl->c = 1;
-        sl->threadid = CURRENT_THREAD;
-        return 0;
-      }
-      if ((++spins & SPINS_PER_YIELD) == 0) {
-#if defined (__SVR4) && defined (__sun) /* solaris */
-        thr_yield();
-#else
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
-        sched_yield();
-#else  /* no-op yield on unknown systems */
-        ;
-#endif /* __linux__ || __FreeBSD__ || __APPLE__ */
-#endif /* solaris */
-      }
-    }
-  }
-}
-
-static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
-  assert(sl->l != 0);
-  assert(sl->threadid == CURRENT_THREAD);
-  if (--sl->c == 0) {
-    sl->threadid = 0;
-    volatile unsigned int* lp = &sl->l;
-    int prev = 0;
-    int ret;
-    __asm__ __volatile__ ("lock; xchgl %0, %1"
-                          : "=r" (ret)
-                          : "m" (*(lp)), "0"(prev)
-                          : "memory");
-  }
-}
-
-static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
-  volatile unsigned int* lp = &sl->l;
-  if (*lp != 0) {
-    if (sl->threadid == CURRENT_THREAD) {
-      ++sl->c;
-      return 1;
-    }
-  }
-  else {
-    int cmp = 0;
-    int val = 1;
-    int ret;
-    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
-                          : "=a" (ret)
-                          : "r" (val), "m" (*(lp)), "0"(cmp)
-                          : "memory", "cc");
-    if (!ret) {
-      assert(!sl->threadid);
-      sl->c = 1;
-      sl->threadid = CURRENT_THREAD;
-      return 1;
-    }
-  }
-  return 0;
-}
-
-
-#else /* WIN32 */
-/* Custom win32-style spin locks on x86 and x64 for MSC */
-struct win32_mlock_t
-{
-  volatile long l;
-  volatile unsigned int c;
-  volatile long threadid;
-};
-
-#define MLOCK_T               struct win32_mlock_t
-#define CURRENT_THREAD        win32_getcurrentthreadid()
-#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
-#define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
-#define RELEASE_LOCK(sl)      win32_release_lock(sl)
-#define TRY_LOCK(sl)          win32_try_lock(sl)
-#define SPINS_PER_YIELD       63
-
-static MLOCK_T malloc_global_mutex = { 0, 0, 0};
-
-static FORCEINLINE long win32_getcurrentthreadid() {
-#ifdef _MSC_VER
-#if defined(_M_IX86)
-  long *threadstruct=(long *)__readfsdword(0x18);
-  long threadid=threadstruct[0x24/sizeof(long)];
-  return threadid;
-#elif defined(_M_X64)
-  /* todo */
-  return GetCurrentThreadId();
-#else
-  return GetCurrentThreadId();
-#endif
-#else
-  return GetCurrentThreadId();
-#endif
-}
-
-static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
-  int spins = 0;
-  for (;;) {
-    if (sl->l != 0) {
-      if (sl->threadid == CURRENT_THREAD) {
-        ++sl->c;
-        return 0;
-      }
-    }
-    else {
-      if (!interlockedexchange(&sl->l, 1)) {
-        assert(!sl->threadid);
-        sl->threadid = CURRENT_THREAD;
-        sl->c = 1;
-        return 0;
-      }
-    }
-    if ((++spins & SPINS_PER_YIELD) == 0)
-      SleepEx(0, FALSE);
-  }
-}
-
-static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
-  assert(sl->threadid == CURRENT_THREAD);
-  assert(sl->l != 0);
-  if (--sl->c == 0) {
-    sl->threadid = 0;
-    interlockedexchange (&sl->l, 0);
-  }
-}
-
-static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
-  if (sl->l != 0) {
-    if (sl->threadid == CURRENT_THREAD) {
-      ++sl->c;
-      return 1;
-    }
-  }
-  else {
-    if (!interlockedexchange(&sl->l, 1)){
-      assert(!sl->threadid);
-      sl->threadid = CURRENT_THREAD;
-      sl->c = 1;
-      return 1;
-    }
-  }
-  return 0;
-}
-
-#endif /* WIN32 */
-#else /* USE_SPIN_LOCKS */
-
-#ifndef WIN32
-/* pthreads-based locks */
-
-#define MLOCK_T               pthread_mutex_t
-#define CURRENT_THREAD        pthread_self()
-#define INITIAL_LOCK(sl)      pthread_init_lock(sl)
-#define ACQUIRE_LOCK(sl)      pthread_mutex_lock(sl)
-#define RELEASE_LOCK(sl)      pthread_mutex_unlock(sl)
-#define TRY_LOCK(sl)          (!pthread_mutex_trylock(sl))
-
-static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Cope with old-style linux recursive lock initialization by adding */
-/* skipped internal declaration from pthread.h */
-#ifdef linux
-#if !defined (PTHREAD_MUTEX_RECURSIVE) && defined (PTHREAD_MUTEX_RECURSIVE_NP)
-extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
-                                              int __kind));
-#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
-#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
-#endif
-#endif
-
-static int pthread_init_lock (MLOCK_T *sl) {
-  pthread_mutexattr_t attr;
-  if (pthread_mutexattr_init(&attr)) return 1;
-  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
-  if (pthread_mutex_init(sl, &attr)) return 1;
-  if (pthread_mutexattr_destroy(&attr)) return 1;
-  return 0;
-}
-
-#else /* WIN32 */
-/* Win32 critical sections */
-#define MLOCK_T               CRITICAL_SECTION
-#define CURRENT_THREAD        GetCurrentThreadId()
-#define INITIAL_LOCK(s)       (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
-#define ACQUIRE_LOCK(s)       (EnterCriticalSection(s), 0)
-#define RELEASE_LOCK(s)       LeaveCriticalSection(s)
-#define TRY_LOCK(s)           TryEnterCriticalSection(s)
-#define NEED_GLOBAL_LOCK_INIT
-
-static MLOCK_T malloc_global_mutex;
-static volatile long malloc_global_mutex_status;
-
-/* Use spin loop to initialize global lock */
-static void init_malloc_global_mutex() {
-  for (;;) {
-    long stat = malloc_global_mutex_status;
-    if (stat > 0)
-      return;
-    /* transition to < 0 while initializing, then to > 0 */
-    if (stat == 0 &&
-        interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
-      InitializeCriticalSection(&malloc_global_mutex);
-      interlockedexchange(&malloc_global_mutex_status, 1);
-      return;
-    }
-    SleepEx(0, FALSE);
-  }
-}
-
-#endif /* WIN32 */
-#endif /* USE_SPIN_LOCKS */
-#endif /* USE_LOCKS == 1 */
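[Editor's note] Between the built-in lock flavors above and the user-defined hook that follows, it may help to see what a USE_LOCKS > 1 build is expected to supply. A hedged sketch using C11 mutexes (illustrative only; user_mlock_t is an invented name, availability of <threads.h> varies by toolchain, and a real build must still arrange to run INITIAL_LOCK on malloc_global_mutex before first use):

#include <threads.h>

typedef struct { mtx_t m; } user_mlock_t;   /* hypothetical lock type */
#define MLOCK_T            user_mlock_t
/* convention used by this malloc: INITIAL_LOCK/ACQUIRE_LOCK return 0 on success */
#define INITIAL_LOCK(sl)   (mtx_init(&(sl)->m, mtx_plain | mtx_recursive) != thrd_success)
#define ACQUIRE_LOCK(sl)   (mtx_lock(&(sl)->m) != thrd_success)
#define RELEASE_LOCK(sl)   ((void)mtx_unlock(&(sl)->m))
#define TRY_LOCK(sl)       (mtx_trylock(&(sl)->m) == thrd_success)
static MLOCK_T malloc_global_mutex;  /* must be INITIAL_LOCK'ed at startup */

The recursive type matters: the surrounding text requires per-mspace locks to be reentrant.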
-
-/* -----------------------  User-defined locks ------------------------ */
-
-#if USE_LOCKS > 1
-/* Define your own lock implementation here */
-/* #define INITIAL_LOCK(sl)  ... */
-/* #define ACQUIRE_LOCK(sl)  ... */
-/* #define RELEASE_LOCK(sl)  ... */
-/* #define TRY_LOCK(sl) ... */
-/* static MLOCK_T malloc_global_mutex = ... */
-#endif /* USE_LOCKS > 1 */
-
-/* -----------------------  Lock-based state ------------------------ */
-
-#if USE_LOCKS
-#define USE_LOCK_BIT               (2U)
-#else  /* USE_LOCKS */
-#define USE_LOCK_BIT               (0U)
-#define INITIAL_LOCK(l)
-#endif /* USE_LOCKS */
-
-#if USE_LOCKS
-#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
-#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
-#else  /* USE_LOCKS */
-#define ACQUIRE_MALLOC_GLOBAL_LOCK()
-#define RELEASE_MALLOC_GLOBAL_LOCK()
-#endif /* USE_LOCKS */
-
-
-/* -----------------------  Chunk representations ------------------------ */
-
-/*
-  (The following includes lightly edited explanations by Colin Plumb.)
-
-  The malloc_chunk declaration below is misleading (but accurate and
-  necessary).  It declares a "view" into memory allowing access to
-  necessary fields at known offsets from a given base.
-
-  Chunks of memory are maintained using a `boundary tag' method as
-  originally described by Knuth.  (See the paper by Paul Wilson
-  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
-  techniques.)  Sizes of free chunks are stored both in the front of
-  each chunk and at the end.  This makes consolidating fragmented
-  chunks into bigger chunks fast.  The head fields also hold bits
-  representing whether chunks are free or in use.
-
-  Here are some pictures to make it clearer.  They are "exploded" to
-  show that the state of a chunk can be thought of as extending from
-  the high 31 bits of the head field of its header through the
-  prev_foot and PINUSE_BIT bit of the following chunk header.
-
-  A chunk that's in use looks like:
-
-   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-           | Size of previous chunk (if P = 0)                             |
-           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
-         | Size of this chunk                                         1| +-+
-   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         |                                                               |
-         +-                                                             -+
-         |                                                               |
-         +-                                                             -+
-         |                                                               :
-         +-      size - sizeof(size_t) available payload bytes          -+
-         :                                                               |
- chunk-> +-                                                             -+
-         |                                                               |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
-       | Size of next chunk (may or may not be in use)               | +-+
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-  And if it's free, it looks like this:
-
-   chunk-> +-                                                             -+
-           | User payload (must be in use, or we would have merged!)      |
-           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
-         | Size of this chunk                                         0| +-+
-   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Next pointer                                                  |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Prev pointer                                                  |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         |                                                               :
-         +-      size - sizeof(struct chunk) unused bytes               -+
-         :                                                               |
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Size of this chunk                                            |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
-       | Size of next chunk (must be in use, or we would have merged)| +-+
- mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         |                                                               :
-         +- User payload                                                -+
-         :                                                               |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-                                                                       |0|
-                                                                       +-+
-  Note that since we always merge adjacent free chunks, the chunks
-  adjacent to a free chunk must be in use.
-
-  Given a pointer to a chunk (which can be derived trivially from the
-  payload pointer) we can, in O(1) time, find out whether the adjacent
-  chunks are free, and if so, unlink them from the lists that they
-  are on and merge them with the current chunk.
-
-  Chunks always begin on even word boundaries, so the mem portion
-  (which is returned to the user) is also on an even word boundary, and
-  thus at least double-word aligned.
-
-  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
-  chunk size (which is always a multiple of two words), is an in-use
-  bit for the *previous* chunk.  If that bit is *clear*, then the
-  word before the current chunk size contains the previous chunk
-  size, and can be used to find the front of the previous chunk.
-  The very first chunk allocated always has this bit set, preventing
-  access to non-existent (or non-owned) memory. If pinuse is set for
-  any given chunk, then you CANNOT determine the size of the
-  previous chunk, and might even get a memory addressing fault when
-  trying to do so.
-
-  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
-  the chunk size redundantly records whether the current chunk is
-  inuse. This redundancy enables usage checks within free and realloc,
-  and reduces indirection when freeing and consolidating chunks.
-
-  Each freshly allocated chunk must have both cinuse and pinuse set.
-  That is, each allocated chunk borders either a previously allocated
-  and still in-use chunk, or the base of its memory arena. This is
-  ensured by making all allocations from the `lowest' part of any
-  found chunk.  Further, no free chunk physically borders another one,
-  so each free chunk is known to be preceded and followed by either
-  inuse chunks or the ends of memory.
-
-  Note that the `foot' of the current chunk is actually represented
-  as the prev_foot of the NEXT chunk. This makes it easier to
-  deal with alignments etc but can be very confusing when trying
-  to extend or adapt this code.
-
-  The exceptions to all this are
-
-     1. The special chunk `top' is the top-most available chunk (i.e.,
-        the one bordering the end of available memory). It is treated
-        specially.  Top is never included in any bin, is used only if
-        no other chunk is available, and is released back to the
-        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
-        the top chunk is treated as larger (and thus less well
-        fitting) than any other available chunk.  The top chunk
-        doesn't update its trailing size field since there is no next
-        contiguous chunk that would have to index off it. However,
-        space is still allocated for it (TOP_FOOT_SIZE) to enable
-        separation or merging when space is extended.
-
-     2. Chunks allocated via mmap, which have the lowest-order bit
-        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
-        PINUSE_BIT in their head fields.  Because they are allocated
-        one-by-one, each must carry its own prev_foot field, which is
-        also used to hold the offset this chunk has within its mmapped
-        region, which is needed to preserve alignment. Each mmapped
-        chunk is trailed by the first two fields of a fake next-chunk
-        for sake of usage checks.
-
-*/
-
-struct malloc_chunk {
-  size_t               prev_foot;  /* Size of previous chunk (if free).  */
-  size_t               head;       /* Size and inuse bits. */
-  struct malloc_chunk* fd;         /* double links -- used only if free. */
-  struct malloc_chunk* bk;
-};
-
-typedef struct malloc_chunk  mchunk;
-typedef struct malloc_chunk* mchunkptr;
-typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
-typedef unsigned int bindex_t;         /* Described below */
-typedef unsigned int binmap_t;         /* Described below */
-typedef unsigned int flag_t;           /* The type of various bit flag sets */
-
-/* ------------------- Chunks sizes and alignments ----------------------- */
-
-#define MCHUNK_SIZE         (sizeof(mchunk))
-
-#if FOOTERS
-#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
-#else /* FOOTERS */
-#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
-#endif /* FOOTERS */
-
-/* MMapped chunks need a second word of overhead ... */
-#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-/* ... and additional padding for fake next-chunk at foot */
-#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
-
-/* The smallest size we can malloc is an aligned minimal chunk */
-#define MIN_CHUNK_SIZE\
-  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
-
-/* conversion from malloc headers to user pointers, and back */
-#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
-#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
-/* chunk associated with aligned address A */
-#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
-
-/* Bounds on request (not chunk) sizes. */
-#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
-#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
-
-/* pad request bytes into a usable size */
-#define pad_request(req) \
-   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
-
-/* pad request, checking for minimum (but not maximum) */
-#define request2size(req) \
-  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
-
-
-/* ------------------ Operations on head and foot fields ----------------- */
-
-/*
-  The head field of a chunk is or'ed with PINUSE_BIT when previous
-  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
-  use. If the chunk was obtained with mmap, the prev_foot field has
-  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
-  mmapped region to the base of the chunk.
-
-  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
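
  As an editorial illustration (not part of the original source), the
  macros defined nearby in this file decode a payload pointer mem like so:

      mchunkptr p    = mem2chunk(mem);            recover the chunk header
      size_t    sz   = chunksize(p);              size with flag bits masked
      int this_inuse = (cinuse(p) != 0);          C bit of this chunk
      int prev_inuse = (pinuse(p) != 0);          P bit, previous chunk's state
      mchunkptr next = chunk_plus_offset(p, sz);  boundary-tag step forward
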
-*/ - -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) - -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) - -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) - -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) - -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) - -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) - -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) - -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) - -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) - -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) - -#define is_mmapped(p)\ - (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) - -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) - -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ - -/* ---------------------- Overlaid data structures ----------------------- */ - -/* - When chunks are not in use, they are treated as nodes of either - lists or trees. - - "Small" chunks are stored in circular doubly-linked lists, and look - like this: - - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `head:' | Size of chunk, in bytes |P| - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Forward pointer to next chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Back pointer to previous chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Unused space (may be 0 bytes long) . - . . - . | -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - `foot:' | Size of chunk, in bytes | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - Larger chunks are kept in a form of bitwise digital trees (aka - tries) keyed on chunksizes. Because malloc_tree_chunks are only for - free chunks greater than 256 bytes, their size doesn't impose any - constraints on user chunk sizes. 
Each node looks like:
-
- chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Size of previous chunk                                        |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' |                             Size of chunk, in bytes         |P|
-   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Forward pointer to next chunk of same size                    |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Back pointer to previous chunk of same size                   |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Pointer to left child (child[0])                              |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Pointer to right child (child[1])                             |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Pointer to parent                                             |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | bin index of this chunk                                       |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-         | Unused space                                                  .
-         .                                                               |
-nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `foot:' |                             Size of chunk, in bytes           |
-         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
-  of the same size are arranged in a circularly-linked list, with only
-  the oldest chunk (the next to be used, in our FIFO ordering)
-  actually in the tree.  (Tree members are distinguished by a non-null
-  parent pointer.)  If a chunk with the same size as an existing node
-  is inserted, it is linked off the existing node using pointers that
-  work in the same way as fd/bk pointers of small chunks.
-
-  Each tree contains a power of 2 sized range of chunk sizes (the
-  smallest is 0x100 <= x < 0x180), which is divided in half at each
-  tree level, with the chunks in the smaller half of the range (0x100
-  <= x < 0x140 for the top node) in the left subtree and the larger
-  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
-  done by inspecting individual bits.
-
-  Using these rules, each node's left subtree contains all smaller
-  sizes than its right subtree.  However, the node at the root of each
-  subtree has no particular ordering relationship to either.  (The
-  dividing line between the subtree sizes is based on trie relation.)
-  If we remove the last chunk of a given size from the interior of the
-  tree, we need to replace it with a leaf node.  The tree ordering
-  rules permit a node to be replaced by any leaf below it.
-
-  The smallest chunk in a tree (a common operation in a best-fit
-  allocator) can be found by walking a path to the leftmost leaf in
-  the tree.  Unlike a usual binary tree, where we follow left child
-  pointers until we reach a null, here we follow the right child
-  pointer any time the left one is null, until we reach a leaf with
-  both child pointers null. The smallest chunk in the tree will be
-  somewhere along that path.
-
-  The worst case number of steps to add, find, or remove a node is
-  bounded by the number of bits differentiating chunks within
-  bins. Under current bin calculations, this ranges from 6 up to 21
-  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
-  is of course much better.
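
  An editorial sketch of that leftmost walk (illustrative only; treebin_at
  and the leftmost_child helper are defined later in this file):

      tchunkptr t    = *treebin_at(m, i);     root of a non-empty treebin
      tchunkptr best = t;
      while (t != 0) {
        if (chunksize(t) < chunksize(best))
          best = t;                           smallest size seen on the path
        t = leftmost_child(t);                right child only if left is null
      }
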
-*/ - -struct malloc_tree_chunk { - /* The first four fields must be compatible with malloc_chunk */ - size_t prev_foot; - size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; - - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; - bindex_t index; -}; - -typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ - -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) - -/* ----------------------------- Segments -------------------------------- */ - -/* - Each malloc space may include non-contiguous segments, held in a - list headed by an embedded malloc_segment record representing the - top-most space. Segments also include flags holding properties of - the space. Large chunks that are directly allocated by mmap are not - included in this list. They are instead independently created and - destroyed without otherwise keeping track of them. - - Segment management mainly comes into play for spaces allocated by - MMAP. Any call to MMAP might or might not return memory that is - adjacent to an existing segment. MORECORE normally contiguously - extends the current space, so this space is almost always adjacent, - which is simpler and faster to deal with. (This is why MORECORE is - used preferentially to MMAP when both are available -- see - sys_alloc.) When allocating using MMAP, we don't use any of the - hinting mechanisms (inconsistently) supported in various - implementations of unix mmap, or distinguish reserving from - committing memory. Instead, we just ask for space, and exploit - contiguity when we get it. It is probably possible to do - better than this on some systems, but no general scheme seems - to be significantly better. - - Management entails a simpler variant of the consolidation scheme - used for chunks to reduce fragmentation -- new adjacent memory is - normally prepended or appended to an existing segment. However, - there are limitations compared to chunk consolidation that mostly - reflect the fact that segment processing is relatively infrequent - (occurring only when getting memory from system) and that we - don't expect to have huge numbers of segments: - - * Segments are not indexed, so traversal requires linear scans. (It - would be possible to index these, but is not worth the extra - overhead and complexity for most programs on most platforms.) - * New segments are only appended to old ones when holding top-most - memory; if they cannot be prepended to others, they are held in - different segments. - - Except for the top-most segment of an mstate, each segment record - is kept at the tail of its segment. Segments are added by pushing - segment records onto the list headed by &mstate.seg for the - containing mstate. - - Segment flags control allocation/merge/deallocation policies: - * If EXTERN_BIT set, then we did not allocate this segment, - and so should not try to deallocate or merge with others. - (This currently holds only for the initial segment passed - into create_mspace_with_base.) - * If IS_MMAPPED_BIT set, the segment may be merged with - other surrounding mmapped segments and trimmed/de-allocated - using munmap. - * If neither bit is set, then the segment was obtained using - MORECORE so can be merged with surrounding MORECORE'd segments - and deallocated/trimmed using MORECORE with negative arguments. 
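
  Editor's aside: the linear scan mentioned above is literally a few lines
  (it mirrors segment_holding, defined later in this file):

      msegmentptr sp = &m->seg;               embedded head record
      while (sp != 0) {
        if (addr >= sp->base && addr < sp->base + sp->size)
          return sp;                          addr lives in this segment
        sp = sp->next;
      }
      return 0;                               not in any segment
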
-*/
-
-struct malloc_segment {
-  char*        base;             /* base address */
-  size_t       size;             /* allocated size */
-  struct malloc_segment* next;   /* ptr to next segment */
-  flag_t       sflags;           /* mmap and extern flag */
-};
-
-#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
-#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
-
-typedef struct malloc_segment  msegment;
-typedef struct malloc_segment* msegmentptr;
-
-/* ---------------------------- malloc_state ----------------------------- */
-
-/*
-   A malloc_state holds all of the bookkeeping for a space.
-   The main fields are:
-
-  Top
-    The topmost chunk of the currently active segment. Its size is
-    cached in topsize.  The actual size of topmost space is
-    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
-    fenceposts and segment records if necessary when getting more
-    space from the system.  The size at which to autotrim top is
-    cached from mparams in trim_check, except that it is disabled if
-    an autotrim fails.
-
-  Designated victim (dv)
-    This is the preferred chunk for servicing small requests that
-    don't have exact fits.  It is normally the chunk split off most
-    recently to service another small request.  Its size is cached in
-    dvsize. The link fields of this chunk are not maintained since it
-    is not kept in a bin.
-
-  SmallBins
-    An array of bin headers for free chunks.  These bins hold chunks
-    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
-    chunks of all the same size, spaced 8 bytes apart.  To simplify
-    use in double-linked lists, each bin header acts as a malloc_chunk
-    pointing to the real first node, if it exists (else pointing to
-    itself).  This avoids special-casing for headers.  But to avoid
-    waste, we allocate only the fd/bk pointers of bins, and then use
-    repositioning tricks to treat these as the fields of a chunk.
-
-  TreeBins
-    Treebins are pointers to the roots of trees holding a range of
-    sizes. There are 2 equally spaced treebins for each power of two
-    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
-    larger.
-
-  Bin maps
-    There is one bit map for small bins ("smallmap") and one for
-    treebins ("treemap").  Each bin sets its bit when non-empty, and
-    clears the bit when empty.  Bit operations are then used to avoid
-    bin-by-bin searching -- nearly all "search" is done without ever
-    looking at bins that won't be selected.  The bit maps
-    conservatively use 32 bits per map word, even on a 64-bit system.
-    For a good description of some of the bit-based techniques used
-    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
-    supplement at http://hackersdelight.org/). Many of these are
-    intended to reduce the branchiness of paths through malloc etc, as
-    well as to reduce the number of memory locations read or written.
-
-  Segments
-    A list of segments headed by an embedded malloc_segment record
-    representing the initial space.
-
-  Address check support
-    The least_addr field is the least address ever obtained from
-    MORECORE or MMAP. Attempted frees and reallocs of any address less
-    than this are trapped (unless INSECURE is defined).
-
-  Magic tag
-    A cross-check field that should always hold the same value as
-    mparams.magic.
-
-  Flags
-    Bits recording whether to use MMAP, locks, or contiguous MORECORE
-
-  Statistics
-    Each space keeps track of current and maximum system memory
-    obtained via MORECORE or MMAP.
-
-  Trim support
-    Fields holding the amount of unused topmost memory that should trigger
-    trimming, and a counter to force periodic scanning to release unused
-    non-topmost segments.
-
-  Locking
-    If USE_LOCKS is defined, the "mutex" lock is acquired and released
-    around every public call using this mspace.
-
-  Extension support
-    A void* pointer and a size_t field that can be used to help implement
-    extensions to this malloc.
-*/
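[Editor's note] To make the bin-map trick described above concrete, a freestanding C sketch (first_marked_bin is an invented helper; the real code uses the least_bit/compute_bit2idx macros defined further below):

/* Editor's sketch (not part of the original file): locate the first
   non-empty bin at or above index i using only the 32-bit bin map. */
static unsigned first_marked_bin(unsigned int map, unsigned i) {
  unsigned int mask = map & (~0U << i);  /* keep bits for bins >= i */
  unsigned int bit, idx = 0;
  if (mask == 0)
    return 32;                           /* every candidate bin is empty */
  bit = mask & (0U - mask);              /* least_bit: isolate lowest set bit */
  while ((bit >>= 1) != 0)               /* convert bit to index, portably */
    ++idx;
  return idx;
}

No bin is inspected unless its map bit says it is non-empty, which is exactly why the search stays branch-light.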
-
-/* Bin types, widths and sizes */
-#define NSMALLBINS        (32U)
-#define NTREEBINS         (32U)
-#define SMALLBIN_SHIFT    (3U)
-#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
-#define TREEBIN_SHIFT     (8U)
-#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
-#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
-#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
-
-struct malloc_state {
-  binmap_t   smallmap;
-  binmap_t   treemap;
-  size_t     dvsize;
-  size_t     topsize;
-  char*      least_addr;
-  mchunkptr  dv;
-  mchunkptr  top;
-  size_t     trim_check;
-  size_t     release_checks;
-  size_t     magic;
-  mchunkptr  smallbins[(NSMALLBINS+1)*2];
-  tbinptr    treebins[NTREEBINS];
-  size_t     footprint;
-  size_t     max_footprint;
-  flag_t     mflags;
-#if USE_LOCKS
-  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
-#endif /* USE_LOCKS */
-  msegment   seg;
-  void*      extp;      /* Unused but available for extensions */
-  size_t     exts;
-};
-
-typedef struct malloc_state*    mstate;
-
-/* ------------- Global malloc_state and malloc_params ------------------- */
-
-/*
-  malloc_params holds global properties, including those that can be
-  dynamically set using mallopt. There is a single instance, mparams,
-  initialized in init_mparams. Note that the non-zeroness of "magic"
-  also serves as an initialization flag.
-*/
-
-struct malloc_params {
-  volatile size_t magic;
-  size_t page_size;
-  size_t granularity;
-  size_t mmap_threshold;
-  size_t trim_threshold;
-  flag_t default_mflags;
-};
-
-static struct malloc_params mparams;
-
-/* Ensure mparams initialized */
-#define ensure_initialization() (mparams.magic != 0 || init_mparams())
-
-#if !ONLY_MSPACES
-
-/* The global malloc_state used for all non-"mspace" calls */
-static struct malloc_state _gm_;
-#define gm                 (&_gm_)
-#define is_global(M)       ((M) == &_gm_)
-
-#endif /* !ONLY_MSPACES */
-
-#define is_initialized(M)  ((M)->top != 0)
-
-/* -------------------------- system alloc setup ------------------------- */
-
-/* Operations on mflags */
-
-#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
-#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
-#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
-
-#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
-#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
-#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
-
-#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
-#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
-
-#define set_lock(M,L)\
- ((M)->mflags = (L)?\
-  ((M)->mflags | USE_LOCK_BIT) :\
-  ((M)->mflags & ~USE_LOCK_BIT))
-
-/* page-align a size */
-#define page_align(S)\
- (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
-
-/* granularity-align a size */
-#define granularity_align(S)\
-  (((S) + (mparams.granularity - SIZE_T_ONE))\
-   & ~(mparams.granularity - SIZE_T_ONE))
-
-
-/* For mmap, use granularity alignment on windows, else page-align */
-#ifdef WIN32
-#define mmap_align(S) granularity_align(S)
-#else
-#define mmap_align(S) page_align(S)
-#endif
-
-/* For sys_alloc, enough padding to ensure can malloc request on
success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) - -/* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { - msegmentptr sp = &m->seg; - for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; - } -} - -/* Return true if segment contains a segment link */ -static int has_segment_link(mstate m, msegmentptr ss) { - msegmentptr sp = &m->seg; - for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; - } -} - -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ - -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) - - -/* ------------------------------- Hooks -------------------------------- */ - -/* - PREACTION should be defined to return 0 on success, and nonzero on - failure. If you are not using locking, you can redefine these to do - anything you like. -*/ - -#if USE_LOCKS - -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ - -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ - -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ - -#endif /* USE_LOCKS */ - -/* - CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. - USAGE_ERROR_ACTION is triggered on detected bad frees and - reallocs. The argument p is an address that might have triggered the - fault. It is ignored by the two predefined actions, but might be - useful in custom actions that try to help diagnose errors. -*/ - -#if PROCEED_ON_ERROR - -/* A count of the number of corruption errors causing resets */ -int malloc_corruption_error_count; - -/* default corruption action */ -static void reset_on_error(mstate m); - -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) - -#else /* PROCEED_ON_ERROR */ - -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ - -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ - -#endif /* PROCEED_ON_ERROR */ - -/* -------------------------- Debugging setup ---------------------------- */ - -#if ! 
DEBUG - -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) - -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) - -static void do_check_any_chunk(mstate m, mchunkptr p); -static void do_check_top_chunk(mstate m, mchunkptr p); -static void do_check_mmapped_chunk(mstate m, mchunkptr p); -static void do_check_inuse_chunk(mstate m, mchunkptr p); -static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); -static void do_check_tree(mstate m, tchunkptr t); -static void do_check_treebin(mstate m, bindex_t i); -static void do_check_smallbin(mstate m, bindex_t i); -static void do_check_malloc_state(mstate m); -static int bin_find(mstate m, mchunkptr x); -static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ - -/* ---------------------------- Indexing Bins ---------------------------- */ - -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) ((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ - -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? 
(SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
-
-/* Shift placing maximum resolved bit in a treebin at i as sign bit */
-#define leftshift_for_tree_index(i) \
-   ((i == NTREEBINS-1)? 0 : \
-    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
-
-/* The size of the smallest chunk held in bin with index i */
-#define minsize_for_tree_index(i) \
-   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
-   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
-
-
-/* ------------------------ Operations on bin maps ----------------------- */
-
-/* bit corresponding to given index */
-#define idx2bit(i)              ((binmap_t)(1) << (i))
-
-/* Mark/Clear bits with given index */
-#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
-#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
-#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
-
-#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
-#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
-#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
-
-/* isolate the least set bit of a bitmap */
-#define least_bit(x)         ((x) & -(x))
-
-/* mask with all bits to left of least bit of x on */
-#define left_bits(x)         ((x<<1) | -(x<<1))
-
-/* mask with all bits to left of or equal to least bit of x on */
-#define same_or_left_bits(x) ((x) | -(x))
-
-/* index corresponding to given bit. Use x86 asm if possible */
-
-#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-#define compute_bit2idx(X, I)\
-{\
-  unsigned int J;\
-  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\
-  I = (bindex_t)J;\
-}
-
-#elif defined (__INTEL_COMPILER)
-#define compute_bit2idx(X, I)\
-{\
-  unsigned int J;\
-  J = _bit_scan_forward (X); \
-  I = (bindex_t)J;\
-}
-
-#elif defined(_MSC_VER) && _MSC_VER>=1300
-#define compute_bit2idx(X, I)\
-{\
-  unsigned int J;\
-  _BitScanForward((DWORD *) &J, X);\
-  I = (bindex_t)J;\
-}
-
-#elif USE_BUILTIN_FFS
-#define compute_bit2idx(X, I) I = ffs(X)-1
-
-#else
-#define compute_bit2idx(X, I)\
-{\
-  unsigned int Y = X - 1;\
-  unsigned int K = Y >> (16-4) & 16;\
-  unsigned int N = K;        Y >>= K;\
-  N += K = Y >> (8-3) &  8;  Y >>= K;\
-  N += K = Y >> (4-2) &  4;  Y >>= K;\
-  N += K = Y >> (2-1) &  2;  Y >>= K;\
-  N += K = Y >> (1-0) &  1;  Y >>= K;\
-  I = (bindex_t)(N + Y);\
-}
-#endif /* GNUC */
-
-
-/* ----------------------- Runtime Check Support ------------------------- */
-
-/*
-  For security, the main invariant is that malloc/free/etc never
-  writes to a static address other than malloc_state, unless static
-  malloc_state itself has been corrupted, which cannot occur via
-  malloc (because of these checks). In essence this means that we
-  believe all pointers, sizes, maps etc held in malloc_state, but
-  check all of those linked or offsetted from other embedded data
-  structures.  These checks are interspersed with main code in a way
-  that tends to minimize their run-time cost.
-
-  When FOOTERS is defined, in addition to range checking, we also
-  verify footer fields of inuse chunks, which can be used to guarantee
-  that the mstate controlling malloc/free is intact.  This is a
-  streamlined version of the approach described by William Robertson
-  et al in "Run-time Detection of Heap-based Overflows" LISA'03
-  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
-  of an inuse chunk holds the xor of its mstate and a random seed,
-  that is checked upon calls to free() and realloc().  This is
-  (probabilistically) unguessable from outside the program, but can be
-  computed by any code successfully malloc'ing any chunk, so does not
-  itself provide protection against code that has already broken
-  security through some other means.  Unlike Robertson et al, we
-  always dynamically check addresses of all offset chunks (previous,
-  next, etc). This turns out to be cheaper than relying on hashes.
-*/
-
-#if !INSECURE
-/* Check if address a is at least as high as any from MORECORE or MMAP */
-#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
-/* Check if address of next chunk n is higher than base chunk p */
-#define ok_next(p, n)    ((char*)(p) < (char*)(n))
-/* Check if p has its cinuse bit on */
-#define ok_cinuse(p)     cinuse(p)
-/* Check if p has its pinuse bit on */
-#define ok_pinuse(p)     pinuse(p)
-
-#else /* !INSECURE */
-#define ok_address(M, a) (1)
-#define ok_next(b, n)    (1)
-#define ok_cinuse(p)     (1)
-#define ok_pinuse(p)     (1)
-#endif /* !INSECURE */
-
-#if (FOOTERS && !INSECURE)
-/* Check if (alleged) mstate m has expected magic field */
-#define ok_magic(M)      ((M)->magic == mparams.magic)
-#else  /* (FOOTERS && !INSECURE) */
-#define ok_magic(M)      (1)
-#endif /* (FOOTERS && !INSECURE) */
-
-
-/* In gcc, use __builtin_expect to minimize impact of checks */
-#if !INSECURE
-#if defined(__GNUC__) && __GNUC__ >= 3
-#define RTCHECK(e)  __builtin_expect(e, 1)
-#else /* GNUC */
-#define RTCHECK(e)  (e)
-#endif /* GNUC */
-#else /* !INSECURE */
-#define RTCHECK(e)  (1)
-#endif /* !INSECURE */
-
-/* macros to set up inuse chunks with or without footers */
-
-#if !FOOTERS
-
-#define mark_inuse_foot(M,p,s)
-
-/* Set cinuse bit and pinuse bit of next chunk */
-#define set_inuse(M,p,s)\
-  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
-  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
-
-/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
-#define set_inuse_and_pinuse(M,p,s)\
-  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
-  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
-
-/* Set size, cinuse and pinuse bit of this chunk */
-#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
-  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
-
-#else /* FOOTERS */
-
-/* Set foot of inuse chunk to be xor of mstate and seed */
-#define mark_inuse_foot(M,p,s)\
-  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
-
-#define get_mstate_for(p)\
-  ((mstate)(((mchunkptr)((char*)(p) +\
-    (chunksize(p))))->prev_foot ^ mparams.magic))
-
-#define set_inuse(M,p,s)\
-  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
-  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
-  mark_inuse_foot(M,p,s))
-
-#define set_inuse_and_pinuse(M,p,s)\
-  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
-  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
-  mark_inuse_foot(M,p,s))
-
-#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
-  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
-  mark_inuse_foot(M, p, s))
-
-#endif /* !FOOTERS */
-
-/* ---------------------------- setting mparams -------------------------- */
-
-/* Initialize mparams */
-static int init_mparams(void) {
-#ifdef NEED_GLOBAL_LOCK_INIT
-  if (malloc_global_mutex_status <= 0)
-    init_malloc_global_mutex();
-#endif
-
-  ACQUIRE_MALLOC_GLOBAL_LOCK();
-  if (mparams.magic == 0) {
-    size_t magic;
-    size_t psize;
-    size_t gsize;
-
-#ifndef WIN32
-    psize = malloc_getpagesize;
-    gsize = ((DEFAULT_GRANULARITY != 0)?
DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ - { - SYSTEM_INFO system_info; - GetSystemInfo(&system_info); - psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); - } -#endif /* WIN32 */ - - /* Sanity-check configuration: - size_t must be unsigned and as wide as pointer type. - ints must be at least 4 bytes. - alignment must be at least 8. - Alignment, min chunk size, and page size must all be powers of 2. - */ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) - ABORT; - - mparams.granularity = gsize; - mparams.page_size = psize; - mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; - mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES - /* Set up lock for main malloc area */ - gm->mflags = mparams.default_mflags; - INITIAL_LOCK(&gm->mutex); -#endif - -#if (FOOTERS && !INSECURE) - { -#if USE_DEV_RANDOM - int fd; - unsigned char buf[sizeof(size_t)]; - /* Try to use /dev/urandom, else fall back on using time */ - if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && - read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); - close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#else - magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ - } -#else /* (FOOTERS && !INSECURE) */ - magic = (size_t)0x58585858U; -#endif /* (FOOTERS && !INSECURE) */ - - mparams.magic = magic; - } - - RELEASE_MALLOC_GLOBAL_LOCK(); - return 1; -} - -/* support for mallopt */ -static int change_mparam(int param_number, int value) { - size_t val = (value == -1)? MAX_SIZE_T : (size_t)value; - ensure_initialization(); - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; - return 1; - } - else - return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; - } -} - -#if DEBUG -/* ------------------------- Debugging Support --------------------------- */ - -/* Check properties of any chunk, whether free, inuse, mmapped etc */ -static void do_check_any_chunk(mstate m, mchunkptr p) { - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); -} - -/* Check properties of top chunk */ -static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ - assert(sp != 0); - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); - assert(sz == m->topsize); - assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); - assert(pinuse(p)); - assert(!pinuse(chunk_plus_offset(p, sz))); -} - -/* Check properties of (inuse) mmapped chunks */ -static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); - size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); - assert(is_mmapped(p)); - assert(use_mmap(m)); - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); - assert(ok_address(m, p)); - assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); - assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); -} - -/* Check properties of inuse chunks */ -static void do_check_inuse_chunk(mstate m, mchunkptr p) { - do_check_any_chunk(m, p); - assert(cinuse(p)); - assert(next_pinuse(p)); - /* If not pinuse and not mmapped, previous chunk has OK offset */ - assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); -} - -/* Check properties of free chunks */ -static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); - mchunkptr next = chunk_plus_offset(p, sz); - do_check_any_chunk(m, p); - assert(!cinuse(p)); - assert(!next_pinuse(p)); - assert (!is_mmapped(p)); - if (p != m->dv && p != m->top) { - if (sz >= MIN_CHUNK_SIZE) { - assert((sz & CHUNK_ALIGN_MASK) == 0); - assert(is_aligned(chunk2mem(p))); - assert(next->prev_foot == sz); - assert(pinuse(p)); - assert (next == m->top || cinuse(next)); - assert(p->fd->bk == p); - assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ - assert(sz == SIZE_T_SIZE); - } -} - -/* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); - do_check_inuse_chunk(m, p); - assert((sz & CHUNK_ALIGN_MASK) == 0); - assert(sz >= MIN_CHUNK_SIZE); - assert(sz >= s); - /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ - assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); - } -} - -/* Check a tree and its subtrees. 
*/ -static void do_check_tree(mstate m, tchunkptr t) { - tchunkptr head = 0; - tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; - compute_tree_index(tsize, idx); - assert(tindex == idx); - assert(tsize >= MIN_LARGE_SIZE); - assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); - - do { /* traverse through chain of same-sized nodes */ - do_check_any_chunk(m, ((mchunkptr)u)); - assert(u->index == tindex); - assert(chunksize(u) == tsize); - assert(!cinuse(u)); - assert(!next_pinuse(u)); - assert(u->fd->bk == u); - assert(u->bk->fd == u); - if (u->parent == 0) { - assert(u->child[0] == 0); - assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ - head = u; - assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); - if (u->child[0] != 0) { - assert(u->child[0]->parent == u); - assert(u->child[0] != u); - do_check_tree(m, u->child[0]); - } - if (u->child[1] != 0) { - assert(u->child[1]->parent == u); - assert(u->child[1] != u); - do_check_tree(m, u->child[1]); - } - if (u->child[0] != 0 && u->child[1] != 0) { - assert(chunksize(u->child[0]) < chunksize(u->child[1])); - } - } - u = u->fd; - } while (u != t); - assert(head != 0); -} - -/* Check all the chunks in a treebin. */ -static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); - tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); -} - -/* Check all the chunks in a smallbin. */ -static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; - unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); - if (!empty) { - for (; p != b; p = p->bk) { - size_t size = chunksize(p); - mchunkptr q; - /* each chunk claims to be free */ - do_check_free_chunk(m, p); - /* chunk belongs in bin */ - assert(small_index(size) == i); - assert(p->bk == b || chunksize(p->bk) == chunksize(p)); - /* chunk is followed by an inuse chunk */ - q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); - } - } -} - -/* Find x in a bin. Used in other check functions. 
*/ -static int bin_find(mstate m, mchunkptr x) { - size_t size = chunksize(x); - if (is_small(size)) { - bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); - if (smallmap_is_marked(m, sidx)) { - mchunkptr p = b; - do { - if (p == x) - return 1; - } while ((p = p->fd) != b); - } - } - else { - bindex_t tidx; - compute_tree_index(size, tidx); - if (treemap_is_marked(m, tidx)) { - tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); - while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - sizebits <<= 1; - } - if (t != 0) { - tchunkptr u = t; - do { - if (u == (tchunkptr)x) - return 1; - } while ((u = u->fd) != t); - } - } - } - return 0; -} - -/* Traverse each chunk and check it; return total */ -static size_t traverse_and_check(mstate m) { - size_t sum = 0; - if (is_initialized(m)) { - msegmentptr s = &m->seg; - sum += m->topsize + TOP_FOOT_SIZE; - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - mchunkptr lastq = 0; - assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - sum += chunksize(q); - if (cinuse(q)) { - assert(!bin_find(m, q)); - do_check_inuse_chunk(m, q); - } - else { - assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ - do_check_free_chunk(m, q); - } - lastq = q; - q = next_chunk(q); - } - s = s->next; - } - } - return sum; -} - -/* Check all properties of malloc_state. */ -static void do_check_malloc_state(mstate m) { - bindex_t i; - size_t total; - /* check bins */ - for (i = 0; i < NSMALLBINS; ++i) - do_check_smallbin(m, i); - for (i = 0; i < NTREEBINS; ++i) - do_check_treebin(m, i); - - if (m->dvsize != 0) { /* check dv chunk */ - do_check_any_chunk(m, m->dv); - assert(m->dvsize == chunksize(m->dv)); - assert(m->dvsize >= MIN_CHUNK_SIZE); - assert(bin_find(m, m->dv) == 0); - } - - if (m->top != 0) { /* check top chunk */ - do_check_top_chunk(m, m->top); - /*assert(m->topsize == chunksize(m->top)); redundant */ - assert(m->topsize > 0); - assert(bin_find(m, m->top) == 0); - } - - total = traverse_and_check(m); - assert(total <= m->footprint); - assert(m->footprint <= m->max_footprint); -} -#endif /* DEBUG */ - -/* ----------------------------- statistics ------------------------------ */ - -#if !NO_MALLINFO -static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - ensure_initialization(); - if (!PREACTION(m)) { - check_malloc_state(m); - if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; - msegmentptr s = &m->seg; - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - size_t sz = chunksize(q); - sum += sz; - if (!cinuse(q)) { - mfree += sz; - ++nfree; - } - q = next_chunk(q); - } - s = s->next; - } - - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; - nm.uordblks = m->footprint - mfree; - nm.fordblks = mfree; - nm.keepcost = m->topsize; - } - - POSTACTION(m); - } - return nm; -} -#endif /* !NO_MALLINFO */ - -static void internal_malloc_stats(mstate m) { - ensure_initialization(); - if (!PREACTION(m)) { - size_t maxfp = 0; - size_t fp = 0; - size_t used = 0; - check_malloc_state(m); - if (is_initialized(m)) { - msegmentptr s = &m->seg; - maxfp = m->max_footprint; - fp = 
m->footprint; - used = fp - (m->topsize + TOP_FOOT_SIZE); - - while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!cinuse(q)) - used -= chunksize(q); - q = next_chunk(q); - } - s = s->next; - } - } - - fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); - fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); - fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); - - POSTACTION(m); - } -} - -/* ----------------------- Operations on smallbins ----------------------- */ - -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. This is ugly but reduces reliance on inlining support of - compilers. -*/ - -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} - -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (F == B)\ - clear_smallmap(M, I);\ - else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ - (B == smallbin_at(M,I) || ok_address(M, B)))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} - -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F)\ - clear_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, F))) {\ - B->fd = F;\ - F->bk = B;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} - - - -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - assert(is_small(DVS));\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} - -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} - -/* - Unlink steps: - - 1. 
If x is a chained node, unlink it from its same-sized fd/bk links - and choose its bk node as its replacement. - 2. If x was the last node of its size, but not a leaf node, it must - be replaced with a leaf node (not merely one with an open left or - right), to make sure that lefts and rights of descendents - correspond properly to bit masks. We use the rightmost descendent - of x. We could use any other leaf, but this is easy to locate and - tends to counteract removal of leftmosts elsewhere, and so keeps - paths shorter than minimally guaranteed. This doesn't loop much - because on average a node in a tree is near the bottom. - 3. If x is the base of a chain (i.e., has parent links) relink - x's parent and children to x's replacement (or null if none). -*/ - -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F))) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} - -/* Relays to large vs small bin operations */ - -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } - - -/* Relays to internal calls to malloc/free from realloc, memalign etc */ - -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - (m == gm)? dlmalloc(b) : mspace_malloc(m, b) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ - -/* ----------------------- Direct-mmapping chunks ----------------------- */ - -/* - Directly mmapped chunks are set up with an offset to the start of - the mmapped region stored in the prev_foot field of the chunk. This - allows reconstruction of the required argument to MUNMAP when freed, - and also allows adjustment of the returned chunk to meet alignment - requirements (especially in memalign). 
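- (The offset is OR'ed with IS_MMAPPED_BIT in prev_foot, so free can recognize mmapped chunks and hand them straight to MUNMAP.)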
There is also enough space - allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain - the PINUSE bit so frees can be checked. -*/ - -/* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { - size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); - if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; - mchunkptr p = (mchunkptr)(mm + offset); - p->prev_foot = offset | IS_MMAPPED_BIT; - (p)->head = (psize|CINUSE_BIT); - mark_inuse_foot(m, p, psize); - chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; - - if (mm < m->least_addr) - m->least_addr = mm; - if ((m->footprint += mmsize) > m->max_footprint) - m->max_footprint = m->footprint; - assert(is_aligned(chunk2mem(p))); - check_mmapped_chunk(m, p); - return chunk2mem(p); - } - } - return 0; -} - -/* Realloc using mmap */ -static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { - size_t oldsize = chunksize(oldp); - if (is_small(nb)) /* Can't shrink mmap regions below small size */ - return 0; - /* Keep old chunk if big enough but not too big */ - if (oldsize >= nb + SIZE_T_SIZE && - (oldsize - nb) <= (mparams.granularity << 1)) - return oldp; - else { - size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; - size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; - size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, 1); - if (cp != CMFAIL) { - mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; - newp->head = (psize|CINUSE_BIT); - mark_inuse_foot(m, newp, psize); - chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; - - if (cp < m->least_addr) - m->least_addr = cp; - if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) - m->max_footprint = m->footprint; - check_mmapped_chunk(m, newp); - return newp; - } - } - return 0; -} - -/* -------------------------- mspace management -------------------------- */ - -/* Initialize top chunk and its size */ -static void init_top(mstate m, mchunkptr p, size_t psize) { - /* Ensure alignment */ - size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); - psize -= offset; - - m->top = p; - m->topsize = psize; - p->head = psize | PINUSE_BIT; - /* set size of fake trailing chunk holding overhead space only once */ - chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ -} - -/* Initialize bins for a new mstate that is otherwise zeroed out */ -static void init_bins(mstate m) { - /* Establish circular links for smallbins */ - bindex_t i; - for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); - bin->fd = bin->bk = bin; - } -} - -#if PROCEED_ON_ERROR - -/* default corruption action */ -static void reset_on_error(mstate m) { - int i; - ++malloc_corruption_error_count; - /* Reinitialize fields to forget about all memory */ - m->smallbins = m->treebins = 0; - m->dvsize = m->topsize = 0; - m->seg.base = 0; - m->seg.size = 0; - m->seg.next = 0; - m->top = m->dv = 0; - for (i = 0; i < NTREEBINS; ++i) - *treebin_at(m, i) = 0; - init_bins(m); -} -#endif /* PROCEED_ON_ERROR */ - -/* Allocate chunk and prepend remainder with chunk in successor base. 
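- Called from sys_alloc when a newly obtained region ends exactly where an existing segment begins; the unused remainder is consolidated with that segment's first chunk.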
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { - mchunkptr p = align_as_chunk(newbase); - mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; - mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - - assert((char*)oldfirst > (char*)q); - assert(pinuse(oldfirst)); - assert(qsize >= MIN_CHUNK_SIZE); - - /* consolidate remainder with first chunk of old base */ - if (oldfirst == m->top) { - size_t tsize = m->topsize += qsize; - m->top = q; - q->head = tsize | PINUSE_BIT; - check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { - size_t dsize = m->dvsize += qsize; - m->dv = q; - set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { - if (!cinuse(oldfirst)) { - size_t nsize = chunksize(oldfirst); - unlink_chunk(m, oldfirst, nsize); - oldfirst = chunk_plus_offset(oldfirst, nsize); - qsize += nsize; - } - set_free_with_pinuse(q, qsize, oldfirst); - insert_chunk(m, q, qsize); - check_free_chunk(m, q); - } - - check_malloced_chunk(m, chunk2mem(p), nb); - return chunk2mem(p); -} - -/* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { - /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; - msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; - msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; - - /* reset top to new space */ - init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); - - /* Set up segment record */ - assert(is_aligned(ss)); - set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ - m->seg.base = tbase; - m->seg.size = tsize; - m->seg.sflags = mmapped; - m->seg.next = ss; - - /* Insert trailing fenceposts */ - for (;;) { - mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); - p->head = FENCEPOST_HEAD; - ++nfences; - if ((char*)(&(nextp->head)) < old_end) - p = nextp; - else - break; - } - assert(nfences >= 2); - - /* Insert the rest of old top into a bin as an ordinary free chunk */ - if (csp != old_top) { - mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; - mchunkptr tn = chunk_plus_offset(q, psize); - set_free_with_pinuse(q, psize, tn); - insert_chunk(m, q, psize); - } - - check_top_chunk(m, m->top); -} - -/* -------------------------- System allocation -------------------------- */ - -/* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; - size_t tsize = 0; - flag_t mmap_flag = 0; - - ensure_initialization(); - - /* Directly map large chunks */ - if (use_mmap(m) && nb >= mparams.mmap_threshold) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; - } - - /* - Try getting memory in any of three ways (in most-preferred to - least-preferred order): - 1. A call to MORECORE that can normally contiguously extend memory. 
- (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or - or main space is mmapped or a previous contiguous call failed) - 2. A call to MMAP new space (disabled if not HAVE_MMAP). - Note that under the default settings, if MORECORE is unable to - fulfill a request, and HAVE_MMAP is true, then mmap is - used as a noncontiguous system allocator. This is a useful backup - strategy for systems with holes in address spaces -- in this case - sbrk cannot contiguously expand the heap, but mmap may be able to - find space. - 3. A call to MORECORE that cannot usually contiguously extend memory. - (disabled if not HAVE_MORECORE) - - In all cases, we need to request enough bytes from system to ensure - we can malloc nb bytes upon success, so pad with enough space for - top_foot, plus alignment-pad to make sure we don't lose bytes if - not on boundary, and round this up to a granularity unit. - */ - - if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); - size_t asize = 0; - ACQUIRE_MALLOC_GLOBAL_LOCK(); - - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); - if (base != CMFAIL) { - asize = granularity_align(nb + SYS_ALLOC_PADDING); - /* Adjust to end on a page boundary */ - if (!is_page_aligned(base)) - asize += (page_align((size_t)base) - (size_t)base); - /* Can't call MORECORE if size is negative when treated as signed */ - if (asize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(asize))) == base) { - tbase = base; - tsize = asize; - } - } - } - else { - /* Subtract out existing available top space from MORECORE request. */ - asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); - /* Use mem here only if it did continuously extend old space */ - if (asize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { - tbase = br; - tsize = asize; - } - } - - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (asize < HALF_MAX_SIZE_T && - asize < nb + SYS_ALLOC_PADDING) { - size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize); - if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); - if (end != CMFAIL) - asize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-asize); - br = CMFAIL; - } - } - } - } - if (br != CMFAIL) { /* Use the space we did get */ - tbase = br; - tsize = asize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ - } - - RELEASE_MALLOC_GLOBAL_LOCK(); - } - - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING); - if (rsize > nb) { /* Fail if wraps around zero */ - char* mp = (char*)(CALL_MMAP(rsize)); - if (mp != CMFAIL) { - tbase = mp; - tsize = rsize; - mmap_flag = IS_MMAPPED_BIT; - } - } - } - - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ - size_t asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; - ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); - RELEASE_MALLOC_GLOBAL_LOCK(); - if (br != CMFAIL && end != CMFAIL && br < end) { - size_t ssize = end - br; - if (ssize > nb + TOP_FOOT_SIZE) { - tbase = br; - tsize = ssize; - } - } - } - } - - if (tbase != CMFAIL) { - - if ((m->footprint += tsize) > m->max_footprint) - m->max_footprint 
= m->footprint; - - if (!is_initialized(m)) { /* first-time initialization */ - m->seg.base = m->least_addr = tbase; - m->seg.size = tsize; - m->seg.sflags = mmap_flag; - m->magic = mparams.magic; - m->release_checks = MAX_RELEASE_CHECK_RATE; - init_bins(m); -#if !ONLY_MSPACES - if (is_global(m)) - init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); - else -#endif - { - /* Offset top by embedded malloc_state */ - mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); - } - } - - else { - /* Try to merge with an existing segment */ - msegmentptr sp = &m->seg; - /* Only consider most recent segment if traversal suppressed */ - while (sp != 0 && tbase != sp->base + sp->size) - sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && - (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ - sp->size += tsize; - init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; - sp = &m->seg; - while (sp != 0 && sp->base != tbase + tsize) - sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && - (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { - char* oldbase = sp->base; - sp->base = tbase; - sp->size += tsize; - return prepend_alloc(m, tbase, oldbase, nb); - } - else - add_segment(m, tbase, tsize, mmap_flag); - } - } - - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; - mchunkptr p = m->top; - mchunkptr r = m->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(m, p, nb); - check_top_chunk(m, m->top); - check_malloced_chunk(m, chunk2mem(p), nb); - return chunk2mem(p); - } - } - - MALLOC_FAILURE_ACTION; - return 0; -} - -/* ----------------------- system deallocation -------------------------- */ - -/* Unmap and unlink any mmapped segments that don't contain used chunks */ -static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; - msegmentptr pred = &m->seg; - msegmentptr sp = pred->next; - while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; - msegmentptr next = sp->next; - ++nsegs; - if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { - mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); - /* Can unmap if first chunk holds entire segment and not pinned */ - if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { - tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); - if (p == m->dv) { - m->dv = 0; - m->dvsize = 0; - } - else { - unlink_large_chunk(m, tp); - } - if (CALL_MUNMAP(base, size) == 0) { - released += size; - m->footprint -= size; - /* unlink obsoleted record */ - sp = pred; - sp->next = next; - } - else { /* back out if cannot unmap */ - insert_large_chunk(m, tp, psize); - } - } - } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ - break; - pred = sp; - sp = next; - } - /* Reset check counter */ - m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)? 
- nsegs : MAX_RELEASE_CHECK_RATE); - return released; -} - -static int sys_trim(mstate m, size_t pad) { - size_t released = 0; - ensure_initialization(); - if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ - - if (m->topsize > pad) { - /* Shrink top space in granularity-size units, keeping at least one */ - size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); - - if (!is_extern_segment(sp)) { - if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ - size_t newsize = sp->size - extra; - /* Prefer mremap, fall back to munmap */ - if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || - (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { - released = extra; - } - } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ - extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; - ACQUIRE_MALLOC_GLOBAL_LOCK(); - { - /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); - if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); - if (rel_br != CMFAIL && new_br < old_br) - released = old_br - new_br; - } - } - RELEASE_MALLOC_GLOBAL_LOCK(); - } - } - - if (released != 0) { - sp->size -= released; - m->footprint -= released; - init_top(m, m->top, m->topsize - released); - check_top_chunk(m, m->top); - } - } - - /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); - - /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; - } - - return (released != 0)? 
1 : 0; -} - - -/* ---------------------------- malloc support --------------------------- */ - -/* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { - tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ - tchunkptr t; - bindex_t idx; - compute_tree_index(nb, idx); - if ((t = *treebin_at(m, idx)) != 0) { - /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ - for (;;) { - tchunkptr rt; - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - v = t; - if ((rsize = trem) == 0) - break; - } - rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; - if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ - break; - } - sizebits <<= 1; - } - } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ - binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; - if (leftbits != 0) { - bindex_t i; - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - t = *treebin_at(m, i); - } - } - - while (t != 0) { /* find smallest of tree or subtree */ - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - t = leftmost_child(t); - } - - /* If dv is a better fit, return 0 so malloc will use it */ - if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ - mchunkptr r = chunk_plus_offset(v, nb); - assert(chunksize(v) == rsize + nb); - if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(m, v, (rsize + nb)); - else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - insert_chunk(m, r, rsize); - } - return chunk2mem(v); - } - } - CORRUPTION_ERROR_ACTION(m); - } - return 0; -} - -/* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { - tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); - compute_bit2idx(leastbit, i); - v = t = *treebin_at(m, i); - rsize = chunksize(t) - nb; - - while ((t = leftmost_child(t)) != 0) { - size_t trem = chunksize(t) - nb; - if (trem < rsize) { - rsize = trem; - v = t; - } - } - - if (RTCHECK(ok_address(m, v))) { - mchunkptr r = chunk_plus_offset(v, nb); - assert(chunksize(v) == rsize + nb); - if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); - if (rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(m, v, (rsize + nb)); - else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(m, r, rsize); - } - return chunk2mem(v); - } - } - - CORRUPTION_ERROR_ACTION(m); - return 0; -} - -/* --------------------------- realloc support --------------------------- */ - -static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { - if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - return 0; - } - if (!PREACTION(m)) { - mchunkptr oldp = mem2chunk(oldmem); - size_t oldsize = chunksize(oldp); - mchunkptr next = chunk_plus_offset(oldp, oldsize); - mchunkptr newp = 0; - void* extra = 0; - - /* Try to either shrink or extend into top. 
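- (mmapped chunks are instead resized via mmap_resize.)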
Else malloc-copy-free */ - - if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && - ok_next(oldp, next) && ok_pinuse(next))) { - size_t nb = request2size(bytes); - if (is_mmapped(oldp)) - newp = mmap_resize(m, oldp, nb); - else if (oldsize >= nb) { /* already big enough */ - size_t rsize = oldsize - nb; - newp = oldp; - if (rsize >= MIN_CHUNK_SIZE) { - mchunkptr remainder = chunk_plus_offset(newp, nb); - set_inuse(m, newp, nb); - set_inuse(m, remainder, rsize); - extra = chunk2mem(remainder); - } - } - else if (next == m->top && oldsize + m->topsize > nb) { - /* Expand into top */ - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; - mchunkptr newtop = chunk_plus_offset(oldp, nb); - set_inuse(m, oldp, nb); - newtop->head = newtopsize |PINUSE_BIT; - m->top = newtop; - m->topsize = newtopsize; - newp = oldp; - } - } - else { - USAGE_ERROR_ACTION(m, oldmem); - POSTACTION(m); - return 0; - } - - POSTACTION(m); - - if (newp != 0) { - if (extra != 0) { - internal_free(m, extra); - } - check_inuse_chunk(m, newp); - return chunk2mem(newp); - } - else { - void* newmem = internal_malloc(m, bytes); - if (newmem != 0) { - size_t oc = oldsize - overhead_for(oldp); - memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); - internal_free(m, oldmem); - } - return newmem; - } - } - return 0; -} - -/* --------------------------- memalign support -------------------------- */ - -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ - return internal_malloc(m, bytes); - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ - alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ - size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; - alignment = a; - } - - if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ - MALLOC_FAILURE_ACTION; - } - } - else { - size_t nb = request2size(bytes); - size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; - char* mem = (char*)internal_malloc(m, req); - if (mem != 0) { - void* leader = 0; - void* trailer = 0; - mchunkptr p = mem2chunk(mem); - - if (PREACTION(m)) return 0; - if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ - /* - Find an aligned spot inside chunk. Since we need to give - back leading space in a chunk of at least MIN_CHUNK_SIZE, if - the first calculation places us at a spot with less than - MIN_CHUNK_SIZE leader, we can move to the next aligned spot. - We've allocated enough total room so that this is always - possible. - */ - char* br = (char*)mem2chunk((size_t)(((size_t)(mem + - alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
- br : br+alignment; - mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; - - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ - newp->prev_foot = p->prev_foot + leadsize; - newp->head = (newsize|CINUSE_BIT); - } - else { /* Otherwise, give back leader, use the rest */ - set_inuse(m, newp, newsize); - set_inuse(m, p, leadsize); - leader = chunk2mem(p); - } - p = newp; - } - - /* Give back spare room at the end */ - if (!is_mmapped(p)) { - size_t size = chunksize(p); - if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; - mchunkptr remainder = chunk_plus_offset(p, nb); - set_inuse(m, p, nb); - set_inuse(m, remainder, remainder_size); - trailer = chunk2mem(remainder); - } - } - - assert (chunksize(p) >= nb); - assert((((size_t)(chunk2mem(p))) % alignment) == 0); - check_inuse_chunk(m, p); - POSTACTION(m); - if (leader != 0) { - internal_free(m, leader); - } - if (trailer != 0) { - internal_free(m, trailer); - } - return chunk2mem(p); - } - } - return 0; -} - -/* ------------------------ comalloc/coalloc support --------------------- */ - -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - /* - This provides common support for independent_X routines, handling - all of the combinations that can result. - - The opts arg has: - bit 0 set if all elements are same size (using sizes[0]) - bit 1 set if elements should be zeroed - */ - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ - size_t size; - size_t i; - - ensure_initialization(); - /* compute array length, if needed */ - if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ - marray = chunks; - array_size = 0; - } - else { - /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); - marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); - } - - /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ - element_size = request2size(*sizes); - contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ - element_size = 0; - contents_size = 0; - for (i = 0; i != n_elements; ++i) - contents_size += request2size(sizes[i]); - } - - size = contents_size + array_size; - - /* - Allocate the aggregate chunk. First disable direct-mmapping so - malloc won't use it, since we would not be able to later - free/realloc space internal to a segregated mmap region. 
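- The original mmap setting is restored immediately after the aggregate allocation attempt.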
- */ - was_enabled = use_mmap(m); - disable_mmap(m); - mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; - - if (PREACTION(m)) return 0; - p = mem2chunk(mem); - remainder_size = chunksize(p); - - assert(!is_mmapped(p)); - - if (opts & 0x2) { /* optionally clear the elements */ - memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); - } - - /* If not provided, allocate the pointer array as final part of chunk */ - if (marray == 0) { - size_t array_chunk_size; - array_chunk = chunk_plus_offset(p, contents_size); - array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); - set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); - remainder_size = contents_size; - } - - /* split out elements */ - for (i = 0; ; ++i) { - marray[i] = chunk2mem(p); - if (i != n_elements-1) { - if (element_size != 0) - size = element_size; - else - size = request2size(sizes[i]); - remainder_size -= size; - set_size_and_pinuse_of_inuse_chunk(m, p, size); - p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ - set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); - break; - } - } - -#if DEBUG - if (marray != chunks) { - /* final element must have exactly exhausted chunk */ - if (element_size != 0) { - assert(remainder_size == element_size); - } - else { - assert(remainder_size == request2size(sizes[i])); - } - check_inuse_chunk(m, mem2chunk(marray)); - } - for (i = 0; i != n_elements; ++i) - check_inuse_chunk(m, mem2chunk(marray[i])); - -#endif /* DEBUG */ - - POSTACTION(m); - return marray; -} - - -/* -------------------------- public routines ---------------------------- */ - -#if !ONLY_MSPACES - -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ - -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif - - if (!PREACTION(gm)) { - void* mem; - size_t nb; - if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; - binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); - idx = small_index(nb); - smallbits = gm->smallmap >> idx; - - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
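- The 0x3 mask tests this bin and the next; a chunk from either fits nb with too little excess to split off as a separate free chunk.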
*/ - mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ - b = smallbin_at(gm, idx); - p = b->fd; - assert(chunksize(p) == small_index2size(idx)); - unlink_first_small_chunk(gm, b, p, idx); - set_inuse_and_pinuse(gm, p, small_index2size(idx)); - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ - mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - b = smallbin_at(gm, i); - p = b->fd; - assert(chunksize(p) == small_index2size(i)); - unlink_first_small_chunk(gm, b, p, i); - rsize = small_index2size(i) - nb; - /* Fit here cannot be remainderless if 4byte sizes */ - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(gm, p, small_index2size(i)); - else { - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - r = chunk_plus_offset(p, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(gm, r, rsize); - } - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - } - } - else if (bytes >= MAX_REQUEST) - nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ - else { - nb = pad_request(bytes); - if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - } - - if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; - mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ - mchunkptr r = gm->dv = chunk_plus_offset(p, nb); - gm->dvsize = rsize; - set_size_and_pinuse_of_free_chunk(r, rsize); - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ - size_t dvs = gm->dvsize; - gm->dvsize = 0; - gm->dv = 0; - set_inuse_and_pinuse(gm, p, dvs); - } - mem = chunk2mem(p); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; - mchunkptr p = gm->top; - mchunkptr r = gm->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - mem = chunk2mem(p); - check_top_chunk(gm, gm->top); - check_malloced_chunk(gm, mem, nb); - goto postaction; - } - - mem = sys_alloc(gm, nb); - - postaction: - POSTACTION(gm); - return mem; - } - - return 0; -} - -void dlfree(void* mem) { - /* - Consolidate freed chunks with preceeding or succeeding bordering - free chunks, if they exist, and then place in a bin. Intermixed - with special cases for top, dv, mmapped chunks, and usage errors. 
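- Freeing into top may also trigger sys_trim once the accumulated top size crosses the trim threshold.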
- */ - - if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS - mstate fm = get_mstate_for(p); - if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); - return; - } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ - if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); - if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { - size_t psize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - size_t prevsize = p->prev_foot; - if ((prevsize & IS_MMAPPED_BIT) != 0) { - prevsize &= ~IS_MMAPPED_BIT; - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - fm->footprint -= psize; - goto postaction; - } - else { - mchunkptr prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ - if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { - fm->dvsize = psize; - set_free_with_pinuse(p, psize, next); - goto postaction; - } - } - else - goto erroraction; - } - } - - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ - if (next == fm->top) { - size_t tsize = fm->topsize += psize; - fm->top = p; - p->head = tsize | PINUSE_BIT; - if (p == fm->dv) { - fm->dv = 0; - fm->dvsize = 0; - } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); - goto postaction; - } - else if (next == fm->dv) { - size_t dsize = fm->dvsize += psize; - fm->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - goto postaction; - } - else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(fm, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == fm->dv) { - fm->dvsize = psize; - goto postaction; - } - } - } - else - set_free_with_pinuse(p, psize, next); - - if (is_small(psize)) { - insert_small_chunk(fm, p, psize); - check_free_chunk(fm, p); - } - else { - tchunkptr tp = (tchunkptr)p; - insert_large_chunk(fm, tp, psize); - check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); - } - goto postaction; - } - } - erroraction: - USAGE_ERROR_ACTION(fm, p); - postaction: - POSTACTION(fm); - } - } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ -} - -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; - size_t req = 0; - if (n_elements != 0) { - req = n_elements * elem_size; - if (((n_elements | elem_size) & ~(size_t)0xffff) && - (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - } - mem = dlmalloc(req); - if (mem != 0 && calloc_must_clear(mem2chunk(mem))) - memset(mem, 0, req); - return mem; -} - -void* dlrealloc(void* oldmem, size_t bytes) { - if (oldmem == 0) - return dlmalloc(bytes); -#ifdef REALLOC_ZERO_BYTES_FREES - if (bytes == 0) { - dlfree(oldmem); - return 0; - } -#endif /* REALLOC_ZERO_BYTES_FREES */ - else { -#if ! 
FOOTERS - mstate m = gm; -#else /* FOOTERS */ - mstate m = get_mstate_for(mem2chunk(oldmem)); - if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); - return 0; - } -#endif /* FOOTERS */ - return internal_realloc(m, oldmem, bytes); - } -} - -void* dlmemalign(size_t alignment, size_t bytes) { - return internal_memalign(gm, alignment, bytes); -} - -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ - return ialloc(gm, n_elements, &sz, 3, chunks); -} - -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { - return ialloc(gm, n_elements, sizes, 0, chunks); -} - -void* dlvalloc(size_t bytes) { - size_t pagesz; - ensure_initialization(); - pagesz = mparams.page_size; - return dlmemalign(pagesz, bytes); -} - -void* dlpvalloc(size_t bytes) { - size_t pagesz; - ensure_initialization(); - pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); -} - -int dlmalloc_trim(size_t pad) { - ensure_initialization(); - int result = 0; - if (!PREACTION(gm)) { - result = sys_trim(gm, pad); - POSTACTION(gm); - } - return result; -} - -size_t dlmalloc_footprint(void) { - return gm->footprint; -} - -size_t dlmalloc_max_footprint(void) { - return gm->max_footprint; -} - -#if !NO_MALLINFO -struct mallinfo dlmallinfo(void) { - return internal_mallinfo(gm); -} -#endif /* NO_MALLINFO */ - -void dlmalloc_stats() { - internal_malloc_stats(gm); -} - -int dlmallopt(int param_number, int value) { - return change_mparam(param_number, value); -} - -#endif /* !ONLY_MSPACES */ - -size_t dlmalloc_usable_size(void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (cinuse(p)) - return chunksize(p) - overhead_for(p); - } - return 0; -} - -/* ----------------------------- user mspaces ---------------------------- */ - -#if MSPACES - -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); - mchunkptr mn; - mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); - memset(m, 0, msize); - INITIAL_LOCK(&m->mutex); - msp->head = (msize|PINUSE_BIT|CINUSE_BIT); - m->seg.base = m->least_addr = tbase; - m->seg.size = m->footprint = m->max_footprint = tsize; - m->magic = mparams.magic; - m->release_checks = MAX_RELEASE_CHECK_RATE; - m->mflags = mparams.default_mflags; - m->extp = 0; - m->exts = 0; - disable_contiguous(m); - init_bins(m); - mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); - check_top_chunk(m, m->top); - return m; -} - -mspace create_mspace(size_t capacity, int locked) { - mstate m = 0; - size_t msize; - ensure_initialization(); - msize = pad_request(sizeof(struct malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? 
mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); - size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); - if (tbase != CMFAIL) { - m = init_user_mstate(tbase, tsize); - m->seg.sflags = IS_MMAPPED_BIT; - set_lock(m, locked); - } - } - return (mspace)m; -} - -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { - mstate m = 0; - size_t msize; - ensure_initialization(); - msize = pad_request(sizeof(struct malloc_state)); - if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); - m->seg.sflags = EXTERN_BIT; - set_lock(m, locked); - } - return (mspace)m; -} - -int mspace_mmap_large_chunks(mspace msp, int enable) { - int ret = 0; - mstate ms = (mstate)msp; - if (!PREACTION(ms)) { - if (use_mmap(ms)) - ret = 1; - if (enable) - enable_mmap(ms); - else - disable_mmap(ms); - POSTACTION(ms); - } - return ret; -} - -size_t destroy_mspace(mspace msp) { - size_t freed = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - msegmentptr sp = &ms->seg; - while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; - flag_t flag = sp->sflags; - sp = sp->next; - if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && - CALL_MUNMAP(base, size) == 0) - freed += size; - } - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return freed; -} - -/* - mspace versions of routines are near-clones of the global - versions. This is not so nice but better than the alternatives. -*/ - - -void* mspace_malloc(mspace msp, size_t bytes) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - if (!PREACTION(ms)) { - void* mem; - size_t nb; - if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; - binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); - idx = small_index(nb); - smallbits = ms->smallmap >> idx; - - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ - mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ - b = smallbin_at(ms, idx); - p = b->fd; - assert(chunksize(p) == small_index2size(idx)); - unlink_first_small_chunk(ms, b, p, idx); - set_inuse_and_pinuse(ms, p, small_index2size(idx)); - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ - mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); - compute_bit2idx(leastbit, i); - b = smallbin_at(ms, i); - p = b->fd; - assert(chunksize(p) == small_index2size(i)); - unlink_first_small_chunk(ms, b, p, i); - rsize = small_index2size(i) - nb; - /* Fit here cannot be remainderless if 4byte sizes */ - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) - set_inuse_and_pinuse(ms, p, small_index2size(i)); - else { - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - r = chunk_plus_offset(p, nb); - set_size_and_pinuse_of_free_chunk(r, rsize); - replace_dv(ms, r, rsize); - } - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - } - } - else if (bytes >= MAX_REQUEST) - nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ - else { - nb = pad_request(bytes); - if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - } - - if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; - mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ - mchunkptr r = ms->dv = chunk_plus_offset(p, nb); - ms->dvsize = rsize; - set_size_and_pinuse_of_free_chunk(r, rsize); - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ - size_t dvs = ms->dvsize; - ms->dvsize = 0; - ms->dv = 0; - set_inuse_and_pinuse(ms, p, dvs); - } - mem = chunk2mem(p); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; - mchunkptr p = ms->top; - mchunkptr r = ms->top = chunk_plus_offset(p, nb); - r->head = rsize | PINUSE_BIT; - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - mem = chunk2mem(p); - check_top_chunk(ms, ms->top); - check_malloced_chunk(ms, mem, nb); - goto postaction; - } - - mem = sys_alloc(ms, nb); - - postaction: - POSTACTION(ms); - return mem; - } - - return 0; -} - -void mspace_free(mspace msp, void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS - mstate fm = get_mstate_for(p); -#else /* FOOTERS */ - mstate fm = (mstate)msp; -#endif /* FOOTERS */ - if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); - return; - } - if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); - if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { - size_t psize = chunksize(p); - mchunkptr next = chunk_plus_offset(p, psize); - if (!pinuse(p)) { - size_t prevsize = p->prev_foot; - if ((prevsize & IS_MMAPPED_BIT) != 0) { - prevsize &= ~IS_MMAPPED_BIT; - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - fm->footprint -= psize; - goto postaction; - } - else { - mchunkptr prev = chunk_minus_offset(p, prevsize); - psize += prevsize; - p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ - if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { - fm->dvsize = psize; - set_free_with_pinuse(p, psize, next); - goto postaction; - } - } - else - goto erroraction; - } - } - - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ - if (next == fm->top) { - size_t tsize = fm->topsize += psize; - fm->top = p; - p->head = tsize | PINUSE_BIT; - if (p == fm->dv) { - fm->dv = 0; - fm->dvsize = 0; - } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); - goto postaction; - } - else if (next == fm->dv) { - size_t dsize = fm->dvsize += psize; - fm->dv = p; - set_size_and_pinuse_of_free_chunk(p, dsize); - goto postaction; - } - else { - size_t nsize = chunksize(next); - psize += nsize; - unlink_chunk(fm, next, nsize); - set_size_and_pinuse_of_free_chunk(p, psize); - if (p == fm->dv) { - fm->dvsize = psize; - goto postaction; - } - } - } - else - set_free_with_pinuse(p, psize, next); - - if (is_small(psize)) { - insert_small_chunk(fm, p, psize); - check_free_chunk(fm, p); - } - else { - tchunkptr tp = (tchunkptr)p; - insert_large_chunk(fm, tp, psize); - check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); - } - goto postaction; - } - } - erroraction: - USAGE_ERROR_ACTION(fm, p); - postaction: - POSTACTION(fm); - } - } -} - -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; - size_t req = 0; - mstate ms = 
(mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - if (n_elements != 0) { - req = n_elements * elem_size; - if (((n_elements | elem_size) & ~(size_t)0xffff) && - (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - } - mem = internal_malloc(ms, req); - if (mem != 0 && calloc_must_clear(mem2chunk(mem))) - memset(mem, 0, req); - return mem; -} - -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - if (oldmem == 0) - return mspace_malloc(msp, bytes); -#ifdef REALLOC_ZERO_BYTES_FREES - if (bytes == 0) { - mspace_free(msp, oldmem); - return 0; - } -#endif /* REALLOC_ZERO_BYTES_FREES */ - else { -#if FOOTERS - mchunkptr p = mem2chunk(oldmem); - mstate ms = get_mstate_for(p); -#else /* FOOTERS */ - mstate ms = (mstate)msp; -#endif /* FOOTERS */ - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return internal_realloc(ms, oldmem, bytes); - } -} - -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return internal_memalign(ms, alignment, bytes); -} - -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return ialloc(ms, n_elements, &sz, 3, chunks); -} - -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - return 0; - } - return ialloc(ms, n_elements, sizes, 0, chunks); -} - -int mspace_trim(mspace msp, size_t pad) { - int result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - if (!PREACTION(ms)) { - result = sys_trim(ms, pad); - POSTACTION(ms); - } - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - -void mspace_malloc_stats(mspace msp) { - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - internal_malloc_stats(ms); - } - else { - USAGE_ERROR_ACTION(ms,ms); - } -} - -size_t mspace_footprint(mspace msp) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - result = ms->footprint; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - - -size_t mspace_max_footprint(mspace msp) { - size_t result = 0; - mstate ms = (mstate)msp; - if (ok_magic(ms)) { - result = ms->max_footprint; - } - else { - USAGE_ERROR_ACTION(ms,ms); - } - return result; -} - - -#if !NO_MALLINFO -struct mallinfo mspace_mallinfo(mspace msp) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } - return internal_mallinfo(ms); -} -#endif /* NO_MALLINFO */ - -size_t mspace_usable_size(void* mem) { - if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (cinuse(p)) - return chunksize(p) - overhead_for(p); - } - return 0; -} - -int mspace_mallopt(int param_number, int value) { - return change_mparam(param_number, value); -} - -#endif /* MSPACES */ - -/* -------------------- Alternative MORECORE functions ------------------- */ - -/* - Guidelines for creating a custom version of MORECORE: - - * For best performance, MORECORE should allocate in multiples of pagesize. - * MORECORE may allocate more memory than requested. (Or even less, - but this will usually result in a malloc failure.) 
- * MORECORE must not allocate memory when given argument zero, but - instead return one past the end address of memory from previous - nonzero call. - * For best performance, consecutive calls to MORECORE with positive - arguments should return increasing addresses, indicating that - space has been contiguously extended. - * Even though consecutive calls to MORECORE need not return contiguous - addresses, it must be OK for malloc'ed chunks to span multiple - regions in those cases where they do happen to be contiguous. - * MORECORE need not handle negative arguments -- it may instead - just return MFAIL when given negative arguments. - Negative arguments are always multiples of pagesize. MORECORE - must not misinterpret negative args as large positive unsigned - args. You can suppress all such calls from even occurring by defining - MORECORE_CANNOT_TRIM, - - As an example alternative MORECORE, here is a custom allocator - kindly contributed for pre-OSX macOS. It uses virtually but not - necessarily physically contiguous non-paged memory (locked in, - present and won't get swapped out). You can use it by uncommenting - this section, adding some #includes, and setting up the appropriate - defines above: - - #define MORECORE osMoreCore - - There is also a shutdown routine that should somehow be called for - cleanup upon program exit. - - #define MAX_POOL_ENTRIES 100 - #define MINIMUM_MORECORE_SIZE (64 * 1024U) - static int next_os_pool; - void *our_os_pools[MAX_POOL_ENTRIES]; - - void *osMoreCore(int size) - { - void *ptr = 0; - static void *sbrk_top = 0; - - if (size > 0) - { - if (size < MINIMUM_MORECORE_SIZE) - size = MINIMUM_MORECORE_SIZE; - if (CurrentExecutionLevel() == kTaskLevel) - ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); - if (ptr == 0) - { - return (void *) MFAIL; - } - // save ptrs so they can be freed during cleanup - our_os_pools[next_os_pool] = ptr; - next_os_pool++; - ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); - sbrk_top = (char *) ptr + size; - return ptr; - } - else if (size < 0) - { - // we don't currently support shrink behavior - return (void *) MFAIL; - } - else - { - return sbrk_top; - } - } - - // cleanup any allocated memory pools - // called as last thing before shutting down driver - - void osCleanupMem(void) - { - void **ptr; - - for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) - if (*ptr) - { - PoolDeallocate(*ptr); - *ptr = 0; - } - } - -*/ - - -/* ----------------------------------------------------------------------- -History: - V2.8.4 (not yet released) - * Add mspace_mmap_large_chunks; thanks to Jean Brouwers - * Fix insufficient sys_alloc padding when using 16byte alignment - * Fix bad error check in mspace_footprint - * Adaptations for ptmalloc, courtesy of Wolfram Gloger. - * Reentrant spin locks, courtesy of Earl Chew and others - * Win32 improvements, courtesy of Niall Douglas and Earl Chew - * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options - * Extension hook in malloc_state - * Various small adjustments to reduce warnings on some compilers - * Various configuration extensions/changes for more platforms. Thanks - to all who contributed these. 
- - V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) - * Add max_footprint functions - * Ensure all appropriate literals are size_t - * Fix conditional compilation problem for some #define settings - * Avoid concatenating segments with the one provided - in create_mspace_with_base - * Rename some variables to avoid compiler shadowing warnings - * Use explicit lock initialization. - * Better handling of sbrk interference. - * Simplify and fix segment insertion, trimming and mspace_destroy - * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x - * Thanks especially to Dennis Flanagan for help on these. - - V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) - * Fix memalign brace error. - - V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) - * Fix improper #endif nesting in C++ - * Add explicit casts needed for C++ - - V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) - * Use trees for large bins - * Support mspaces - * Use segments to unify sbrk-based and mmap-based system allocation, - removing need for emulation on most platforms without sbrk. - * Default safety checks - * Optional footer checks. Thanks to William Robertson for the idea. - * Internal code refactoring - * Incorporate suggestions and platform-specific changes. - Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, - Aaron Bachmann, Emery Berger, and others. - * Speed up non-fastbin processing enough to remove fastbins. - * Remove useless cfree() to avoid conflicts with other apps. - * Remove internal memcpy, memset. Compilers handle builtins better. - * Remove some options that no one ever used and rename others. - - V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) - * Fix malloc_state bitmap array misdeclaration - - V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) - * Allow tuning of FIRST_SORTED_BIN_SIZE - * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. - * Better detection and support for non-contiguousness of MORECORE. - Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger - * Bypass most of malloc if no frees. Thanks To Emery Berger. - * Fix freeing of old top non-contiguous chunk im sysmalloc. - * Raised default trim and map thresholds to 256K. - * Fix mmap-related #defines. Thanks to Lubos Lunak. - * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. - * Branch-free bin calculation - * Default trim and mmap thresholds now 256K. - - V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) - * Introduce independent_comalloc and independent_calloc. - Thanks to Michael Pachos for motivation and help. - * Make optional .h file available - * Allow > 2GB requests on 32bit systems. - * new WIN32 sbrk, mmap, munmap, lock code from . - Thanks also to Andreas Mueller , - and Anonymous. - * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for - helping test this.) - * memalign: check alignment arg - * realloc: don't try to shift chunks backwards, since this - leads to more fragmentation in some programs and doesn't - seem to help in any others. - * Collect all cases in malloc requiring system memory into sysmalloc - * Use mmap as backup to sbrk - * Place all internal state in malloc_state - * Introduce fastbins (although similar to 2.5.1) - * Many minor tunings and cosmetic improvements - * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK - * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS - Thanks to Tony E. Bennett and others. - * Include errno.h to support default failure action. 
-
- V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
- * return null for negative arguments
- * Added several WIN32 cleanups from Martin C. Fong
- * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
- (e.g. WIN32 platforms)
- * Cleanup header file inclusion for WIN32 platforms
- * Cleanup code to avoid Microsoft Visual C++ compiler complaints
- * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
- memory allocation routines
- * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
- * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
- usage of 'assert' in non-WIN32 code
- * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
- avoid infinite loop
- * Always call 'fREe()' rather than 'free()'
-
- V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
- * Fixed ordering problem with boundary-stamping
-
- V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
- * Added pvalloc, as recommended by H.J. Lu
- * Added 64bit pointer support mainly from Wolfram Gloger
- * Added anonymously donated WIN32 sbrk emulation
- * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
- * malloc_extend_top: fix mask error that caused wastage after
- foreign sbrks
- * Add linux mremap support code from HJ Lu
-
- V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
- * Integrated most documentation with the code.
- * Add support for mmap, with help from
- Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Use last_remainder in more cases.
- * Pack bins using idea from colin@nyx10.cs.du.edu
- * Use ordered bins instead of best-fit threshold
- * Eliminate block-local decls to simplify tracing and debugging.
- * Support another case of realloc via move into top
- * Fix error occurring when initial sbrk_base not word-aligned.
- * Rely on page size for units instead of SBRK_UNIT to
- avoid surprises about sbrk alignment conventions.
- * Add mallinfo, mallopt. Thanks to Raymond Nijssen
- (raymond@es.ele.tue.nl) for the suggestion.
- * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
- * More precautions for cases where other routines call sbrk,
- courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
- * Added macros etc., allowing use in linux libc from
- H.J. Lu (hjl@gnu.ai.mit.edu)
- * Inverted this history list
-
- V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
- * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
- * Removed all preallocation code since under current scheme
- the work required to undo bad preallocations exceeds
- the work saved in good cases for most test programs.
- * No longer use return list or unconsolidated bins since
- no scheme using them consistently outperforms those that don't
- given above changes.
- * Use best fit for very large chunks to prevent some worst-cases.
- * Added some support for debugging
-
- V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
- * Removed footers when chunks are in use. Thanks to
- Paul Wilson (wilson@cs.texas.edu) for the suggestion.
-
- V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
- * Added malloc_trim, with help from Wolfram Gloger
- (wmglo@Dent.MED.Uni-Muenchen.DE).
- - V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) - - V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) - * realloc: try to expand in both directions - * malloc: swap order of clean-bin strategy; - * realloc: only conditionally expand backwards - * Try not to scavenge used bins - * Use bin counts as a guide to preallocation - * Occasionally bin return list chunks in first scan - * Add a few optimizations from colin@nyx10.cs.du.edu - - V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) - * faster bin computation & slightly different binning - * merged all consolidations to one part of malloc proper - (eliminating old malloc_find_space & malloc_clean_bin) - * Scan 2 returns chunks (not just 1) - * Propagate failure in realloc if malloc returns 0 - * Add stuff to allow compilation on non-ANSI compilers - from kpv@research.att.com - - V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) - * removed potential for odd address access in prev_chunk - * removed dependency on getpagesize.h - * misc cosmetics and a bit more internal documentation - * anticosmetics: mangled names in macros to evade debugger strangeness - * tested on sparc, hp-700, dec-mips, rs6000 - with gcc & native cc (hp, dec only) allowing - Detlefs & Zorn comparison study (in SIGPLAN Notices.) - - Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) - * Based loosely on libg++-1.2X malloc. (It retains some of the overall - structure of old version, but most details differ.) - -*/ - - diff --git a/sources/nedmalloc/nedmalloc.c b/sources/nedmalloc/nedmalloc.c deleted file mode 100755 index 4dfbded9..00000000 --- a/sources/nedmalloc/nedmalloc.c +++ /dev/null @@ -1,950 +0,0 @@ -/* Alternative malloc implementation for multiple threads without -lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
-*/
-
-#ifdef _MSC_VER
-/* Enable full aliasing on MSVC */
-/*#pragma optimize("a", on)*/
-#endif
-
-/*#define FULLSANITYCHECKS*/
-
-#include "nedmalloc.h"
-#ifdef WIN32
- #include <malloc.h>
-#endif
-#define MSPACES 1
-#define ONLY_MSPACES 1
-#ifndef USE_LOCKS
- #define USE_LOCKS 1
-#endif
-#define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */
-#undef DEBUG /* dlmalloc wants DEBUG either 0 or 1 */
-#ifdef _DEBUG
- #define DEBUG 1
-#else
- #define DEBUG 0
-#endif
-#ifdef NDEBUG /* Disable assert checking on release builds */
- #undef DEBUG
-#endif
-/* The default of 64Kb means we spend too much time kernel-side */
-#ifndef DEFAULT_GRANULARITY
-#define DEFAULT_GRANULARITY (1*1024*1024)
-#endif
-/*#define USE_SPIN_LOCKS 0*/
-
-
-/*#define FORCEINLINE*/
-#include "malloc.c.h"
-#ifdef NDEBUG /* Disable assert checking on release builds */
- #undef DEBUG
-#endif
-
-/* The maximum concurrent threads in a pool possible */
-#ifndef MAXTHREADSINPOOL
-#define MAXTHREADSINPOOL 16
-#endif
-/* The maximum number of threadcaches which can be allocated */
-#ifndef THREADCACHEMAXCACHES
-#define THREADCACHEMAXCACHES 256
-#endif
-/* The maximum size to be allocated from the thread cache */
-#ifndef THREADCACHEMAX
-#define THREADCACHEMAX 8192
-#endif
-#if 0
-/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
-#define THREADCACHEMAXBINS ((13-4)*2)
-#else
-/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
-#define THREADCACHEMAXBINS (13-4)
-#endif
-/* Point at which the free space in a thread cache is garbage collected */
-#ifndef THREADCACHEMAXFREESPACE
-#define THREADCACHEMAXFREESPACE (512*1024)
-#endif
-
-
-#ifdef WIN32
- #define TLSVAR DWORD
- #define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
- #define TLSFREE(k) (!TlsFree(k))
- #define TLSGET(k) TlsGetValue(k)
- #define TLSSET(k, a) (!TlsSetValue(k, a))
- #ifdef DEBUG
-static LPVOID ChkedTlsGetValue(DWORD idx)
-{
- LPVOID ret=TlsGetValue(idx);
- assert(S_OK==GetLastError());
- return ret;
-}
- #undef TLSGET
- #define TLSGET(k) ChkedTlsGetValue(k)
- #endif
-#else
- #define TLSVAR pthread_key_t
- #define TLSALLOC(k) pthread_key_create(k, 0)
- #define TLSFREE(k) pthread_key_delete(k)
- #define TLSGET(k) pthread_getspecific(k)
- #define TLSSET(k, a) pthread_setspecific(k, a)
-#endif
-
-#if 0
-/* Only enable if testing with valgrind. Causes misoperation */
-#define mspace_malloc(p, s) malloc(s)
-#define mspace_realloc(p, m, s) realloc(m, s)
-#define mspace_calloc(p, n, s) calloc(n, s)
-#define mspace_free(p, m) free(m)
-#endif
-
-
-#if defined(__cplusplus)
-#if !defined(NO_NED_NAMESPACE)
-namespace nedalloc {
-#else
-extern "C" {
-#endif
-#endif
-
-size_t nedblksize(void *mem) THROWSPEC
-{
-#if 0
- /* Only enable if testing with valgrind. 
Causes misoperation */ - return THREADCACHEMAX; -#else - if(mem) - { - mchunkptr p=mem2chunk(mem); - assert(cinuse(p)); /* If this fails, someone tried to free a block twice */ - if(cinuse(p)) - return chunksize(p)-overhead_for(p); - } - return 0; -#endif -} - -void nedsetvalue(void *v) THROWSPEC { nedpsetvalue(0, v); } -void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc(0, size); } -void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc(0, no, size); } -void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc(0, mem, size); } -void nedfree(void *mem) THROWSPEC { nedpfree(0, mem); } -void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); } -#if !NO_MALLINFO -struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); } -#endif -int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); } -int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim(0, pad); } -void nedmalloc_stats() THROWSPEC { nedpmalloc_stats(0); } -size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint(0); } -void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); } -void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); } - -struct threadcacheblk_t; -typedef struct threadcacheblk_t threadcacheblk; -struct threadcacheblk_t -{ /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */ -#ifdef FULLSANITYCHECKS - unsigned int magic; -#endif - unsigned int lastUsed, size; - threadcacheblk *next, *prev; -}; -typedef struct threadcache_t -{ -#ifdef FULLSANITYCHECKS - unsigned int magic1; -#endif - int mymspace; /* Last mspace entry this thread used */ - long threadid; - unsigned int mallocs, frees, successes; - size_t freeInCache; /* How much free space is stored in this cache */ - threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2]; -#ifdef FULLSANITYCHECKS - unsigned int magic2; -#endif -} threadcache; -struct nedpool_t -{ - MLOCK_T mutex; - void *uservalue; - int threads; /* Max entries in m to use */ - threadcache *caches[THREADCACHEMAXCACHES]; - TLSVAR mycache; /* Thread cache for this thread. 
0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
- mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */
-};
-static nedpool syspool;
-
-static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC
-{ /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */
- unsigned int topbit, size=(unsigned int)(_size>>4);
- /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */
-
-#if defined(__GNUC__)
- topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
-#elif defined(_MSC_VER) && _MSC_VER>=1300
- {
- unsigned long bsrTopBit;
-
- _BitScanReverse(&bsrTopBit, size);
-
- topbit = bsrTopBit;
- }
-#else
-#if 0
- union {
- unsigned asInt[2];
- double asDouble;
- };
- int n;
-
- asDouble = (double)size + 0.5;
- topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;
-#else
- {
- unsigned int x=size;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >>16);
- x = ~x;
- x = x - ((x >> 1) & 0x55555555);
- x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
- x = (x + (x >> 4)) & 0x0F0F0F0F;
- x = x + (x << 8);
- x = x + (x << 16);
- topbit=31 - (x >> 24);
- }
-#endif
-#endif
- return topbit;
-}
-
-
-#ifdef FULLSANITYCHECKS
-static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC
-{
- assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
- if(ptr[0] && ptr[1])
- {
- assert(nedblksize(ptr[0])>=sizeof(threadcacheblk));
- assert(nedblksize(ptr[1])>=sizeof(threadcacheblk));
- assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
- assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
- assert(!ptr[0]->prev);
- assert(!ptr[1]->next);
- if(ptr[0]==ptr[1])
- {
- assert(!ptr[0]->next);
- assert(!ptr[1]->prev);
- }
- }
-}
-static void tcfullsanitycheck(threadcache *tc) THROWSPEC
-{
- threadcacheblk **tcbptr=tc->bins;
- int n;
- for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
- {
- threadcacheblk *b, *ob=0;
- tcsanitycheck(tcbptr);
- for(b=tcbptr[0]; b; ob=b, b=b->next)
- {
- assert(*(unsigned int *) "NEDN"==b->magic);
- assert(!ob || ob->next==b);
- assert(!ob || b->prev==ob);
- }
- }
-}
-#endif
-
-static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC
-{
-#ifdef FULLSANITYCHECKS
- tcfullsanitycheck(tc);
-#endif
- if(tc->freeInCache)
- {
- threadcacheblk **tcbptr=tc->bins;
- int n;
- for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
- {
- threadcacheblk **tcb=tcbptr+1; /* come from oldest end of list */
- /*tcsanitycheck(tcbptr);*/
- for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
- {
- threadcacheblk *f=*tcb;
- size_t blksize=f->size; /*nedblksize(f);*/
- assert(blksize<=nedblksize(f));
- assert(blksize);
-#ifdef FULLSANITYCHECKS
- assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
-#endif
- *tcb=(*tcb)->prev;
- if(*tcb)
- (*tcb)->next=0;
- else
- *tcbptr=0;
- tc->freeInCache-=blksize;
- assert((long) tc->freeInCache>=0);
- mspace_free(0, f);
- /*tcsanitycheck(tcbptr);*/
- }
- }
- }
-#ifdef FULLSANITYCHECKS
- tcfullsanitycheck(tc);
-#endif
-}
-static void DestroyCaches(nedpool *p) THROWSPEC
-{
- if(p->caches)
- {
- threadcache *tc;
- int n;
- for(n=0; n<THREADCACHEMAXCACHES; n++)
- {
- if((tc=p->caches[n]))
- {
- tc->frees++;
- RemoveCacheEntries(p, tc, 0);
- assert(!tc->freeInCache);
- tc->mymspace=-1;
- tc->threadid=0;
- mspace_free(0, tc);
- p->caches[n]=0;
- }
- }
- }
-}
-
-static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC
-{
- threadcache *tc=0;
- int n, end;
- ACQUIRE_LOCK(&p->mutex);
- for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
- if(THREADCACHEMAXCACHES==n)
- { /* List exhausted, so disable for this thread */
- 
RELEASE_LOCK(&p->mutex);
- return 0;
- }
- tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache));
- if(!tc)
- {
- RELEASE_LOCK(&p->mutex);
- return 0;
- }
-#ifdef FULLSANITYCHECKS
- tc->magic1=*(unsigned int *)"NEDMALC1";
- tc->magic2=*(unsigned int *)"NEDMALC2";
-#endif
- tc->threadid=(long)(size_t)CURRENT_THREAD;
- for(end=0; p->m[end]; end++);
- tc->mymspace=tc->threadid % end;
- RELEASE_LOCK(&p->mutex);
- if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
- return tc;
-}
-
-static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC
-{
- void *ret=0;
- unsigned int bestsize;
- unsigned int idx=size2binidx(*size);
- size_t blksize=0;
- threadcacheblk *blk, **binsptr;
-#ifdef FULLSANITYCHECKS
- tcfullsanitycheck(tc);
-#endif
- /* Calculate best fit bin size */
- bestsize=1<<(idx+4);
-#if 0
- /* Finer grained bin fit */
- idx<<=1;
- if(*size>bestsize)
- {
- idx++;
- bestsize+=bestsize>>1;
- }
- if(*size>bestsize)
- {
- idx++;
- bestsize=1<<(4+(idx>>1));
- }
-#else
- if(*size>bestsize)
- {
- idx++;
- bestsize<<=1;
- }
-#endif
- assert(bestsize>=*size);
- if(*size<bestsize) *size=bestsize;
- binsptr=&tc->bins[idx*2];
- /* Try to match close, but move up a bin if necessary */
- blk=*binsptr;
- if(!blk || blk->size<*size)
- { /* Bump it up a bin */
- if(idx<THREADCACHEMAXBINS)
- {
- idx++;
- binsptr+=2;
- blk=*binsptr;
- }
- }
- if(blk)
- {
- blksize=blk->size; /*nedblksize(blk);*/
- assert(nedblksize(blk)>=blksize);
- assert(blksize>=*size);
- if(blk->next)
- blk->next->prev=0;
- *binsptr=blk->next;
- if(!*binsptr)
- binsptr[1]=0;
-#ifdef FULLSANITYCHECKS
- blk->magic=0;
-#endif
- assert(binsptr[0]!=blk && binsptr[1]!=blk);
- assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD);
- /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/
- ret=(void *) blk;
- }
- ++tc->mallocs;
- if(ret)
- {
- assert(blksize>=*size);
- ++tc->successes;
- tc->freeInCache-=blksize;
- assert((long) tc->freeInCache>=0);
- }
-#if defined(DEBUG) && 0
- if(!(tc->mallocs & 0xfff))
- {
- printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
- (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
- }
-#endif
-#ifdef FULLSANITYCHECKS
- tcfullsanitycheck(tc);
-#endif
- return ret;
-}
-static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC
-{
- unsigned int age=THREADCACHEMAXFREESPACE/8192;
- /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
- while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
- {
- RemoveCacheEntries(p, tc, age);
- /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
- age>>=1;
- }
- /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
-}
-static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC
-{
- unsigned int bestsize;
- unsigned int idx=size2binidx(size);
- threadcacheblk **binsptr, *tck=(threadcacheblk *) mem;
- assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
-#ifdef DEBUG
- { /* Make sure this is a valid memory block */
- mchunkptr cp = mem2chunk(mem);
- mstate fm = get_mstate_for(cp);
- if (!ok_magic(fm)) {
- USAGE_ERROR_ACTION(fm, cp);
- return;
- }
- }
-#endif
-#ifdef FULLSANITYCHECKS
- tcfullsanitycheck(tc);
-#endif
- /* Calculate best fit bin size */
- bestsize=1<<(idx+4);
-#if 0
- /* Finer grained bin fit */
- idx<<=1;
- if(size>bestsize)
- {
- unsigned int biggerbestsize=bestsize+bestsize<<1;
- if(size>=biggerbestsize)
- {
- idx++;
- bestsize=biggerbestsize; 
- } - } -#endif - if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ - size=bestsize; - binsptr=&tc->bins[idx*2]; - assert(idx<=THREADCACHEMAXBINS); - if(tck==*binsptr) - { - fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void*)tck); - abort(); - } -#ifdef FULLSANITYCHECKS - tck->magic=*(unsigned int *) "NEDN"; -#endif - tck->lastUsed=++tc->frees; - tck->size=(unsigned int) size; - tck->next=*binsptr; - tck->prev=0; - if(tck->next) - tck->next->prev=tck; - else - binsptr[1]=tck; - assert(!*binsptr || (*binsptr)->size==tck->size); - *binsptr=tck; - assert(tck==tc->bins[idx*2]); - assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); - /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ - tc->freeInCache+=size; -#ifdef FULLSANITYCHECKS - tcfullsanitycheck(tc); -#endif -#if 1 - if(tc->freeInCache>=THREADCACHEMAXFREESPACE) - ReleaseFreeInCache(p, tc, mymspace); -#endif -} - - - - -static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC -{ /* threads is -1 for system pool */ - ensure_initialization(); - ACQUIRE_MALLOC_GLOBAL_LOCK(); - if(p->threads) goto done; - if(INITIAL_LOCK(&p->mutex)) goto err; - if(TLSALLOC(&p->mycache)) goto err; - if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; - p->m[0]->extp=p; - p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads; -done: - RELEASE_MALLOC_GLOBAL_LOCK(); - return 1; -err: - if(threads<0) - abort(); /* If you can't allocate for system pool, we're screwed */ - DestroyCaches(p); - if(p->m[0]) - { - destroy_mspace(p->m[0]); - p->m[0]=0; - } - if(p->mycache) - { - if(TLSFREE(p->mycache)) abort(); - p->mycache=0; - } - RELEASE_MALLOC_GLOBAL_LOCK(); - return 0; -} -static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC -{ /* Gets called when thread's last used mspace is in use. 
The strategy
- is to run through the list of all available mspaces looking for an
- unlocked one and if we fail, we create a new one so long as we don't
- exceed p->threads */
- int n, end;
- for(n=end=*lastUsed+1; p->m[n]; end=++n)
- {
- if(TRY_LOCK(&p->m[n]->mutex)) goto found;
- }
- for(n=0; n<*lastUsed && p->m[n]; n++)
- {
- if(TRY_LOCK(&p->m[n]->mutex)) goto found;
- }
- if(end<p->threads)
- {
- mstate temp;
- if(!(temp=(mstate) create_mspace(size, 1)))
- goto badexit;
- /* Now we're ready to modify the lists, we lock */
- ACQUIRE_LOCK(&p->mutex);
- while(p->m[end] && end<p->threads)
- end++;
- if(end>=p->threads)
- { /* Drat, must destroy it now */
- RELEASE_LOCK(&p->mutex);
- destroy_mspace((mspace) temp);
- goto badexit;
- }
- /* We really want to make sure this goes into memory now but we
- have to be careful of breaking aliasing rules, so write it twice */
- *((volatile struct malloc_state **) &p->m[end])=p->m[end]=temp;
- ACQUIRE_LOCK(&p->m[end]->mutex);
- /*printf("Created mspace idx %d\n", end);*/
- RELEASE_LOCK(&p->mutex);
- n=end;
- goto found;
- }
- /* Let it lock on the last one it used */
-badexit:
- ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
- return p->m[*lastUsed];
-found:
- *lastUsed=n;
- if(tc)
- tc->mymspace=n;
- else
- {
- if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
- }
- return p->m[n];
-}
-
-nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
-{
- nedpool *ret;
- if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0;
- if(!InitPool(ret, capacity, threads))
- {
- nedpfree(0, ret);
- return 0;
- }
- return ret;
-}
-void neddestroypool(nedpool *p) THROWSPEC
-{
- int n;
- ACQUIRE_LOCK(&p->mutex);
- DestroyCaches(p);
- for(n=0; p->m[n]; n++)
- {
- destroy_mspace(p->m[n]);
- p->m[n]=0;
- }
- RELEASE_LOCK(&p->mutex);
- if(TLSFREE(p->mycache)) abort();
- nedpfree(0, p);
-}
-
-void nedpsetvalue(nedpool *p, void *v) THROWSPEC
-{
- if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- p->uservalue=v;
-}
-void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
-{
- nedpool *np=0;
- mchunkptr mcp=mem2chunk(mem);
- mstate fm;
- if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0;
- if(!cinuse(mcp)) return 0;
- if(!next_pinuse(mcp)) return 0;
- if(!is_mmapped(mcp) && !pinuse(mcp))
- {
- if(next_chunk(prev_chunk(mcp))!=mcp) return 0;
- }
- fm=get_mstate_for(mcp);
- if(!ok_magic(fm)) return 0;
- if(!ok_address(fm, mcp)) return 0;
- if(!fm->extp) return 0;
- np=(nedpool *) fm->extp;
- if(p) *p=np;
- return np->uservalue;
-}
-
-void neddisablethreadcache(nedpool *p) THROWSPEC
-{
- int mycache;
- if(!p)
- {
- p=&syspool;
- if(!syspool.threads) InitPool(&syspool, 0, -1);
- }
- mycache=(int)(size_t) TLSGET(p->mycache);
- if(!mycache)
- { /* Set to mspace 0 */
- if(TLSSET(p->mycache, (void *)-1)) abort();
- }
- else if(mycache>0)
- { /* Set to last used mspace */
- threadcache *tc=p->caches[mycache-1];
-#if defined(DEBUG)
- printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
- 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
-#endif
- if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
- tc->frees++;
- RemoveCacheEntries(p, tc, 0);
- assert(!tc->freeInCache);
- tc->mymspace=-1;
- tc->threadid=0;
- mspace_free(0, p->caches[mycache-1]);
- p->caches[mycache-1]=0;
- }
-}
-
-#define GETMSPACE(m,p,tc,ms,s,action) \
- do \
- { \
- mstate m = GetMSpace((p),(tc),(ms),(s)); \
- action; \
- RELEASE_LOCK(&m->mutex); \
- } while (0)
-
-static FORCEINLINE 
mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC
-{ /* Returns a locked and ready for use mspace */
- mstate m=p->m[mymspace];
- assert(m);
- if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);\
- /*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
- return m;
-}
-static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC
-{
- int mycache;
- if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
- if(!*p) { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- mycache=(int)(size_t) TLSGET((*p)->mycache);
- if(mycache>0)
- {
- *tc=(*p)->caches[mycache-1];
- *mymspace=(*tc)->mymspace;
- }
- else if(!mycache)
- {
- *tc=AllocCache(*p);
- if(!*tc)
- { /* Disable */
- if(TLSSET((*p)->mycache, (void *)-1)) abort();
- *mymspace=0;
- }
- else
- *mymspace=(*tc)->mymspace;
- }
- else
- {
- *tc=0;
- *mymspace=-mycache-1;
- }
- assert(*mymspace>=0);
- assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid);
-#ifdef FULLSANITYCHECKS
- if(*tc)
- {
- if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
- {
- abort();
- }
- }
-#endif
-}
-
-void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
-{
- void *ret=0;
- threadcache *tc;
- int mymspace;
- GetThreadCache(&p, &tc, &mymspace, &size);
-#if THREADCACHEMAX
- if(tc && size<=THREADCACHEMAX)
- { /* Use the thread cache */
- ret=threadcache_malloc(p, tc, &size);
- }
-#endif
- if(!ret)
- { /* Use this thread's mspace */
- GETMSPACE(m, p, tc, mymspace, size,
- ret=mspace_malloc(m, size));
- }
- return ret;
-}
-void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
-{
- size_t rsize=size*no;
- void *ret=0;
- threadcache *tc;
- int mymspace;
- GetThreadCache(&p, &tc, &mymspace, &rsize);
-#if THREADCACHEMAX
- if(tc && rsize<=THREADCACHEMAX)
- { /* Use the thread cache */
- if((ret=threadcache_malloc(p, tc, &rsize)))
- memset(ret, 0, rsize);
- }
-#endif
- if(!ret)
- { /* Use this thread's mspace */
- GETMSPACE(m, p, tc, mymspace, rsize,
- ret=mspace_calloc(m, 1, rsize));
- }
- return ret;
-}
-void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
-{
- void *ret=0;
- threadcache *tc;
- int mymspace;
- if(!mem) return nedpmalloc(p, size);
- GetThreadCache(&p, &tc, &mymspace, &size);
-#if THREADCACHEMAX
- if(tc && size && size<=THREADCACHEMAX)
- { /* Use the thread cache */
- size_t memsize=nedblksize(mem);
- assert(memsize);
- if((ret=threadcache_malloc(p, tc, &size)))
- {
- memcpy(ret, mem, memsize<size ? memsize : size);
- nedpfree(p, mem);
- }
- }
-#endif
- if(!ret)
- { /* Reallocs always happen in the mspace they happened in, so skip
- locking the preferred mspace for this thread */
- ret=mspace_realloc(0, mem, size);
- }
- return ret;
-}
-void nedpfree(nedpool *p, void *mem) THROWSPEC
-{ /* Frees always happen in the mspace they happened in, so skip
- locking the preferred mspace for this thread */
- threadcache *tc;
- int mymspace;
- size_t memsize;
- assert(mem);
- GetThreadCache(&p, &tc, &mymspace, 0);
-#if THREADCACHEMAX
- memsize=nedblksize(mem);
- assert(memsize);
- if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
- threadcache_free(p, tc, mymspace, mem, memsize);
- else
-#endif
- mspace_free(0, mem);
-}
-void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
-{
- void *ret;
- threadcache *tc;
- int mymspace;
- GetThreadCache(&p, &tc, &mymspace, &bytes);
- { /* Use this thread's mspace */
- GETMSPACE(m, p, tc, mymspace, bytes,
- ret=mspace_memalign(m, alignment, bytes));
- }
- return ret;
-}
-#if !NO_MALLINFO
-struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC
-{
- int n;
- struct mallinfo ret={0,0,0,0,0,0,0,0,0,0};
- if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- for(n=0; p->m[n]; n++)
- {
- struct mallinfo t=mspace_mallinfo(p->m[n]);
- ret.arena+=t.arena;
- ret.ordblks+=t.ordblks;
- ret.hblkhd+=t.hblkhd;
- ret.usmblks+=t.usmblks;
- ret.uordblks+=t.uordblks;
- ret.fordblks+=t.fordblks;
- ret.keepcost+=t.keepcost;
- }
- return ret;
-}
-#endif
-int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
-{
- return mspace_mallopt(parno, value);
-}
-int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
-{
- int n, ret=0;
- if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- for(n=0; p->m[n]; n++)
- {
- ret+=mspace_trim(p->m[n], pad);
- }
- return ret;
-}
-void nedpmalloc_stats(nedpool *p) THROWSPEC
-{
- int n;
- if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- for(n=0; p->m[n]; n++)
- {
- mspace_malloc_stats(p->m[n]);
- }
-}
-size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
-{
- size_t ret=0;
- int n;
- if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
- for(n=0; p->m[n]; n++)
- {
- ret+=mspace_footprint(p->m[n]);
- }
- return ret;
-}
-void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
-{
- void **ret;
- 
threadcache *tc;
- int mymspace;
- GetThreadCache(&p, &tc, &mymspace, &elemsize);
- GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
- ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
- return ret;
-}
-void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
-{
- void **ret;
- threadcache *tc;
- int mymspace;
- size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
- if(!adjustedsizes) return 0;
- for(i=0; i<elems; i++)
- adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
- GetThreadCache(&p, &tc, &mymspace, 0);
- GETMSPACE(m, p, tc, mymspace, 0,
- ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
- return ret;
-}
-
-#if defined(__cplusplus)
-}
-#endif
diff --git a/sources/nedmalloc/nedmalloc.h b/sources/nedmalloc/nedmalloc.h
deleted file mode 100755
--- a/sources/nedmalloc/nedmalloc.h
+++ /dev/null
-#ifndef NEDMALLOC_H
-#define NEDMALLOC_H
-
-#include <stddef.h> /* for size_t */
-
-#ifndef EXTSPEC
- #define EXTSPEC extern
-#endif
-
-#if defined(_MSC_VER) && _MSC_VER>=1400
- #define MALLOCATTR __declspec(restrict)
-#endif
-#ifdef __GNUC__
- #define MALLOCATTR __attribute__ ((malloc))
-#endif
-#ifndef MALLOCATTR
- #define MALLOCATTR
-#endif
-
-#ifdef REPLACE_SYSTEM_ALLOCATOR
- #define nedmalloc malloc
- #define nedcalloc calloc
- #define nedrealloc realloc
- #define nedfree free
- #define nedmemalign memalign
- #define nedmallinfo mallinfo
- #define nedmallopt mallopt
- #define nedmalloc_trim malloc_trim
- #define nedmalloc_stats malloc_stats
- #define nedmalloc_footprint malloc_footprint
- #define nedindependent_calloc independent_calloc
- #define nedindependent_comalloc independent_comalloc
- #ifdef _MSC_VER
- #define nedblksize _msize
- #endif
-#endif
-
-#ifndef NO_MALLINFO
-#define NO_MALLINFO 0
-#endif
-
-#if !NO_MALLINFO
-struct mallinfo;
-#endif
-
-#if defined(__cplusplus)
- #if !defined(NO_NED_NAMESPACE)
-namespace nedalloc {
- #else
-extern "C" {
- #endif
- #define THROWSPEC throw()
-#else
- #define THROWSPEC
-#endif
-
-/* These are the global functions */
-
-/* Gets the usable size of an allocated block. Note this will always be bigger than what was
-asked for due to rounding etc.
-*/
-EXTSPEC size_t nedblksize(void *mem) THROWSPEC;
-
-EXTSPEC void nedsetvalue(void *v) THROWSPEC;
-
-EXTSPEC MALLOCATTR void * nedmalloc(size_t size) THROWSPEC;
-EXTSPEC MALLOCATTR void * nedcalloc(size_t no, size_t size) THROWSPEC;
-EXTSPEC MALLOCATTR void * nedrealloc(void *mem, size_t size) THROWSPEC;
-EXTSPEC void nedfree(void *mem) THROWSPEC;
-EXTSPEC MALLOCATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC;
-#if !NO_MALLINFO
-EXTSPEC struct mallinfo nedmallinfo(void) THROWSPEC;
-#endif
-EXTSPEC int nedmallopt(int parno, int value) THROWSPEC;
-EXTSPEC int nedmalloc_trim(size_t pad) THROWSPEC;
-EXTSPEC void nedmalloc_stats(void) THROWSPEC;
-EXTSPEC size_t nedmalloc_footprint(void) THROWSPEC;
-EXTSPEC MALLOCATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
-EXTSPEC MALLOCATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC;
-
-/* These are the pool functions */
-struct nedpool_t;
-typedef struct nedpool_t nedpool;
-
-/* Creates a memory pool for use with the nedp* functions below.
-Capacity is how much to allocate immediately (if you know you'll be allocating a lot
-of memory very soon) which you can leave at zero. Threads specifies how many threads
-will *normally* be accessing the pool concurrently. Setting this to zero means it
-extends on demand, but be careful of this as it can rapidly consume system resources
-where bursts of concurrent threads use a pool at once.
-*/
-EXTSPEC MALLOCATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC;
-
-/* Destroys a memory pool previously created by nedcreatepool().
-*/
-EXTSPEC void neddestroypool(nedpool *p) THROWSPEC;
-
-/* Sets a value to be associated with a pool. 
You can retrieve this value by passing -any memory block allocated from that pool. -*/ -EXTSPEC void nedpsetvalue(nedpool *p, void *v) THROWSPEC; -/* Gets a previously set value using nedpsetvalue() or zero if memory is unknown. -Optionally can also retrieve pool. -*/ -EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC; - -/* Disables the thread cache for the calling thread, returning any existing cache -data to the central pool. -*/ -EXTSPEC void neddisablethreadcache(nedpool *p) THROWSPEC; - -EXTSPEC MALLOCATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC; -EXTSPEC MALLOCATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC; -EXTSPEC MALLOCATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC; -EXTSPEC void nedpfree(nedpool *p, void *mem) THROWSPEC; -EXTSPEC MALLOCATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC; -#if !NO_MALLINFO -EXTSPEC struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC; -#endif -EXTSPEC int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC; -EXTSPEC int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC; -EXTSPEC void nedpmalloc_stats(nedpool *p) THROWSPEC; -EXTSPEC size_t nedpmalloc_footprint(nedpool *p) THROWSPEC; -EXTSPEC MALLOCATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC; -EXTSPEC MALLOCATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC; - -#if defined(__cplusplus) -} -#endif - -#undef MALLOCATTR -#undef EXTSPEC - -#endif diff --git a/win64/CMakeLists.txt b/win64/CMakeLists.txt index 5c653014..0d585d9e 100644 --- a/win64/CMakeLists.txt +++ b/win64/CMakeLists.txt @@ -166,8 +166,9 @@ set_target_properties(tcmalloc PROPERTIES set(se_libs_name) -if(USE_SE_V8) - +if(CC_EDITOR) + message(VERBOSE "CC_EDITOR USE V8 FROM NODEJS") +elseif(USE_SE_V8) add_library(v8 SHARED IMPORTED GLOBAL) set_target_properties(v8 PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_LIST_DIR}/libs/v8.dll @@ -194,8 +195,8 @@ if(USE_SE_V8) ) set(se_libs_name v8) -elseif(NOT CC_EDITOR) - message(FATAL_ERROR "Only V8 is supported!") +else() + message(FATAL_ERROR "Only V8 is supported!") endif() ############################# glslang #############################
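Editor's note: for anyone auditing this removal, the pool API deleted above (sources/nedmalloc/nedmalloc.h) was consumed roughly as in the sketch below. This is a minimal illustration written only against the declarations visible in the deleted header; it is not code from this repository, and the capacity and size values are arbitrary.

    /* Hypothetical usage sketch of the removed nedmalloc pool API.
       Assumes the deleted sources/nedmalloc/nedmalloc.h were still on the
       include path; illustrative only, not part of this repository. */
    #include <stdio.h>
    #include "nedmalloc.h"

    int main(void)
    {
        /* capacity 0 = reserve nothing up front; threads 0 = extend on
           demand, per the nedcreatepool() comment in the deleted header. */
        nedpool *pool = nedcreatepool(0, 0);
        if (!pool) return 1;

        void *block = nedpmalloc(pool, 256);   /* pool-scoped malloc */
        if (block)
        {
            /* nedblksize() reports the usable size, which may exceed 256
               due to bin rounding. */
            printf("usable size: %u bytes\n", (unsigned) nedblksize(block));
            nedpfree(pool, block);             /* pool-scoped free */
        }

        neddestroypool(pool);                  /* destroys all mspaces and caches */
        return 0;
    }

As the deleted nedpmalloc() implementation shows, each call first tries the calling thread's cache (threadcache_malloc) and only falls back to a locked mspace on a miss, which is what gave nedmalloc its low lock contention.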