Add 3d libs

pandamicro 2019-12-20 14:28:53 +08:00
parent 7114e243d4
commit 3416f2284a
269 changed files with 129984 additions and 0 deletions

sources/jemalloc/CMakeLists.txt (executable file, 57 lines)

@@ -0,0 +1,57 @@
message(STATUS "Add dependence target: jemalloc ...")
# Define target name
set(TARGET_NAME jemalloc)
if(MSVC)
set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-win ${COCOS_EXTERNAL_PATH}/jemalloc/include-win/msvc_compat)
else()
set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-linux)
endif()
include_directories( ${INCLUDE_FILES} )
set (SOURCE_FILES
src/je_arena.c
src/je_atomic.c
src/je_base.c
src/je_bitmap.c
src/je_chunk.c
src/je_chunk_dss.c
src/je_chunk_mmap.c
src/je_ckh.c
src/je_ctl.c
src/je_extent.c
src/je_hash.c
src/je_huge.c
src/je_jemalloc.c
src/je_mb.c
src/je_mutex.c
src/je_nstime.c
src/je_pages.c
src/je_prng.c
src/je_prof.c
src/je_quarantine.c
src/je_rtree.c
src/je_spin.c
src/je_stats.c
src/je_tcache.c
src/je_ticker.c
src/je_tsd.c
src/je_util.c
src/je_witness.c
)
add_library(${TARGET_NAME} STATIC ${SOURCE_FILES})
if(MSVC)
set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "/wd4996")
endif()
if(NOT COCOS_PLATFORM_IOS)
set_target_properties(${TARGET_NAME} PROPERTIES OUTPUT_NAME_DEBUG jemalloc_d)
endif()
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER External)
message(STATUS "${TARGET_NAME} Configuration completed.")

sources/jemalloc/ChangeLog (executable file, 1043 lines)

File diff suppressed because it is too large.



@@ -0,0 +1,45 @@
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) \
not_implemented(); \
} while (0)
#endif
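
A brief illustration of how these macros are typically used (illustrative only, not part of this diff; the helper function below is hypothetical):

/* Hypothetical helper, for illustration. */
static size_t
index_of(const void *const *array, size_t nelems, const void *elem)
{
	size_t i;

	assert(array != NULL);	/* Reports file/line and aborts, but only in debug builds. */
	for (i = 0; i < nelems; i++) {
		if (array[i] == elem)
			return (i);
	}
	not_reached();	/* Callers must only ask for elements that are present. */
}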


@@ -0,0 +1,651 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* All arithmetic functions return the arithmetic result of the atomic
* operation. Some atomic operation APIs return the value prior to mutation, in
* which case the following functions must redundantly compute the result so
* that it can be returned. These functions are normally inlined, so the extra
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* *p = s;
* return (false);
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
uint64_t t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
uint64_t t;
x = (uint64_t)(-(int64_t)x);
t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgq %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory" /* Clobbers. */
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
uint64_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint64(p);
} while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
__sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
uint32_t t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
uint32_t t;
x = (uint32_t)(-(int32_t)x);
t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgl %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory"
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
uint32_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint32(p);
} while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
__sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
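
A minimal usage sketch of the API above (illustrative only, not part of this diff). Note that, per the convention documented in this header, atomic_cas_*() returns false on success and true on failure:

/* Illustrative only: a shared counter updated without locks. */
static size_t demo_counter;

static void
demo_counter_bump(size_t n)
{
	atomic_add_z(&demo_counter, n);	/* Returns the post-add value. */
}

static void
demo_counter_store_max(size_t candidate)
{
	size_t cur;

	do {
		cur = atomic_read_z(&demo_counter);
		if (candidate <= cur)
			return;
	} while (atomic_cas_z(&demo_counter, cur, candidate));	/* true: CAS failed, retry. */
}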


@@ -0,0 +1,25 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(tsdn_t *tsdn, size_t size);
void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped);
bool base_boot(void);
void base_prefork(tsdn_t *tsdn);
void base_postfork_parent(tsdn_t *tsdn);
void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/


@@ -0,0 +1,274 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#else /* USE_TREE */
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#endif /* USE_TREE */
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
#ifdef USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
#else /* USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
#endif /* USE_TREE */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
#ifdef USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
#else
size_t i;
for (i = 0; i < binfo->ngroups; i++) {
if (bitmap[i] != 0)
return (false);
}
return (true);
#endif
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
#endif
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t bit;
bitmap_t g;
unsigned i;
assert(!bitmap_full(bitmap, binfo));
#ifdef USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
}
#else
i = 0;
g = bitmap[0];
while ((bit = ffs_lu(g)) == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
UNUSED bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (!propagate)
break;
}
}
#endif /* USE_TREE */
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
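
A usage sketch of the bitmap API declared above (illustrative only, not part of this diff; it assumes the caller supplies group storage of at least bitmap_size(&binfo) bytes and that nregs <= BITMAP_MAXBITS):

/* Illustrative only: claim and release one allocation slot out of nregs. */
static size_t
demo_slot_roundtrip(bitmap_t *groups, size_t nregs)
{
	bitmap_info_t binfo;
	size_t slot;

	bitmap_info_init(&binfo, nregs);
	bitmap_init(groups, &binfo);		/* All bits start logically unset (free). */
	slot = bitmap_sfu(groups, &binfo);	/* Set-first-unset: claims slot 0 here. */
	bitmap_unset(groups, &binfo, slot);	/* Release the slot again. */
	return (slot);
}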


@@ -0,0 +1,97 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
#define LG_CHUNK_DEFAULT 21
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#define CHUNK_HOOKS_INITIALIZER { \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL \
}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
extern const char *opt_dss;
extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(const void *chunk, const extent_node_t *node,
bool *gdump);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
size_t *sn, bool *zero, bool *commit, bool dalloc_node);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
size_t *sn, bool *zero, bool *commit);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool committed);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
bool chunk_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"


@@ -0,0 +1,37 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
extern const char *dss_prec_names[];
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
void chunk_dss_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/


@@ -0,0 +1,21 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/


@@ -0,0 +1,86 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
const void *key;
const void *data;
};
struct ckh_s {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
uint64_t nshrinks;
uint64_t nshrinkfails;
uint64_t ninserts;
uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
uint64_t prng_state;
/* Total number of items. */
size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
unsigned lg_minbuckets;
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
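
For example, with 64-byte cache lines (LG_CACHELINE == 6) and 8-byte pointers (LG_SIZEOF_PTR == 3), LG_CKH_BUCKET_CELLS is 2: four 16-byte cells per bucket, exactly one cache line. A minimal usage sketch of the API (illustrative only, not part of this diff; error paths are simplified):

/* Illustrative only: a string-keyed cuckoo hash table round trip. */
static bool
demo_ckh(tsd_t *tsd)
{
	ckh_t ckh;
	void *key, *data;

	if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp))
		return (true);	/* As with ckh_insert(), true indicates failure. */
	if (ckh_insert(tsd, &ckh, "answer", (void *)(uintptr_t)42))
		return (true);
	if (!ckh_search(&ckh, "answer", &key, &data))
		malloc_printf("%s -> %zu\n", (const char *)key, (size_t)(uintptr_t)data);
	ckh_delete(tsd, &ckh);
	return (false);
}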


@@ -0,0 +1,118 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
ssize_t decay_time;
size_t pactive;
size_t pdirty;
/* The remainder are only populated if config_stats is true. */
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
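
A usage sketch of the wrapper macros above (illustrative only, not part of this diff; "stats.allocated" is a standard mallctl name and requires statistics support to be compiled in):

/* Illustrative only: query total allocated bytes, aborting with a message on failure. */
static size_t
demo_allocated_bytes(void)
{
	size_t allocated, sz = sizeof(allocated);

	xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return (allocated);
}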


@@ -0,0 +1,275 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Arena from which this extent came, if any. */
arena_t *en_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *en_addr;
/* Total region size. */
size_t en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
#endif
size_t extent_size_quantize_ceil(size_t size);
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
size_t sn, bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
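
A brief sketch of the accessor API above (illustrative only, not part of this diff):

/* Illustrative only: initialize an extent node and read its fields back. */
static void
demo_extent_node(extent_node_t *node, arena_t *arena, void *chunk, size_t size,
    size_t sn)
{
	extent_node_init(node, arena, chunk, size, sn, true, true);
	assert(extent_node_addr_get(node) == chunk);
	assert(extent_node_size_get(node) == size);
	assert(extent_node_zeroed_get(node) && extent_node_committed_get(node));
}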


@@ -0,0 +1,357 @@
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
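
A usage sketch of the top-level hash() entry point (illustrative only, not part of this diff; the seed value is arbitrary):

/* Illustrative only: hash a fixed-size key into two size_t words. */
static void
demo_hash(void)
{
	const char key[] = "extent_node_t";
	size_t h[2];

	hash(key, sizeof(key) - 1, 0x94122f33U, h);
	malloc_printf("hash: %zx %zx\n", h[0], h[1]);
}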


@@ -0,0 +1,35 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero);
bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

File diff suppressed because it is too large.


@@ -0,0 +1,75 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_DECLS_H */


@@ -0,0 +1,335 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
//#define PUB_MEMORY_TRACKER
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#define JEMALLOC_PREFIX "je_"
#define JEMALLOC_CPREFIX "JE_"
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#ifdef __x86_64__
#define CPU_SPINWAIT __asm__ volatile("pause")
#else
#define CPU_SPINWAIT
#endif
/* Defined if C11 atomics are available. */
/* #undef JEMALLOC_C11ATOMICS */
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
/* #undef JEMALLOC_ATOMIC9 */
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
/* #undef JEMALLOC_OSATOMIC */
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
/* #undef JEMALLOC_HAVE_SECURE_GETENV */
/*
* Defined if issetugid(2) is available.
*/
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#define JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
#define JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
/* #undef JEMALLOC_CODE_COVERAGE */
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#ifdef PUB_MEMORY_TRACKER
#define JEMALLOC_DEBUG
#endif
/* JEMALLOC_STATS enables statistics calculation. */
#ifdef PUB_MEMORY_TRACKER
#define JEMALLOC_STATS
#endif
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#ifndef PUB_MEMORY_TRACKER
#define JEMALLOC_TCACHE
#endif
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support Valgrind. */
/* #undef JEMALLOC_VALGRIND */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#define LG_TINY_MIN 3
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
/* #undef JEMALLOC_MUNMAP */
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE abort
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#ifdef PUB_MEMORY_TRACKER
#define JEMALLOC_IVSALLOC
#endif
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
* new pages will be demand-zeroed if the
* address region is later touched.
*/
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* Defined if transparent huge page support is enabled. */
/* #undef JEMALLOC_THP */
/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
/* C99 restrict keyword supported. */
/*#define JEMALLOC_HAS_RESTRICT 1 */
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#if (defined(__LP64__) && __LP64__)
#define LG_SIZEOF_LONG 3
#else
#define LG_SIZEOF_LONG 2
#endif
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#define JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#define JEMALLOC_GLIBC_MEMALIGN_HOOK
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#define JEMALLOC_EXPORT /**/
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */

View File

@ -0,0 +1,57 @@
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
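/*
 * Usage sketch (example_helper and JEMALLOC_EXAMPLE_C_ are hypothetical
 * names): headers pair these macros with JEMALLOC_ENABLE_INLINE so a function
 * body is either inlined into every caller or compiled exactly once into the
 * matching .c file when inlining is disabled:
 *
 *     #ifndef JEMALLOC_ENABLE_INLINE
 *     int example_helper(int x);
 *     #endif
 *     #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXAMPLE_C_))
 *     JEMALLOC_INLINE int
 *     example_helper(int x)
 *     {
 *         return (x + 1);
 *     }
 *     #endif
 *
 * The mb.h and mutex.h headers later in this commit follow this pattern.
 */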
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif

View File

@ -0,0 +1,115 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
/* This is a true memory barrier. */
asm volatile ("pusha;"
"xor %%eax,%%eax;"
"cpuid;"
"popa;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
*/
asm volatile ("nop;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("sfence"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("eieio"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("membar #StoreStore"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
__sync_synchronize();
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void)
{
malloc_mutex_t mtx;
malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
malloc_mutex_lock(TSDN_NULL, &mtx);
malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
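/*
 * Usage sketch (node, value, v, and published are hypothetical names):
 * mb_write() orders a data store before the store that publishes it, e.g.
 *
 *     node->value = v;
 *     mb_write();
 *     published = node;
 *
 * so the pointer cannot become visible before the data it refers to.
 */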

View File

@ -0,0 +1,145 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
}
witness_lock(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_unlock(tsdn, &mutex->witness);
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
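/*
 * Usage sketch (example_mtx is a hypothetical mutex; statically initialized
 * mutexes use MALLOC_MUTEX_INITIALIZER, others go through malloc_mutex_init()
 * with a name and witness rank):
 *
 *     static malloc_mutex_t example_mtx = MALLOC_MUTEX_INITIALIZER;
 *
 *     malloc_mutex_lock(tsdn, &example_mtx);
 *     ... critical section ...
 *     malloc_mutex_unlock(tsdn, &example_mtx);
 *
 * Passing the tsdn lets the witness machinery check lock ordering in debug
 * builds.
 */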

View File

@ -0,0 +1,48 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct nstime_s nstime_t;
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
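/*
 * Worked out for reference: a uint64_t holds at most 2^64 - 1 ns, i.e.
 * 18446744073709551615 / 10^9 ~= 1.8e10 seconds ~= 584 years. The cap is one
 * below that quotient, presumably so that sec * 10^9 plus any nsec < 10^9
 * still fits: 18446744072 * 10^9 + 999999999 <= 2^64 - 1.
 */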
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct nstime_s {
uint64_t ns;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool nstime_monotonic(void);
bool nstime_update(nstime_t *time);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,29 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
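/*
 * Usage sketch (illustrative only; 4096 is an arbitrary size): callers
 * reserve and release page runs through these wrappers rather than calling
 * mmap()/VirtualAlloc() directly, e.g.
 *
 *     bool commit = true;
 *     void *addr = pages_map(NULL, 4096, &commit);
 *     if (addr != NULL)
 *         pages_unmap(addr, 4096);
 *
 * with commit being the in/out commit flag from the prototype above.
 */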

View File

@ -0,0 +1,345 @@
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With an auxiliary two-pass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
* We can delete from aux list without merging it, but we need \
* to merge if we are dealing with the root node. \
*/ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
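/*
 * Usage sketch (node_t, node_heap_t, node_cmp, and the "link" field are
 * hypothetical names): a consumer embeds the link structure with phn(),
 * declares the heap type with ph(), and instantiates the implementation:
 *
 *     typedef struct node_s node_t;
 *     struct node_s {
 *         uint64_t    key;
 *         phn(node_t) link;
 *     };
 *     typedef ph(node_t) node_heap_t;
 *
 *     static int
 *     node_cmp(const node_t *a, const node_t *b)
 *     {
 *         return ((a->key > b->key) - (a->key < b->key));
 *     }
 *
 *     ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
 *
 * This generates node_heap_new(), node_heap_empty(), node_heap_first(),
 * node_heap_insert(), node_heap_remove_first(), and node_heap_remove().
 */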

View File

@ -0,0 +1,639 @@
#define a0dalloc JEMALLOC_N(a0dalloc)
#define a0get JEMALLOC_N(a0get)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_aalloc JEMALLOC_N(arena_aalloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
#define arena_cleanup JEMALLOC_N(arena_cleanup)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
#define arena_get JEMALLOC_N(arena_get)
#define arena_ichoose JEMALLOC_N(arena_ichoose)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_new JEMALLOC_N(arena_new)
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge JEMALLOC_N(arena_purge)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_reset JEMALLOC_N(arena_reset)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
#define arenas JEMALLOC_N(arenas)
#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
#define atomic_add_p JEMALLOC_N(atomic_add_p)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define atomic_write_p JEMALLOC_N(atomic_write_p)
#define atomic_write_u JEMALLOC_N(atomic_write_u)
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
#define atomic_write_z JEMALLOC_N(atomic_write_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define base_stats_get JEMALLOC_N(base_stats_get)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
#define bootstrap_free JEMALLOC_N(bootstrap_free)
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
#define chunk_deregister JEMALLOC_N(chunk_deregister)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_lookup JEMALLOC_N(chunk_lookup)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
#define extent_node_init JEMALLOC_N(extent_node_init)
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
#define ffs_llu JEMALLOC_N(ffs_llu)
#define ffs_lu JEMALLOC_N(ffs_lu)
#define ffs_u JEMALLOC_N(ffs_u)
#define ffs_u32 JEMALLOC_N(ffs_u32)
#define ffs_u64 JEMALLOC_N(ffs_u64)
#define ffs_zu JEMALLOC_N(ffs_zu)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_aalloc JEMALLOC_N(huge_aalloc)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iaalloc JEMALLOC_N(iaalloc)
#define ialloc JEMALLOC_N(ialloc)
#define iallocztm JEMALLOC_N(iallocztm)
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define idalloc JEMALLOC_N(idalloc)
#define idalloctm JEMALLOC_N(idalloctm)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
#define index2size_lookup JEMALLOC_N(index2size_lookup)
#define index2size_tab JEMALLOC_N(index2size_tab)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define ipallocztm JEMALLOC_N(ipallocztm)
#define iqalloc JEMALLOC_N(iqalloc)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isdalloct JEMALLOC_N(isdalloct)
#define isqalloc JEMALLOC_N(isqalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define large_maxclass JEMALLOC_N(large_maxclass)
#define lg_floor JEMALLOC_N(lg_floor)
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define nhclasses JEMALLOC_N(nhclasses)
#define nlclasses JEMALLOC_N(nlclasses)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
#define nstime_divide JEMALLOC_N(nstime_divide)
#define nstime_idivide JEMALLOC_N(nstime_idivide)
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
#define nstime_init JEMALLOC_N(nstime_init)
#define nstime_init2 JEMALLOC_N(nstime_init2)
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
#define nstime_ns JEMALLOC_N(nstime_ns)
#define nstime_nsec JEMALLOC_N(nstime_nsec)
#define nstime_sec JEMALLOC_N(nstime_sec)
#define nstime_subtract JEMALLOC_N(nstime_subtract)
#define nstime_update JEMALLOC_N(nstime_update)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_decay_time JEMALLOC_N(opt_decay_time)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
#define opt_junk_free JEMALLOC_N(opt_junk_free)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
#define opt_purge JEMALLOC_N(opt_purge)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_thp JEMALLOC_N(opt_thp)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_boot JEMALLOC_N(pages_boot)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_huge JEMALLOC_N(pages_huge)
#define pages_map JEMALLOC_N(pages_map)
#define pages_nohuge JEMALLOC_N(pages_nohuge)
#define pages_purge JEMALLOC_N(pages_purge)
#define pages_trim JEMALLOC_N(pages_trim)
#define pages_unmap JEMALLOC_N(pages_unmap)
#define pind2sz JEMALLOC_N(pind2sz)
#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
#define prng_range_u32 JEMALLOC_N(prng_range_u32)
#define prng_range_u64 JEMALLOC_N(prng_range_u64)
#define prng_range_zu JEMALLOC_N(prng_range_zu)
#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
#define prof_active JEMALLOC_N(prof_active)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
#define psz2ind JEMALLOC_N(psz2ind)
#define psz2u JEMALLOC_N(psz2u)
#define purge_mode_names JEMALLOC_N(purge_mode_names)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define rtree_child_read JEMALLOC_N(rtree_child_read)
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
#define rtree_set JEMALLOC_N(rtree_set)
#define rtree_start_level JEMALLOC_N(rtree_start_level)
#define rtree_subkey JEMALLOC_N(rtree_subkey)
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
#define rtree_val_read JEMALLOC_N(rtree_val_read)
#define rtree_val_write JEMALLOC_N(rtree_val_write)
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
#define s2u JEMALLOC_N(s2u)
#define s2u_compute JEMALLOC_N(s2u_compute)
#define s2u_lookup JEMALLOC_N(s2u_lookup)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define size2index JEMALLOC_N(size2index)
#define size2index_compute JEMALLOC_N(size2index_compute)
#define size2index_lookup JEMALLOC_N(size2index_lookup)
#define size2index_tab JEMALLOC_N(size2index_tab)
#define spin_adaptive JEMALLOC_N(spin_adaptive)
#define spin_init JEMALLOC_N(spin_init)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
#define tcache_prefork JEMALLOC_N(tcache_prefork)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcaches JEMALLOC_N(tcaches)
#define tcaches_create JEMALLOC_N(tcaches_create)
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
#define tcaches_flush JEMALLOC_N(tcaches_flush)
#define tcaches_get JEMALLOC_N(tcaches_get)
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
#define ticker_copy JEMALLOC_N(ticker_copy)
#define ticker_init JEMALLOC_N(ticker_init)
#define ticker_read JEMALLOC_N(ticker_read)
#define ticker_tick JEMALLOC_N(ticker_tick)
#define ticker_ticks JEMALLOC_N(ticker_ticks)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
#define tsdn_null JEMALLOC_N(tsdn_null)
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
#define witness_depth_error JEMALLOC_N(witness_depth_error)
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
#define witness_init JEMALLOC_N(witness_init)
#define witness_lock JEMALLOC_N(witness_lock)
#define witness_lock_error JEMALLOC_N(witness_lock_error)
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
#define witness_owner JEMALLOC_N(witness_owner)
#define witness_owner_error JEMALLOC_N(witness_owner_error)
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
#define witness_prefork JEMALLOC_N(witness_prefork)
#define witness_unlock JEMALLOC_N(witness_unlock)
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
#define zone_register JEMALLOC_N(zone_register)
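
For readers unfamiliar with this mangling scheme: JEMALLOC_N() is provided by jemalloc's generated internal headers (not shown in this file) and prepends the configured private-namespace prefix, by default je_. The toy program below only illustrates that expansion; demo_symbol and the hard-coded je_ prefix are assumptions for the example, not taken from these sources.

#include <stdio.h>

/* Toy stand-in: the real JEMALLOC_N comes from jemalloc's generated internal
 * headers, with the prefix chosen at configure time (je_ by default). */
#define JEMALLOC_N(n) je_##n
/* Same pattern as the lines above, applied to a made-up symbol. */
#define demo_symbol JEMALLOC_N(demo_symbol)

/* Because of the mapping, this actually defines je_demo_symbol. */
static int demo_symbol(void) { return 42; }

int main(void) {
    /* The call site still reads demo_symbol(), but after preprocessing it
     * refers to the mangled name je_demo_symbol. */
    printf("%d\n", demo_symbol());
    return 0;
}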

View File

@ -0,0 +1,639 @@
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
arena_bitselm_get_const
arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbitsp_get_const
arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get_const
arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arenas
arenas_tdata_bypass_cleanup
arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_mergeable
chunk_dss_prec_get
chunk_dss_prec_set
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_purge_wrapper
chunk_register
chunks_rtree
chunksize
chunksize_mask
ckh_count
ckh_delete
ckh_insert
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_size_quantize_ceil
extent_size_quantize_floor
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u
ffs_u32
ffs_u64
ffs_zu
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
ialloc
iallocztm
iarena_cleanup
idalloc
idalloctm
in_valgrind
index2size
index2size_compute
index2size_lookup
index2size_tab
ipalloc
ipalloct
ipallocztm
iqalloc
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
narenas_auto
narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy
nstime_divide
nstime_idivide
nstime_imultiply
nstime_init
nstime_init2
nstime_monotonic
nstime_ns
nstime_nsec
nstime_sec
nstime_subtract
nstime_update
opt_abort
opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_purge
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_thp
opt_utrace
opt_xmalloc
opt_zero
p2rz
pages_boot
pages_commit
pages_decommit
pages_huge
pages_map
pages_nohuge
pages_purge
pages_trim
pages_unmap
pind2sz
pind2sz_compute
pind2sz_lookup
pind2sz_tab
pow2_ceil_u32
pow2_ceil_u64
pow2_ceil_zu
prng_lg_range_u32
prng_lg_range_u64
prng_lg_range_zu
prng_range_u32
prng_range_u64
prng_range_zu
prng_state_next_u32
prng_state_next_u64
prng_state_next_zu
prof_active
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_dump_header
prof_dump_open
prof_free
prof_free_sampled_object
prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork0
prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
psz2ind
psz2u
purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_new
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
run_quantize_ceil
run_quantize_floor
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
size2index
size2index_compute
size2index_lookup
size2index_tab
spin_adaptive
spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot
tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_enabled_cleanup
tcache_enabled_get
tcache_enabled_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcache_postfork_child
tcache_postfork_parent
tcache_prefork
tcache_salloc
tcache_stats_merge
tcaches
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
thread_allocated_cleanup
thread_deallocated_cleanup
ticker_copy
ticker_init
ticker_read
ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
tsd_arenap_get
tsd_arenas_tdata_bypass_get
tsd_arenas_tdata_bypass_set
tsd_arenas_tdata_bypassp_get
tsd_arenas_tdata_get
tsd_arenas_tdata_set
tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_fetch_impl
tsd_get
tsd_get_allocates
tsd_iarena_get
tsd_iarena_set
tsd_iarenap_get
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
tsd_tcachep_get
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
tsd_tsdn
tsd_witness_fork_get
tsd_witness_fork_set
tsd_witness_forkp_get
tsd_witnesses_get
tsd_witnesses_set
tsd_witnessesp_get
tsdn_fetch
tsdn_null
tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_depth
witness_assert_depth_to_rank
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_depth_error
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_not_owner_error
witness_owner
witness_owner_error
witness_postfork_child
witness_postfork_parent
witness_prefork
witness_unlock
witnesses_cleanup
zone_register

View File

@ -0,0 +1,639 @@
#undef a0dalloc
#undef a0get
#undef a0malloc
#undef arena_aalloc
#undef arena_alloc_junk_small
#undef arena_basic_stats_merge
#undef arena_bin_index
#undef arena_bin_info
#undef arena_bitselm_get_const
#undef arena_bitselm_get_mutable
#undef arena_boot
#undef arena_choose
#undef arena_choose_hard
#undef arena_choose_impl
#undef arena_chunk_alloc_huge
#undef arena_chunk_cache_maybe_insert
#undef arena_chunk_cache_maybe_remove
#undef arena_chunk_dalloc_huge
#undef arena_chunk_ralloc_huge_expand
#undef arena_chunk_ralloc_huge_shrink
#undef arena_chunk_ralloc_huge_similar
#undef arena_cleanup
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_junked_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_junked_locked
#undef arena_dalloc_small
#undef arena_decay_tick
#undef arena_decay_ticks
#undef arena_decay_time_default_get
#undef arena_decay_time_default_set
#undef arena_decay_time_get
#undef arena_decay_time_set
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_extent_sn_next
#undef arena_get
#undef arena_ichoose
#undef arena_init
#undef arena_lg_dirty_mult_default_get
#undef arena_lg_dirty_mult_default_set
#undef arena_lg_dirty_mult_get
#undef arena_lg_dirty_mult_set
#undef arena_malloc
#undef arena_malloc_hard
#undef arena_malloc_large
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_decommitted_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_internal_set
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_size_decode
#undef arena_mapbits_size_encode
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbitsp_get_const
#undef arena_mapbitsp_get_mutable
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_maxrun
#undef arena_maybe_purge
#undef arena_metadata_allocated_add
#undef arena_metadata_allocated_get
#undef arena_metadata_allocated_sub
#undef arena_migrate
#undef arena_miscelm_get_const
#undef arena_miscelm_get_mutable
#undef arena_miscelm_to_pageind
#undef arena_miscelm_to_rpages
#undef arena_new
#undef arena_node_alloc
#undef arena_node_dalloc
#undef arena_nthreads_dec
#undef arena_nthreads_get
#undef arena_nthreads_inc
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork0
#undef arena_prefork1
#undef arena_prefork2
#undef arena_prefork3
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_promoted
#undef arena_prof_tctx_get
#undef arena_prof_tctx_reset
#undef arena_prof_tctx_set
#undef arena_ptr_small_binind_get
#undef arena_purge
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_rd_to_miscelm
#undef arena_redzone_corruption
#undef arena_reset
#undef arena_run_regind
#undef arena_run_to_miscelm
#undef arena_salloc
#undef arena_sdalloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arena_tdata_get
#undef arena_tdata_get_hard
#undef arenas
#undef arenas_tdata_bypass_cleanup
#undef arenas_tdata_cleanup
#undef atomic_add_p
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_cas_p
#undef atomic_cas_u
#undef atomic_cas_uint32
#undef atomic_cas_uint64
#undef atomic_cas_z
#undef atomic_sub_p
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef atomic_write_p
#undef atomic_write_u
#undef atomic_write_uint32
#undef atomic_write_uint64
#undef atomic_write_z
#undef base_alloc
#undef base_boot
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef base_stats_get
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bootstrap_calloc
#undef bootstrap_free
#undef bootstrap_malloc
#undef bt_init
#undef buferror
#undef chunk_alloc_base
#undef chunk_alloc_cache
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_alloc_wrapper
#undef chunk_boot
#undef chunk_dalloc_cache
#undef chunk_dalloc_mmap
#undef chunk_dalloc_wrapper
#undef chunk_deregister
#undef chunk_dss_boot
#undef chunk_dss_mergeable
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_hooks_default
#undef chunk_hooks_get
#undef chunk_hooks_set
#undef chunk_in_dss
#undef chunk_lookup
#undef chunk_npages
#undef chunk_purge_wrapper
#undef chunk_register
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_count
#undef ckh_delete
#undef ckh_insert
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef decay_ticker_get
#undef dss_prec_names
#undef extent_node_achunk_get
#undef extent_node_achunk_set
#undef extent_node_addr_get
#undef extent_node_addr_set
#undef extent_node_arena_get
#undef extent_node_arena_set
#undef extent_node_committed_get
#undef extent_node_committed_set
#undef extent_node_dirty_insert
#undef extent_node_dirty_linkage_init
#undef extent_node_dirty_remove
#undef extent_node_init
#undef extent_node_prof_tctx_get
#undef extent_node_prof_tctx_set
#undef extent_node_size_get
#undef extent_node_size_set
#undef extent_node_sn_get
#undef extent_node_sn_set
#undef extent_node_zeroed_get
#undef extent_node_zeroed_set
#undef extent_size_quantize_ceil
#undef extent_size_quantize_floor
#undef extent_tree_ad_destroy
#undef extent_tree_ad_destroy_recurse
#undef extent_tree_ad_empty
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szsnad_destroy
#undef extent_tree_szsnad_destroy_recurse
#undef extent_tree_szsnad_empty
#undef extent_tree_szsnad_first
#undef extent_tree_szsnad_insert
#undef extent_tree_szsnad_iter
#undef extent_tree_szsnad_iter_recurse
#undef extent_tree_szsnad_iter_start
#undef extent_tree_szsnad_last
#undef extent_tree_szsnad_new
#undef extent_tree_szsnad_next
#undef extent_tree_szsnad_nsearch
#undef extent_tree_szsnad_prev
#undef extent_tree_szsnad_psearch
#undef extent_tree_szsnad_remove
#undef extent_tree_szsnad_reverse_iter
#undef extent_tree_szsnad_reverse_iter_recurse
#undef extent_tree_szsnad_reverse_iter_start
#undef extent_tree_szsnad_search
#undef ffs_llu
#undef ffs_lu
#undef ffs_u
#undef ffs_u32
#undef ffs_u64
#undef ffs_zu
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_aalloc
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_palloc
#undef huge_prof_tctx_get
#undef huge_prof_tctx_reset
#undef huge_prof_tctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef iaalloc
#undef ialloc
#undef iallocztm
#undef iarena_cleanup
#undef idalloc
#undef idalloctm
#undef in_valgrind
#undef index2size
#undef index2size_compute
#undef index2size_lookup
#undef index2size_tab
#undef ipalloc
#undef ipalloct
#undef ipallocztm
#undef iqalloc
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isdalloct
#undef isqalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef large_maxclass
#undef lg_floor
#undef lg_prof_sample
#undef malloc_cprintf
#undef malloc_mutex_assert_not_owner
#undef malloc_mutex_assert_owner
#undef malloc_mutex_boot
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot0
#undef malloc_tsd_boot1
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef map_misc_offset
#undef mb_write
#undef narenas_auto
#undef narenas_tdata_cleanup
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef nhclasses
#undef nlclasses
#undef nstime_add
#undef nstime_compare
#undef nstime_copy
#undef nstime_divide
#undef nstime_idivide
#undef nstime_imultiply
#undef nstime_init
#undef nstime_init2
#undef nstime_monotonic
#undef nstime_ns
#undef nstime_nsec
#undef nstime_sec
#undef nstime_subtract
#undef nstime_update
#undef opt_abort
#undef opt_decay_time
#undef opt_dss
#undef opt_junk
#undef opt_junk_alloc
#undef opt_junk_free
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_prof_thread_active_init
#undef opt_purge
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_thp
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_boot
#undef pages_commit
#undef pages_decommit
#undef pages_huge
#undef pages_map
#undef pages_nohuge
#undef pages_purge
#undef pages_trim
#undef pages_unmap
#undef pind2sz
#undef pind2sz_compute
#undef pind2sz_lookup
#undef pind2sz_tab
#undef pow2_ceil_u32
#undef pow2_ceil_u64
#undef pow2_ceil_zu
#undef prng_lg_range_u32
#undef prng_lg_range_u64
#undef prng_lg_range_zu
#undef prng_range_u32
#undef prng_range_u64
#undef prng_range_zu
#undef prng_state_next_u32
#undef prng_state_next_u64
#undef prng_state_next_zu
#undef prof_active
#undef prof_active_get
#undef prof_active_get_unlocked
#undef prof_active_set
#undef prof_alloc_prep
#undef prof_alloc_rollback
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_dump_header
#undef prof_dump_open
#undef prof_free
#undef prof_free_sampled_object
#undef prof_gdump
#undef prof_gdump_get
#undef prof_gdump_get_unlocked
#undef prof_gdump_set
#undef prof_gdump_val
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_sample_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork0
#undef prof_prefork1
#undef prof_realloc
#undef prof_reset
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tctx_get
#undef prof_tctx_reset
#undef prof_tctx_set
#undef prof_tdata_cleanup
#undef prof_tdata_count
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_reinit
#undef prof_thread_active_get
#undef prof_thread_active_init_get
#undef prof_thread_active_init_set
#undef prof_thread_active_set
#undef prof_thread_name_get
#undef prof_thread_name_set
#undef psz2ind
#undef psz2u
#undef purge_mode_names
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_alloc_hook_work
#undef quarantine_cleanup
#undef rtree_child_read
#undef rtree_child_read_hard
#undef rtree_child_tryread
#undef rtree_delete
#undef rtree_get
#undef rtree_new
#undef rtree_node_valid
#undef rtree_set
#undef rtree_start_level
#undef rtree_subkey
#undef rtree_subtree_read
#undef rtree_subtree_read_hard
#undef rtree_subtree_tryread
#undef rtree_val_read
#undef rtree_val_write
#undef run_quantize_ceil
#undef run_quantize_floor
#undef s2u
#undef s2u_compute
#undef s2u_lookup
#undef sa2u
#undef set_errno
#undef size2index
#undef size2index_compute
#undef size2index_lookup
#undef size2index_tab
#undef spin_adaptive
#undef spin_init
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_reassociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot
#undef tcache_cleanup
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_enabled_cleanup
#undef tcache_enabled_get
#undef tcache_enabled_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_maxclass
#undef tcache_postfork_child
#undef tcache_postfork_parent
#undef tcache_prefork
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcaches
#undef tcaches_create
#undef tcaches_destroy
#undef tcaches_flush
#undef tcaches_get
#undef thread_allocated_cleanup
#undef thread_deallocated_cleanup
#undef ticker_copy
#undef ticker_init
#undef ticker_read
#undef ticker_tick
#undef ticker_ticks
#undef tsd_arena_get
#undef tsd_arena_set
#undef tsd_arenap_get
#undef tsd_arenas_tdata_bypass_get
#undef tsd_arenas_tdata_bypass_set
#undef tsd_arenas_tdata_bypassp_get
#undef tsd_arenas_tdata_get
#undef tsd_arenas_tdata_set
#undef tsd_arenas_tdatap_get
#undef tsd_boot
#undef tsd_boot0
#undef tsd_boot1
#undef tsd_booted
#undef tsd_booted_get
#undef tsd_cleanup
#undef tsd_cleanup_wrapper
#undef tsd_fetch
#undef tsd_fetch_impl
#undef tsd_get
#undef tsd_get_allocates
#undef tsd_iarena_get
#undef tsd_iarena_set
#undef tsd_iarenap_get
#undef tsd_initialized
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef tsd_init_head
#undef tsd_narenas_tdata_get
#undef tsd_narenas_tdata_set
#undef tsd_narenas_tdatap_get
#undef tsd_wrapper_get
#undef tsd_wrapper_set
#undef tsd_nominal
#undef tsd_prof_tdata_get
#undef tsd_prof_tdata_set
#undef tsd_prof_tdatap_get
#undef tsd_quarantine_get
#undef tsd_quarantine_set
#undef tsd_quarantinep_get
#undef tsd_set
#undef tsd_tcache_enabled_get
#undef tsd_tcache_enabled_set
#undef tsd_tcache_enabledp_get
#undef tsd_tcache_get
#undef tsd_tcache_set
#undef tsd_tcachep_get
#undef tsd_thread_allocated_get
#undef tsd_thread_allocated_set
#undef tsd_thread_allocatedp_get
#undef tsd_thread_deallocated_get
#undef tsd_thread_deallocated_set
#undef tsd_thread_deallocatedp_get
#undef tsd_tls
#undef tsd_tsd
#undef tsd_tsdn
#undef tsd_witness_fork_get
#undef tsd_witness_fork_set
#undef tsd_witness_forkp_get
#undef tsd_witnesses_get
#undef tsd_witnesses_set
#undef tsd_witnessesp_get
#undef tsdn_fetch
#undef tsdn_null
#undef tsdn_tsd
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef witness_assert_depth
#undef witness_assert_depth_to_rank
#undef witness_assert_lockless
#undef witness_assert_not_owner
#undef witness_assert_owner
#undef witness_depth_error
#undef witness_fork_cleanup
#undef witness_init
#undef witness_lock
#undef witness_lock_error
#undef witness_not_owner_error
#undef witness_owner
#undef witness_owner_error
#undef witness_postfork_child
#undef witness_postfork_parent
#undef witness_prefork
#undef witness_unlock
#undef witnesses_cleanup
#undef zone_register

View File

@ -0,0 +1,207 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(x) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t prng_state_next_u32(uint32_t state);
uint64_t prng_state_next_u64(uint64_t state);
size_t prng_state_next_zu(size_t state);
uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
bool atomic);
uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
uint64_t prng_range_u64(uint64_t *state, uint64_t range);
size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
return ((state * PRNG_A_32) + PRNG_C_32);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
return ((state * PRNG_A_64) + PRNG_C_64);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
return ((state * PRNG_A_64) + PRNG_C_64);
#else
#error Unsupported pointer size
#endif
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
uint32_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
if (atomic) {
uint32_t state0;
do {
state0 = atomic_read_uint32(state);
state1 = prng_state_next_u32(state0);
} while (atomic_cas_uint32(state, state0, state1));
} else {
state1 = prng_state_next_u32(*state);
*state = state1;
}
ret = state1 >> (32 - lg_range);
return (ret);
}
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
size_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
if (atomic) {
size_t state0;
do {
state0 = atomic_read_z(state);
state1 = prng_state_next_zu(state0);
} while (atomic_cas_z(state, state0, state1));
} else {
state1 = prng_state_next_zu(*state);
*state = state1;
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
uint32_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
uint64_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
size_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
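
A standalone sketch of the recurrence and top-bits extraction documented above (the demo_* names are made up; this is not part of the vendored sources, just the same arithmetic using the 32-bit constants):

#include <stdint.h>
#include <stdio.h>

/* Same constants as PRNG_A_32 / PRNG_C_32 above. */
#define DEMO_PRNG_A_32 UINT32_C(1103515241)
#define DEMO_PRNG_C_32 UINT32_C(12347)

/* state' = (a*state + c) mod 2^32, via unsigned wraparound. */
static uint32_t demo_state_next_u32(uint32_t state) {
    return (state * DEMO_PRNG_A_32) + DEMO_PRNG_C_32;
}

/* Keep only the lg_range high-order bits, as prng_lg_range_u32() does,
 * because the low-order bits of an LCG with m = 2^32 have short cycles. */
static uint32_t demo_lg_range_u32(uint32_t *state, unsigned lg_range) {
    *state = demo_state_next_u32(*state);
    return *state >> (32 - lg_range);
}

int main(void) {
    uint32_t state = 42;
    for (int i = 0; i < 4; i++)
        printf("%u\n", demo_lg_range_u32(&state, 8)); /* values in [0, 256) */
    return 0;
}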

View File

@ -0,0 +1,547 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* lg(mean bytes between samples). */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (tdata);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (unlikely(tdata == NULL))
return (true);
if (likely(tdata->bytes_until_sample >= usize)) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{
bool sampled, old_sampled;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
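
To put numbers on the sampling options above: opt_lg_prof_sample defaults to LG_PROF_SAMPLE_DEFAULT (19), i.e. roughly one sampled allocation per 2^19 = 524288 bytes on average. The snippet below is only a sketch of how such a geometrically distributed byte threshold can be drawn; jemalloc's actual prof_sample_threshold_update() lives in prof.c and uses the internal PRNG rather than rand().

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Draw a byte threshold so that each allocated byte independently triggers a
 * sample with probability 1/2^lg_sample, giving a mean of about 2^lg_sample
 * bytes between samples. Illustration only, not jemalloc's implementation. */
static uint64_t demo_sample_threshold(unsigned lg_sample) {
    double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0); /* u in (0, 1) */
    double p = 1.0 / (double)((uint64_t)1 << lg_sample);
    return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}

int main(void) {
    for (int i = 0; i < 4; i++)
        printf("%llu\n", (unsigned long long)demo_sample_threshold(19));
    return 0;
}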

View File

@ -0,0 +1,22 @@
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_sdallocx JEMALLOC_N(sdallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
#define je_memalign JEMALLOC_N(memalign)
#define je_valloc JEMALLOC_N(valloc)

View File

@ -0,0 +1,22 @@
malloc_conf:je_malloc_conf
malloc_message:je_malloc_message
malloc:je_malloc
calloc:je_calloc
posix_memalign:je_posix_memalign
aligned_alloc:je_aligned_alloc
realloc:je_realloc
free:je_free
mallocx:je_mallocx
rallocx:je_rallocx
xallocx:je_xallocx
sallocx:je_sallocx
dallocx:je_dallocx
sdallocx:je_sdallocx
nallocx:je_nallocx
mallctl:je_mallctl
mallctlnametomib:je_mallctlnametomib
mallctlbymib:je_mallctlbymib
malloc_stats_print:je_malloc_stats_print
malloc_usable_size:je_malloc_usable_size
memalign:je_memalign
valloc:je_valloc

View File

@ -0,0 +1,22 @@
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_sdallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_malloc_stats_print
#undef je_malloc_usable_size
#undef je_memalign
#undef je_valloc

View File

@ -0,0 +1,81 @@
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
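
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): a
 * doubly linked list built from these macros.  widget_t and the function
 * below are hypothetical, and the companion ring (qr) macros are assumed to
 * be in scope, since ql_elm() and the insert/remove macros expand to them.
 */
typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link; /* Embedded linkage; no separate node allocation. */
};
typedef ql_head(widget_t) widget_list_t;

static widget_t *
widget_list_demo(widget_t *a, widget_t *b)
{
    widget_list_t list;
    widget_t *w;

    ql_new(&list);
    ql_elm_new(a, link);
    ql_elm_new(b, link);
    ql_tail_insert(&list, a, link); /* List: a. */
    ql_tail_insert(&list, b, link); /* List: a, b. */
    ql_foreach(w, &list, link) {
        /* Visits a, then b. */
    }
    ql_remove(&list, a, link); /* List: b. */
    return (ql_first(&list));
}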

View File

@ -0,0 +1,69 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
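
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc):
 * building and walking a small ring directly with the qr macros.  node_t and
 * ring_sum() are hypothetical names used only for this example.
 */
typedef struct node_s node_t;
struct node_s {
    int val;
    qr(node_t) link;
};

static int
ring_sum(node_t *a, node_t *b)
{
    node_t *n;
    int sum = 0;

    qr_new(a, link); /* a is a ring of one. */
    qr_new(b, link);
    qr_after_insert(a, b, link); /* Ring is now a <-> b. */
    qr_foreach(n, a, link)
        sum += n->val;
    return (sum);
}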

View File

@ -0,0 +1,60 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
tsd_t *tsd;
assert(config_fill && opt_quarantine);
tsd = tsd_fetch();
if (tsd_quarantine_get(tsd) == NULL)
quarantine_alloc_hook_work(tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,366 @@
/*
* This radix tree implementation is tailored to the singular purpose of
* associating metadata with chunks that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;
/*
* RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
/*
* The node allocation callback function's argument is the number of contiguous
* rtree_node_elm_t structures to allocate, and the resulting memory must be
* zeroed.
*/
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_node_elm_s {
union {
void *pun;
rtree_node_elm_t *child;
extent_node_t *val;
};
};
struct rtree_level_s {
/*
* A non-NULL subtree points to a subtree rooted along the hypothetical
* path to the leaf node corresponding to key 0. Depending on what keys
* have been used to store to the tree, an arbitrary combination of
* subtree pointers may remain NULL.
*
* Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
* This results in a 3-level tree, and the leftmost leaf can be directly
* accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
* 0x00000000) can be accessed via subtrees[1], and the remainder of the
* tree can be accessed via subtrees[0].
*
* levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
*
* levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
*
* levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
*
* This has practical implications on x64, which currently uses only the
* lower 47 bits of virtual address space in userland, thus leaving
* subtrees[0] unused and avoiding a level of tree traversal.
*/
union {
void *subtree_pun;
rtree_node_elm_t *subtree;
};
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
};
struct rtree_s {
rtree_node_alloc_t *alloc;
rtree_node_dalloc_t *dalloc;
unsigned height;
/*
* Precomputed table used to convert from the number of leading 0 key
* bits to which subtree level to start at.
*/
unsigned start_level[RTREE_HEIGHT_MAX];
rtree_level_t levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
if (unlikely(key == 0))
return (rtree->height - 1);
start_level = rtree->start_level[lg_floor(key) >>
LG_RTREE_BITS_PER_LEVEL];
assert(start_level < rtree->height);
return (start_level);
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
rtree->levels[level].cumbits)) & ((ZU(1) <<
rtree->levels[level].bits) - 1));
}
JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
rtree_node_elm_t *child;
/* Double-checked read (first read may be stale). */
child = elm->child;
if (!dependent && !rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
bool dependent)
{
rtree_node_elm_t *child;
child = rtree_child_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
if (dependent) {
/*
* Reading a val on behalf of a pointer to a valid allocation is
* guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
*/
return (elm->val);
} else {
/*
* An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
return (atomic_read_p(&elm->pun));
}
}
JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{
atomic_write_p(&elm->pun, val);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level, dependent);
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
unsigned start_level;
rtree_node_elm_t *node;
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
node = rtree_child_tryread(&node[subkey], dependent); \
/* Fall through. */
#define RTREE_GET_LEAF(level) \
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
/* \
* node is a leaf, so it contains values rather than \
* child pointers. \
*/ \
return (rtree_val_read(rtree, &node[subkey], \
dependent));
#if RTREE_HEIGHT_MAX > 1
RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
# error Unsupported RTREE_HEIGHT_MAX
#endif
RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
default: not_reached();
}
#undef RTREE_GET_BIAS
not_reached();
}
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
uintptr_t subkey;
unsigned i, start_level;
rtree_node_elm_t *node, *child;
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_read(rtree, start_level, false);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
subkey = rtree_subkey(rtree, key, i);
if (i == rtree->height - 1) {
/*
* node is a leaf, so it contains values rather than
* child pointers.
*/
rtree_val_write(rtree, &node[subkey], val);
return (false);
}
assert(i + 1 < rtree->height);
child = rtree_child_read(rtree, &node[subkey], i, false);
if (child == NULL)
return (true);
}
not_reached();
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
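
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): wiring
 * an rtree up with calloc()/free()-backed node callbacks (assumes <stdlib.h>),
 * then storing and looking up one key.  The function names are hypothetical,
 * and jemalloc itself allocates rtree nodes from its internal base allocator
 * rather than calloc(); this only demonstrates the API shape declared above.
 */
static rtree_node_elm_t *
demo_node_alloc(size_t nelms)
{
    /* The callback contract requires zeroed memory. */
    return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
}

static void
demo_node_dalloc(rtree_node_elm_t *node)
{
    free(node);
}

static extent_node_t *
demo_rtree_usage(void *chunk, extent_node_t *node)
{
    rtree_t rtree;

    /* Key on the full pointer width here for simplicity. */
    if (rtree_new(&rtree, (unsigned)(sizeof(uintptr_t) << 3),
        demo_node_alloc, demo_node_dalloc))
        return (NULL);
    if (rtree_set(&rtree, (uintptr_t)chunk, node))
        return (NULL);
    /* Non-dependent read: performs a synchronized (atomic) lookup. */
    return (rtree_get(&rtree, (uintptr_t)chunk, false));
}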

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,246 @@
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
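
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc):
 * expanding the SMOOTHSTEP table into an array of fixed-point steps and
 * scaling a value by one entry -- the kind of X-macro expansion the table is
 * designed for.  The array and function names are hypothetical.
 */
static const uint64_t smoothstep_tab[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) h,
    SMOOTHSTEP
#undef STEP
};

/*
 * Returns approximately scale * smootherstep(i / SMOOTHSTEP_NSTEPS) for
 * 1 <= i <= SMOOTHSTEP_NSTEPS.  Entries carry SMOOTHSTEP_BFP fractional bits,
 * so scale must be small enough that the product fits in 64 bits.
 */
static uint64_t
smoothstep_scale(size_t i, uint64_t scale)
{
    return ((smoothstep_tab[i - 1] * scale) >> SMOOTHSTEP_BFP);
}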

View File

@ -0,0 +1,51 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct spin_s spin_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct spin_s {
unsigned iteration;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void spin_init(spin_t *spin);
void spin_adaptive(spin_t *spin);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{
spin->iteration = 0;
}
JEMALLOC_INLINE void
spin_adaptive(spin_t *spin)
{
volatile uint64_t i;
for (i = 0; i < (KQU(1) << spin->iteration); i++)
CPU_SPINWAIT;
if (spin->iteration < 63)
spin->iteration++;
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
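
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc):
 * adaptive spinning while waiting for a condition.  try_acquire() is a
 * hypothetical predicate supplied by the caller.
 */
static void
wait_until(bool (*try_acquire)(void))
{
    spin_t spinner;

    spin_init(&spinner);
    while (!try_acquire()) {
        /* Busy-waits 1, 2, 4, ... CPU_SPINWAIT iterations per call. */
        spin_adaptive(&spinner);
    }
}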

View File

@ -0,0 +1,197 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
};
struct malloc_bin_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/*
* Current number of regions of this size class, including regions
* currently cached by tcache.
*/
size_t curregs;
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t reruns;
/* Current number of runs in this bin. */
size_t curruns;
};
struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/*
* Current number of runs of this size class, including runs currently
* cached by tcache.
*/
size_t curruns;
};
struct malloc_huge_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/* Current number of (multi-)chunk allocations of this size class. */
size_t curhchunks;
};
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/*
* Number of bytes currently retained as a side effect of munmap() being
* disabled/bypassed. Retained bytes are technically mapped (though
* always decommitted or purged), but they are excluded from the mapped
* statistic (above).
*/
size_t retained;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t npurge;
uint64_t nmadvise;
uint64_t purged;
/*
* Number of bytes currently mapped purely for metadata purposes, and
* number of bytes currently allocated for internal metadata.
*/
size_t metadata_mapped;
size_t metadata_allocated; /* Protected via atomic_*_z(). */
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
size_t allocated_huge;
uint64_t nmalloc_huge;
uint64_t ndalloc_huge;
/* One element for each large size class. */
malloc_large_stats_t *lstats;
/* One element for each huge size class. */
malloc_huge_stats_t *hstats;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
extern size_t stats_cactive;
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
return (atomic_read_z(&stats_cactive));
}
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
assert(size > 0);
assert((size & chunksize_mask) == 0);
atomic_add_z(&stats_cactive, size);
}
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
assert(size > 0);
assert((size & chunksize_mask) == 0);
atomic_sub_z(&stats_cactive, size);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
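
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): the
 * cactive counter is only ever adjusted in chunk-aligned amounts, as the
 * asserts above require.  account_chunks() is a hypothetical helper;
 * chunksize is jemalloc's global chunk size, and nchunks must be nonzero.
 */
static void
account_chunks(size_t nchunks)
{
    size_t bytes = nchunks * chunksize;

    stats_cactive_add(bytes); /* Chunks become active. */
    /* ... the chunks are in use ... */
    stats_cactive_sub(bytes); /* Chunks are retired again. */
}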

View File

@ -0,0 +1,472 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
cache. This is an additional constraint beyond the otherwise-imposed limit of
twice the number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
tcache_enabled_false = 0, /* Enable cast to/from bool. */
tcache_enabled_true = 1,
tcache_enabled_default = 2
} tcache_enabled_t;
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
tcache_bin_stats_t tstats;
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
/*
* To make use of adjacent cacheline prefetch, the items in the avail
stack go to higher addresses for newer allocations. avail points
* just above the available space, which means that
* avail[-ncached, ... -1] are available items and the lowest item will
* be allocated first.
*/
void **avail; /* Stack of available objects. */
};
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
ticker_t gc_ticker; /* Drives incremental GC. */
szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
union {
tcache_t *tcache;
tcaches_t *next;
};
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
* completely disjoint from this data structure. tcaches starts off as a sparse
* array, so it has no physical memory footprint until individual pages are
* touched. This allows the entire array to be allocated the first time an
* explicit tcache is created without a disproportionate impact on memory usage.
*/
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
tsd_t *tsd;
cassert(config_tcache);
tsd = tsd_fetch();
tcache_cleanup(tsd);
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
tsd_t *tsd;
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
tsd = tsd_fetch();
tcache_enabled = tsd_tcache_enabled_get(tsd);
if (tcache_enabled == tcache_enabled_default) {
tcache_enabled = (tcache_enabled_t)opt_tcache;
tsd_tcache_enabled_set(tsd, tcache_enabled);
}
return ((bool)tcache_enabled);
}
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
tsd_t *tsd;
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
tsd = tsd_fetch();
tcache_enabled = (tcache_enabled_t)enabled;
tsd_tcache_enabled_set(tsd, tcache_enabled);
if (!enabled)
tcache_cleanup(tsd);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
tcache_t *tcache;
if (!config_tcache)
return (NULL);
tcache = tsd_tcache_get(tsd);
if (!create)
return (tcache);
if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
tcache = tcache_get_hard(tsd);
tsd_tcache_set(tsd, tcache);
}
return (tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{
if (TCACHE_GC_INCR == 0)
return;
if (unlikely(ticker_tick(&tcache->gc_ticker)))
tcache_event_hard(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
void *ret;
if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
*tcache_success = false;
return (NULL);
}
/*
* tcache_success (instead of ret) should be checked upon the return of
* this function. We avoid checking (ret == NULL) because there is
* never a null stored on the avail stack (which is unknown to the
* compiler), and eagerly checking ret would cause pipeline stall
* (waiting for the cacheline).
*/
*tcache_success = true;
ret = *(tbin->avail - tbin->ncached);
tbin->ncached--;
if (unlikely((int)tbin->ncached < tbin->low_water))
tbin->low_water = tbin->ncached;
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path)
{
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false)
return (NULL);
}
assert(ret);
/*
* Only compute usize if required. The checks in the following if
* statement are all static.
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
usize = index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else {
if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
memset(ret, 0, usize);
}
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += usize;
tcache_event(tsd, tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path)
{
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
if (ret == NULL)
return (NULL);
} else {
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
/* Only compute usize on demand */
if (config_prof || (slow_path && config_fill) ||
unlikely(zero)) {
usize = index2size(binind);
assert(usize <= tcache_maxclass);
}
if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
memset(ret, JEMALLOC_ALLOC_JUNK,
usize);
} else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
memset(ret, 0, usize);
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += usize;
}
tcache_event(tsd, tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
tcache_bin_flush_small(tsd, tcache, tbin, binind,
(tbin_info->ncached_max >> 1));
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->ncached++;
*(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
bool slow_path)
{
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_large(ptr, size);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
tcache_bin_flush_large(tsd, tbin, binind,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->ncached++;
*(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL)) {
elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
NULL));
}
return (elm->tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
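
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc's
 * sources): the explicit tcaches declared above back the public
 * tcache.create mallctl and the MALLOCX_TCACHE() flag.  This standalone
 * snippet assumes the installed public header <jemalloc/jemalloc.h> and
 * unprefixed public symbols.
 */
#include <jemalloc/jemalloc.h>

static void *
alloc_from_private_tcache(size_t size, unsigned *tc_ind)
{
    size_t sz = sizeof(*tc_ind);

    /* Create an explicit tcache; its index is written to *tc_ind. */
    if (mallctl("tcache.create", (void *)tc_ind, &sz, NULL, 0) != 0)
        return (NULL);
    /* Allocate through that tcache instead of the automatic per-thread one. */
    return (mallocx(size, MALLOCX_TCACHE(*tc_ind)));
}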

View File

@ -0,0 +1,75 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ticker_s ticker_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ticker_s {
int32_t tick;
int32_t nticks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void ticker_init(ticker_t *ticker, int32_t nticks);
void ticker_copy(ticker_t *ticker, const ticker_t *other);
int32_t ticker_read(const ticker_t *ticker);
bool ticker_ticks(ticker_t *ticker, int32_t nticks);
bool ticker_tick(ticker_t *ticker);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{
ticker->tick = nticks;
ticker->nticks = nticks;
}
JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{
*ticker = *other;
}
JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{
return (ticker->tick);
}
JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return (true);
}
ticker->tick -= nticks;
return (false);
}
JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{
return (ticker_ticks(ticker, 1));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
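
/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc):
 * running a periodic maintenance step from a fast path, the same pattern the
 * tcache GC ticker uses.  The names below are hypothetical.
 */
static ticker_t maintenance_ticker;

static void
maintenance_boot(void)
{
    ticker_init(&maintenance_ticker, 64);
}

static void
on_fast_path_event(void)
{
    /* Fires roughly once per 64 calls; the ticker then rearms itself. */
    if (ticker_tick(&maintenance_ticker)) {
        /* ... do the expensive periodic work here ... */
    }
}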

View File

@ -0,0 +1,788 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
tsd_state_nominal,
tsd_state_purgatory,
tsd_state_reincarnated
} tsd_state_t;
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are five macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_types(example_, example_t)
* malloc_tsd_protos(, example_, example_t)
* malloc_tsd_externs(example_, example_t)
* In example.c:
* malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
* malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get(bool init) {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast the
* function argument to (a_type *), then dereference the resulting pointer to
* access fields, e.g.
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = (example_t *)arg;
*
* example->x = 42;
* [...]
* if ([want the cleanup function to be called again])
* example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
*/
/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#endif
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(bool init); \
a_attr void \
a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
if (a_name##tsd_initialized) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
} \
return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
0) \
return (true); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)(&a_name##tsd_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (init && unlikely(wrapper == NULL)) { \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
a_name##tsd_tsd = TlsAlloc(); \
if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
return (NULL); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
return; \
} \
} \
malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
if (init && unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = (a_name##tsd_wrapper_t *) \
tsd_init_check_recursion(&a_name##tsd_init_head, \
&block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = (void *)wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
tsd_init_finish(&a_name##tsd_init_head, &block); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (pthread_key_create(&a_name##tsd_tsd, \
a_name##tsd_cleanup_wrapper) != 0) \
return (true); \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
return (NULL); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
#endif
#define MALLOC_TSD \
/* O(name, type) */ \
O(tcache, tcache_t *) \
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
O(witnesses, witness_list_t) \
O(witness_fork, bool)
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
NULL, \
0, \
0, \
NULL, \
NULL, \
NULL, \
NULL, \
0, \
false, \
tcache_enabled_default, \
NULL, \
ql_head_initializer(witnesses), \
false \
}
struct tsd_s {
tsd_state_t state;
#define O(n, t) \
t n;
MALLOC_TSD
#undef O
};
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
tsd_t tsd;
};
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
void tsd_cleanup(void *arg);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t *tsdn_fetch(void);
bool tsdn_null(const tsdn_t *tsdn);
tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL)
return (NULL);
assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
tsd->state = tsd_state_nominal;
/* Trigger cleanup handler registration. */
tsd_set(tsd);
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
} else
assert(tsd->state == tsd_state_reincarnated);
}
return (tsd);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
return (tsd_fetch_impl(true));
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
return (tsd->state == tsd_state_nominal);
}
#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
if (!tsd_booted_get())
return (NULL);
return (tsd_tsdn(tsd_fetch_impl(false)));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
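
For orientation, a minimal standalone sketch (not part of the vendored header) of the pthread thread-specific-data primitive that the non-TLS fallback branch above wraps: pthread_key_create(), pthread_setspecific() and pthread_getspecific() with a destructor play the roles of tsd_boot0(), tsd_set(), tsd_get() and a_cleanup.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t demo_key;

/* Runs at thread exit for a non-NULL value, like a_cleanup above. */
static void
demo_cleanup(void *arg)
{
	free(arg);
}

static void *
demo_worker(void *unused)
{
	int *slot = malloc(sizeof(*slot));

	(void)unused;
	if (slot == NULL)
		return (NULL);
	*slot = 42;
	pthread_setspecific(demo_key, slot);	/* tsd_set() analogue. */
	printf("worker sees %d\n",
	    *(int *)pthread_getspecific(demo_key));	/* tsd_get() analogue. */
	return (NULL);
}

int
main(void)
{
	pthread_t thd;

	if (pthread_key_create(&demo_key, demo_cleanup) != 0)	/* boot analogue. */
		return (1);
	pthread_create(&thd, NULL, demo_worker, NULL);
	pthread_join(thd, NULL);
	return (0);
}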

View File

@ -0,0 +1,342 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
#endif
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
#endif
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
#include "jemalloc/internal/assert.h"
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if (unlikely(!(c))) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
unsigned ffs_llu(unsigned long long bitmap);
unsigned ffs_lu(unsigned long bitmap);
unsigned ffs_u(unsigned bitmap);
unsigned ffs_zu(size_t bitmap);
unsigned ffs_u64(uint64_t bitmap);
unsigned ffs_u32(uint32_t bitmap);
uint64_t pow2_ceil_u64(uint64_t x);
uint32_t pow2_ceil_u32(uint32_t x);
size_t pow2_ceil_zu(size_t x);
unsigned lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap)
{
return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap)
{
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return (ffs_lu(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return (ffs_llu(bitmap));
#else
#error No implementation for size_t ffs()
#endif
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap)
{
#if LG_SIZEOF_LONG == 3
return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
return (ffs_llu(bitmap));
#else
#error No implementation for 64-bit ffs()
#endif
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap)
{
#if LG_SIZEOF_INT == 2
return (ffs_u(bitmap));
#else
#error No implementation for 32-bit ffs()
#endif
return (ffs_u(bitmap));
}
JEMALLOC_INLINE uint64_t
pow2_ceil_u64(uint64_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
x++;
return (x);
}
JEMALLOC_INLINE uint32_t
pow2_ceil_u32(uint32_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x++;
return (x);
}
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return (pow2_ceil_u64(x));
#else
return (pow2_ceil_u32(x));
#endif
}
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return ((unsigned)ret);
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
unsigned long ret;
assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return ((unsigned)ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
# error "Unsupported type size for lg_floor()"
#endif
}
#else
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX)
return ((8 << LG_SIZEOF_PTR) - 1);
x++;
return (ffs_zu(x) - 2);
}
#endif
/* Set error code. */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
SetLastError(errnum);
#else
errno = errnum;
#endif
}
/* Get last error code. */
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
return (GetLastError());
#else
return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
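
As a standalone illustration (not part of the vendored header), the bit-smearing round-up that pow2_ceil_u32() above performs can be exercised like this:

#include <stdint.h>
#include <stdio.h>

/* Same operation as pow2_ceil_u32() above: smallest power of 2 >= x. */
static uint32_t
demo_pow2_ceil_u32(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x++;
	return (x);
}

int
main(void)
{
	/* 1000 rounds up to 1024; an exact power of 2 is unchanged. */
	printf("%u %u\n", demo_pow2_ceil_u32(1000), demo_pow2_ceil_u32(1024));
	return (0);
}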

View File

@ -0,0 +1,128 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) { \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
zero); \
} \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
((ptr) != (old_ptr))
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
(ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
(old_ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(tsdn, ptr); \
\
if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
old_ptr)) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
old_ptr_null(old_ptr)) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
ptr_null(ptr)) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (unlikely(in_valgrind)) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void valgrind_make_mem_noaccess(void *ptr, size_t usize);
void valgrind_make_mem_undefined(void *ptr, size_t usize);
void valgrind_make_mem_defined(void *ptr, size_t usize);
void valgrind_freelike_block(void *ptr, size_t usize);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,304 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, const witness_t *);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_PROF_DUMP 4U
#define WITNESS_RANK_PROF_BT2GCTX 5U
#define WITNESS_RANK_PROF_TDATAS 6U
#define WITNESS_RANK_PROF_TDATA 7U
#define WITNESS_RANK_PROF_GCTX 8U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 9U
#define WITNESS_RANK_ARENA 9U
#define WITNESS_RANK_ARENA_CHUNKS 10U
#define WITNESS_RANK_ARENA_NODE_CACHE 11U
#define WITNESS_RANK_BASE 12U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the same comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp);
#ifdef JEMALLOC_JET
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *witness_lock_error;
#else
void witness_lock_error(const witness_list_t *witnesses,
const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *witness_owner_error;
#else
void witness_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *witness_not_owner_error;
#else
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *witness_depth_error;
#else
void witness_depth_error(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth);
#endif
void witnesses_cleanup(tsd_t *tsd);
void witness_fork_cleanup(tsd_t *tsd);
void witness_prefork(tsd_t *tsd);
void witness_postfork_parent(tsd_t *tsd);
void witness_postfork_child(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth);
void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE bool
witness_owner(tsd_t *tsd, const witness_t *witness)
{
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
return (true);
}
return (false);
}
JEMALLOC_INLINE void
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
if (witness_owner(tsd, witness))
return;
witness_owner_error(witness);
}
JEMALLOC_INLINE void
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
witness_not_owner_error(witness);
}
}
JEMALLOC_INLINE void
witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth) {
tsd_t *tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
d = 0;
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth)
witness_depth_error(witnesses, rank_inclusive, depth);
}
JEMALLOC_INLINE void
witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn) {
witness_assert_depth(tsdn, 0);
}
JEMALLOC_INLINE void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_not_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, witness) > 0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
JEMALLOC_INLINE void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
/*
* Check whether owner before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
if (witness_owner(tsd, witness)) {
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
} else
witness_assert_owner(tsdn, witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
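
A simplified standalone sketch of the invariant the witness machinery checks: each thread's held locks must be acquired in non-decreasing rank order, otherwise a lock order reversal is reported (here with assert(); the real code calls witness_lock_error() and also handles WITNESS_RANK_OMIT, forking, and equal-rank comparison functions). Illustrative only.

#include <assert.h>
#include <stdio.h>

#define DEMO_MAX_HELD 16

static unsigned demo_held[DEMO_MAX_HELD];
static int demo_nheld;

/* Record an acquisition and compare its rank against the most recently
 * acquired lock, as witness_lock() does per thread. */
static void
demo_witness_lock(unsigned rank)
{
	assert(demo_nheld < DEMO_MAX_HELD);
	assert(demo_nheld == 0 || demo_held[demo_nheld - 1] <= rank);
	demo_held[demo_nheld++] = rank;
}

static void
demo_witness_unlock(void)
{
	assert(demo_nheld > 0);
	demo_nheld--;
}

int
main(void)
{
	demo_witness_lock(3);	/* e.g. WITNESS_RANK_ARENAS */
	demo_witness_lock(9);	/* e.g. WITNESS_RANK_ARENA */
	demo_witness_unlock();
	demo_witness_unlock();
	puts("rank order respected");
	return (0);
}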

View File

@ -0,0 +1,386 @@
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
#define JEMALLOC_HAVE_ATTR
/* Defined if alloc_size attribute is supported. */
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
/* Defined if format(gnu_printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE_MEMALIGN
#define JEMALLOC_OVERRIDE_VALLOC
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
#define JEMALLOC_USE_CXX_THROW
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#if (defined(__LP64__) && __LP64__)
#define LG_SIZEOF_PTR 3
#else
#define LG_SIZEOF_PTR 2
#endif
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_malloc_conf je_malloc_conf
# define je_malloc_message je_malloc_message
# define je_malloc je_malloc
# define je_calloc je_calloc
# define je_posix_memalign je_posix_memalign
# define je_aligned_alloc je_aligned_alloc
# define je_realloc je_realloc
# define je_free je_free
# define je_mallocx je_mallocx
# define je_rallocx je_rallocx
# define je_xallocx je_xallocx
# define je_sallocx je_sallocx
# define je_dallocx je_dallocx
# define je_sdallocx je_sdallocx
# define je_nallocx je_nallocx
# define je_mallctl je_mallctl
# define je_mallctlnametomib je_mallctlnametomib
# define je_mallctlbymib je_mallctlbymib
# define je_malloc_stats_print je_malloc_stats_print
# define je_malloc_usable_size je_malloc_usable_size
# define je_memalign je_memalign
# define je_valloc je_valloc
#endif
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
#define JEMALLOC_VERSION_MAJOR 0
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
# define MALLOCX_LG_ALIGN(la) ((int)(la))
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
# else
# define MALLOCX_ALIGN(a) \
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
#else
# define JEMALLOC_CXX_THROW
#endif
#if _MSC_VER
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
# define JEMALLOC_NOTHROW
# endif
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
# if _MSC_VER >= 1900 && !defined(__EDG__)
# define JEMALLOC_ALLOCATOR __declspec(allocator)
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
void (*write_cb)(void *, const char *), void *je_cbopaque,
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif
/*
* void *
* chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
* bool *commit, unsigned arena_ind);
*/
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
/*
* bool
* chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
*/
typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
/*
* bool
* chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
/*
* bool
* chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
typedef struct {
chunk_alloc_t *alloc;
chunk_dalloc_t *dalloc;
chunk_commit_t *commit;
chunk_decommit_t *decommit;
chunk_purge_t *purge;
chunk_split_t *split;
chunk_merge_t *merge;
} chunk_hooks_t;
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define sdallocx je_sdallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define memalign je_memalign
# define valloc je_valloc
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_sdallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_memalign
# undef je_valloc
#endif
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
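
A minimal usage sketch of the control and introspection entry points declared above, assuming jemalloc is built and installed so that <jemalloc/jemalloc.h> resolves to this header and the unprefixed names are available (see jemalloc_mangle.h). Not part of the vendored header.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *version;
	size_t len = sizeof(version);

	/* Read the "version" string through the mallctl() namespace. */
	if (mallctl("version", &version, &len, NULL, 0) != 0)
		return (1);
	printf("jemalloc %s\n", version);

	/* Emit a human-readable statistics summary via malloc_message(). */
	malloc_stats_print(NULL, NULL, NULL);
	return (0);
}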

View File

@ -0,0 +1,50 @@
/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
/* Defined if __attribute__((...)) syntax is supported. */
#define JEMALLOC_HAVE_ATTR
/* Defined if alloc_size attribute is supported. */
#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
/* Defined if format(gnu_printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE_MEMALIGN
#define JEMALLOC_OVERRIDE_VALLOC
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
#define JEMALLOC_USE_CXX_THROW
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#if (defined(__LP64__) && __LP64__)
#define LG_SIZEOF_PTR 3
#else
#define LG_SIZEOF_PTR 2
#endif
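
A standalone compile-time check of what LG_SIZEOF_PTR encodes, i.e. sizeof(void *) == 2^LG_SIZEOF_PTR, mirroring the #if above (with _WIN64 handled as well); assumes a C11 compiler for _Static_assert. Illustrative only.

#include <stddef.h>

#if defined(_WIN64) || (defined(__LP64__) && __LP64__)
#  define DEMO_LG_SIZEOF_PTR 3
#else
#  define DEMO_LG_SIZEOF_PTR 2
#endif

/* Fails to compile if the encoded value disagrees with the pointer size. */
_Static_assert(sizeof(void *) == ((size_t)1 << DEMO_LG_SIZEOF_PTR),
    "DEMO_LG_SIZEOF_PTR must match the platform pointer size");

int
main(void)
{
	return (0);
}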

View File

@ -0,0 +1,103 @@
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
#define JEMALLOC_VERSION_MAJOR 0
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
# define MALLOCX_LG_ALIGN(la) ((int)(la))
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
# else
# define MALLOCX_ALIGN(a) \
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
#else
# define JEMALLOC_CXX_THROW
#endif
#if _MSC_VER
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
# define JEMALLOC_NOTHROW
# endif
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
# if _MSC_VER >= 1900 && !defined(__EDG__)
# define JEMALLOC_ALLOCATOR __declspec(allocator)
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
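
A minimal sketch of composing the MALLOCX_* flag macros defined above with the non-standard allocation API, assuming jemalloc is installed and linked. Illustrative only; not part of the vendored header.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Request a 64-byte-aligned, zero-filled allocation; the flag macros
	 * OR together into the flags argument of mallocx(). */
	void *p = mallocx(1000, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

	if (p == NULL)
		return (1);
	printf("usable size: %zu\n", malloc_usable_size(p));
	dallocx(p, 0);
	return (0);
}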

View File

@ -0,0 +1,66 @@
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define sdallocx je_sdallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define memalign je_memalign
# define valloc je_valloc
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_sdallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_memalign
# undef je_valloc
#endif

View File

@ -0,0 +1,66 @@
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define malloc_conf jet_malloc_conf
# define malloc_message jet_malloc_message
# define malloc jet_malloc
# define calloc jet_calloc
# define posix_memalign jet_posix_memalign
# define aligned_alloc jet_aligned_alloc
# define realloc jet_realloc
# define free jet_free
# define mallocx jet_mallocx
# define rallocx jet_rallocx
# define xallocx jet_xallocx
# define sallocx jet_sallocx
# define dallocx jet_dallocx
# define sdallocx jet_sdallocx
# define nallocx jet_nallocx
# define mallctl jet_mallctl
# define mallctlnametomib jet_mallctlnametomib
# define mallctlbymib jet_mallctlbymib
# define malloc_stats_print jet_malloc_stats_print
# define malloc_usable_size jet_malloc_usable_size
# define memalign jet_memalign
# define valloc jet_valloc
#endif
/*
* The jet_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef jet_malloc_conf
# undef jet_malloc_message
# undef jet_malloc
# undef jet_calloc
# undef jet_posix_memalign
# undef jet_aligned_alloc
# undef jet_realloc
# undef jet_free
# undef jet_mallocx
# undef jet_rallocx
# undef jet_xallocx
# undef jet_sallocx
# undef jet_dallocx
# undef jet_sdallocx
# undef jet_nallocx
# undef jet_mallctl
# undef jet_mallctlnametomib
# undef jet_mallctlbymib
# undef jet_malloc_stats_print
# undef jet_malloc_usable_size
# undef jet_memalign
# undef jet_valloc
#endif

View File

@ -0,0 +1,66 @@
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
void (*write_cb)(void *, const char *), void *je_cbopaque,
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif
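
A hedged sketch of inspecting an arena's chunk hooks (the chunk_hooks_t structure from jemalloc_typedefs.h) through the mallctl namespace; this assumes a jemalloc 4.x build in which the "arena.<i>.chunk_hooks" control exists. Illustrative only; not part of the vendored header.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	chunk_hooks_t hooks;
	size_t len = sizeof(hooks);

	/* Read arena 0's current chunk hooks; custom hooks would instead be
	 * installed by passing a fully populated chunk_hooks_t via newp. */
	if (mallctl("arena.0.chunk_hooks", &hooks, &len, NULL, 0) != 0)
		return (1);
	printf("chunk alloc hook is %s\n",
	    hooks.alloc != NULL ? "present" : "absent");
	return (0);
}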

View File

@ -0,0 +1,66 @@
/*
* The jet_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
*/
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr,
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags)
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size,
int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size,
size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr,
int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size,
int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags)
JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name,
size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print(
void (*write_cb)(void *, const char *), void *jet_cbopaque,
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif

View File

@ -0,0 +1,29 @@
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_malloc_conf je_malloc_conf
# define je_malloc_message je_malloc_message
# define je_malloc je_malloc
# define je_calloc je_calloc
# define je_posix_memalign je_posix_memalign
# define je_aligned_alloc je_aligned_alloc
# define je_realloc je_realloc
# define je_free je_free
# define je_mallocx je_mallocx
# define je_rallocx je_rallocx
# define je_xallocx je_xallocx
# define je_sallocx je_sallocx
# define je_dallocx je_dallocx
# define je_sdallocx je_sdallocx
# define je_nallocx je_nallocx
# define je_mallctl je_mallctl
# define je_mallctlnametomib je_mallctlnametomib
# define je_mallctlbymib je_mallctlbymib
# define je_malloc_stats_print je_malloc_stats_print
# define je_malloc_usable_size je_malloc_usable_size
# define je_memalign je_memalign
# define je_valloc je_valloc
#endif
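
Because this copy is configured with JEMALLOC_PREFIX "je_" (see jemalloc_internal_defs.h later in this diff), the macros above map every public symbol onto itself, so applications call the je_-prefixed entry points directly. A minimal illustrative caller; the include path is an assumption about where the public header gets installed:

#include <stdio.h>
#include "jemalloc/jemalloc.h"	/* assumed install location of the public header */

int main(void) {
	void *p = je_malloc(64);
	if (p == NULL)
		return (1);
	p = je_realloc(p, 128);
	printf("usable size: %zu\n", je_malloc_usable_size(p));
	je_free(p);
	return (0);
}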

View File

@ -0,0 +1,57 @@
/*
* void *
* chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
* bool *commit, unsigned arena_ind);
*/
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
/*
* bool
* chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
*/
typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
/*
* bool
* chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
/*
* bool
* chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
typedef struct {
chunk_alloc_t *alloc;
chunk_dalloc_t *dalloc;
chunk_commit_t *commit;
chunk_decommit_t *decommit;
chunk_purge_t *purge;
chunk_split_t *split;
chunk_merge_t *merge;
} chunk_hooks_t;
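
These hooks let an embedding application take over chunk management for an arena. Below is a minimal, Linux-flavored sketch of a custom allocation hook backed by mmap(2), installed through the "arena.0.chunk_hooks" mallctl; the helper names are hypothetical, and a real hook must also honor the requested alignment, which this sketch omits:

#include <stdbool.h>
#include <sys/mman.h>
#include "jemalloc/jemalloc.h"	/* assumed public header providing chunk_hooks_t */

/* Hypothetical hook: back chunks with fresh anonymous mappings. */
static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	(void)new_addr; (void)alignment; (void)arena_ind;
	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;	/* Anonymous mappings are zero-filled. */
	*commit = true;	/* Readable and writable immediately. */
	return (ret);
}

static void
install_hooks(void)
{
	chunk_hooks_t hooks;
	size_t len = sizeof(hooks);

	/* Read the current hooks, override only the allocator, write back. */
	je_mallctl("arena.0.chunk_hooks", &hooks, &len, NULL, 0);
	hooks.alloc = my_chunk_alloc;
	je_mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, len);
}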

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,45 @@
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) \
not_implemented(); \
} while (0)
#endif
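
Because config_debug is a compile-time constant, these checks disappear entirely from non-debug builds while still documenting invariants. An illustrative internal-style use; the function itself is hypothetical:

static void
dispatch(int op)
{
	assert(op >= 0 && op < 3);	/* Debug-only sanity check. */
	switch (op) {
	case 0: /* ... */ break;
	case 1: /* ... */ break;
	case 2: /* ... */ break;
	default: not_reached();		/* Aborts in debug builds. */
	}
}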

View File

@ -0,0 +1,651 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* All arithmetic functions return the arithmetic result of the atomic
* operation. Some atomic operation APIs return the value prior to mutation, in
* which case the following functions must redundantly compute the result so
* that it can be returned. These functions are normally inlined, so the extra
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* *p = s;
* return (false);
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
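/*
 * A minimal usage sketch (illustrative, not part of the upstream header): a
 * shared byte counter plus a high-water mark built from the primitives
 * declared below.  Note that atomic_cas_*() returns false on success, so the
 * loop retries while the CAS fails.
 *
 *	static size_t count;
 *	static size_t high_water;
 *
 *	static void
 *	note_alloc(size_t sz)
 *	{
 *		size_t cur = atomic_add_z(&count, sz);
 *		size_t hw;
 *
 *		do {
 *			hw = atomic_read_z(&high_water);
 *			if (hw >= cur)
 *				break;
 *		} while (atomic_cas_z(&high_water, hw, cur));
 *	}
 */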
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
uint64_t t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
uint64_t t;
x = (uint64_t)(-(int64_t)x);
t = x;
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgq %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory" /* Clobbers. */
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
uint64_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint64(p);
} while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
__sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
uint32_t t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
uint32_t t;
x = (uint32_t)(-(int32_t)x);
t = x;
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgl %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory"
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
uint32_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint32(p);
} while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
__sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,25 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(tsdn_t *tsdn, size_t size);
void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped);
bool base_boot(void);
void base_prefork(tsdn_t *tsdn);
void base_postfork_parent(tsdn_t *tsdn);
void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,274 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#else /* USE_TREE */
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#endif /* USE_TREE */
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
#ifdef USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
#else /* USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
#endif /* USE_TREE */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
#ifdef USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
#else
size_t i;
for (i = 0; i < binfo->ngroups; i++) {
if (bitmap[i] != 0)
return (false);
}
return (true);
#endif
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
#endif
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t bit;
bitmap_t g;
unsigned i;
assert(!bitmap_full(bitmap, binfo));
#ifdef USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
}
#else
i = 0;
g = bitmap[0];
while ((bit = ffs_lu(g)) == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
UNUSED bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (!propagate)
break;
}
}
#endif /* USE_TREE */
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
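
The stored representation keeps a 1 for each free slot, so bitmap_get() reports true once a slot has been claimed and bitmap_sfu() ("set first unset") grabs the lowest free slot. An illustrative sequence, with the backing storage sized via bitmap_size() and obtained from malloc purely for the sketch:

bitmap_info_t binfo;
bitmap_t *bits;
size_t slot;

bitmap_info_init(&binfo, 256);		/* Track 256 slots. */
bits = malloc(bitmap_size(&binfo));	/* Storage for all groups (and levels). */
bitmap_init(bits, &binfo);		/* All slots start free. */

slot = bitmap_sfu(bits, &binfo);	/* Claims slot 0 on a fresh bitmap. */
assert(bitmap_get(bits, &binfo, slot));	/* Now reads as "in use". */
bitmap_unset(bits, &binfo, slot);	/* Release it again. */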

View File

@ -0,0 +1,97 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
#define LG_CHUNK_DEFAULT 21
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#define CHUNK_HOOKS_INITIALIZER { \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL \
}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
extern const char *opt_dss;
extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(const void *chunk, const extent_node_t *node,
bool *gdump);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
size_t *sn, bool *zero, bool *commit, bool dalloc_node);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
size_t *sn, bool *zero, bool *commit);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool committed);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
bool chunk_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"

View File

@ -0,0 +1,37 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
extern const char *dss_prec_names[];
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
void chunk_dss_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,21 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,86 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
const void *key;
const void *data;
};
struct ckh_s {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
uint64_t nshrinks;
uint64_t nshrinkfails;
uint64_t ninserts;
uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
uint64_t prng_state;
/* Total number of items. */
size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
unsigned lg_minbuckets;
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
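
A hedged sketch of the cuckoo-hash call sequence, using the string helpers declared above; tsd_fetch() is assumed to be available as in the rest of the internal sources, and the usual convention applies that a false return means success:

ckh_t ckh;
tsd_t *tsd = tsd_fetch();
void *key, *data;

ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp);
ckh_insert(tsd, &ckh, "answer", (void *)(uintptr_t)42);
if (!ckh_search(&ckh, "answer", &key, &data)) {
	/* Found: key points at "answer", data holds 42. */
}
ckh_delete(tsd, &ckh);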

View File

@ -0,0 +1,118 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
ssize_t decay_time;
size_t pactive;
size_t pdirty;
/* The remainder are only populated if config_stats is true. */
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
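
The xmallctl*() wrappers abort on failure, which keeps internal call sites terse. A small sketch that refreshes the cached statistics via the "epoch" control and then reads one of them; both names exist in this version of the ctl tree:

uint64_t epoch = 1;
size_t allocated, sz = sizeof(allocated);

xmallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
xmallctl("stats.allocated", &allocated, &sz, NULL, 0);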

View File

@ -0,0 +1,275 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Arena from which this extent came, if any. */
arena_t *en_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *en_addr;
/* Total region size. */
size_t en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/sn/address-ordered tree. */
rb_node(extent_node_t) szsnad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
#endif
size_t extent_size_quantize_ceil(size_t size);
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
size_t sn, bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,357 @@
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely(((uintptr_t)p & (sizeof(uint32_t)-1)) != 0)) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely(((uintptr_t)p & (sizeof(uint64_t)-1)) != 0)) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
return (p[i]);
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
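
hash() selects the x64 or x86 variant at compile time based on pointer size and endianness and always produces a 128-bit result split across r_hash[0..1]. A small illustrative call with an arbitrary seed:

const char key[] = "example key";
size_t h[2];

hash(key, sizeof(key) - 1, 0x9e3779b9U, h);
/* h[0] and h[1] hold the two 64-bit halves (truncated to size_t on 32-bit builds). */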

View File

@ -0,0 +1,35 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero);
bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,75 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_DECLS_H */

View File

@ -0,0 +1,316 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#define JEMALLOC_PREFIX "je_"
#define JEMALLOC_CPREFIX "JE_"
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT _mm_pause()
/* Defined if C11 atomics are available. */
/* #undef JEMALLOC_C11ATOMICS */
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
/* #undef JEMALLOC_ATOMIC9 */
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
/* #undef JEMALLOC_OSATOMIC */
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/* Defined if syscall(2) is usable. */
/* #undef JEMALLOC_USE_SYSCALL */
/*
* Defined if secure_getenv(3) is available.
*/
/* #undef JEMALLOC_HAVE_SECURE_GETENV */
/*
* Defined if issetugid(2) is available.
*/
/* #undef JEMALLOC_HAVE_ISSETUGID */
/* Defined if pthread_atfork(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
/* #undef JEMALLOC_THREADED_INIT */
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
#define JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
/* #undef JEMALLOC_CODE_COVERAGE */
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#define JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
/* #undef JEMALLOC_TCACHE */
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
/* #undef JEMALLOC_DSS */
/* Support memory filling (junk/zero/quarantine/redzone). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support Valgrind. */
/* #undef JEMALLOC_VALGRIND */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#define LG_TINY_MIN 3
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
/* #undef JEMALLOC_MAPS_COALESCE */
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#define JEMALLOC_MUNMAP
/* TLS is used to map arenas and magazine caches to threads. */
/* #undef JEMALLOC_TLS */
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE abort
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL ffsll
#define JEMALLOC_INTERNAL_FFSL ffsl
#define JEMALLOC_INTERNAL_FFS ffs
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#define JEMALLOC_IVSALLOC
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
/* Defined if madvise(2) is available. */
/* #undef JEMALLOC_HAVE_MADVISE */
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
* new pages will be demand-zeroed if the
* address region is later touched.
*/
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
/* Defined if transparent huge page support is enabled. */
/* #undef JEMALLOC_THP */
/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
/* #undef JEMALLOC_HAS_RESTRICT */
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 2
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
/* Adaptive mutex support in pthreads. */
/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
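A hedged aside on the configuration above (not part of the generated header): because JEMALLOC_PREFIX is "je_", the public allocator entry points are exported with that prefix rather than replacing the system malloc, which is what makes using "multiple allocators simultaneously" possible. A minimal sketch, with prototypes repeated here only for illustration (they normally come from the installed jemalloc.h):

#include <stdlib.h>

void	*je_malloc(size_t size);
void	je_free(void *ptr);

int
main(void)
{
	void *p = je_malloc(64);	/* served by jemalloc */
	if (p == NULL)
		return (1);
	je_free(p);
	free(malloc(16));		/* still the system allocator */
	return (0);
}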

View File

@ -0,0 +1,315 @@
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11ATOMICS
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#undef LG_TINY_MIN
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
* new pages will be demand-zeroed if the
* address region is later touched.
*/
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
/* Defined if transparent huge page support is enabled. */
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
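Editorial note on how this template relates to the generated header shown earlier: configure rewrites each #undef line it decides to enable into a concrete #define and comments out the rest. For instance, with the configuration above:

	#undef LG_PAGE			/* jemalloc_internal_defs.h.in */

becomes

	#define LG_PAGE 12		/* jemalloc_internal_defs.h, 4 KiB pages */

while undecided symbols remain as placeholders such as /* #undef JEMALLOC_TLS */.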

View File

@ -0,0 +1,57 @@
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
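A small worked expansion (editorial, not part of the header) showing what the integer-literal wrappers buy: they attach both an explicit type and the matching suffix so constants keep their width on every ABI. NSTIME_SEC_MAX, defined later in this commit as KQU(18446744072), expands as:

	KQU(18446744072)
	-> QU(18446744072ULL)
	-> ((uint64_t)18446744072ULL)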

View File

@ -0,0 +1,115 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
/* This is a true memory barrier. */
asm volatile ("pusha;"
"xor %%eax,%%eax;"
"cpuid;"
"popa;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
*/
asm volatile ("nop;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("sfence"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("eieio"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("membar #StoreStore"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
__sync_synchronize();
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void)
{
malloc_mutex_t mtx;
malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
malloc_mutex_lock(TSDN_NULL, &mtx);
malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
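A minimal usage sketch for the barrier above (editorial; writer, reader, data and ready are hypothetical names): the writer stores its payload, calls mb_write() so that store is ordered before the store to the flag, and only then publishes the flag a reader polls.

static int		data;
static volatile int	ready;

static void
writer(void)
{
	data = 42;	/* payload */
	mb_write();	/* order the payload store before the flag store */
	ready = 1;	/* publish */
}

static void
reader(void)
{
	while (ready == 0) {
		/* spin */
	}
	/*
	 * On strongly ordered CPUs data is 42 here; a weakly ordered reader
	 * would also need a read/acquire barrier, which this header does not
	 * provide.
	 */
}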

View File

@ -0,0 +1,145 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
}
witness_lock(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_unlock(tsdn, &mutex->witness);
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
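An illustrative sketch of the locking pattern these wrappers support (editorial; example_mtx, example_boot and example_critical_section are hypothetical names): a mutex is initialized once with a name and witness rank, and each acquisition is bracketed so that debug builds can assert on ownership and lock order.

static malloc_mutex_t	example_mtx;

static bool
example_boot(void)
{
	/* The name and rank feed the witness checks in the inlines above. */
	return (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT));
}

static void
example_critical_section(tsdn_t *tsdn)
{
	malloc_mutex_lock(tsdn, &example_mtx);
	/* ... state protected by example_mtx ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
}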

View File

@ -0,0 +1,48 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct nstime_s nstime_t;
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct nstime_s {
uint64_t ns;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool nstime_monotonic(void);
bool nstime_update(nstime_t *time);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
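A hedged usage sketch (elapsed_ns_example is a hypothetical helper, not jemalloc code): the declarations above combine to read a monotonic timestamp and report a difference in nanoseconds.

static uint64_t
elapsed_ns_example(void)
{
	nstime_t begin, end;

	nstime_init(&begin, 0);
	nstime_update(&begin);		/* read the clock; result ignored here */

	/* ... work being timed ... */

	nstime_init(&end, 0);
	nstime_update(&end);
	nstime_subtract(&end, &begin);	/* end -= begin */
	return (nstime_ns(&end));
}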

View File

@ -0,0 +1,29 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -0,0 +1,345 @@
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With an auxiliary two-pass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
* We can delete from the aux list without merging it, but we need \
* to merge if we are dealing with the root node. \
*/ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
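A minimal instantiation sketch (editorial; node_t, node_cmp and the node_heap_ prefix are hypothetical): ph_gen() takes a node type that embeds phn(node_t), a comparison function, and a prefix, and emits the functions declared by ph_proto().

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;	/* intrusive heap linkage */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(node_t *a, node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

/*
 * Usage:
 *	node_heap_t heap;
 *	node_heap_new(&heap);
 *	node_heap_insert(&heap, &some_node);
 *	node_t *min = node_heap_remove_first(&heap);
 */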

View File

@ -0,0 +1,639 @@
#define a0dalloc JEMALLOC_N(a0dalloc)
#define a0get JEMALLOC_N(a0get)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_aalloc JEMALLOC_N(arena_aalloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
#define arena_cleanup JEMALLOC_N(arena_cleanup)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
#define arena_get JEMALLOC_N(arena_get)
#define arena_ichoose JEMALLOC_N(arena_ichoose)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_new JEMALLOC_N(arena_new)
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge JEMALLOC_N(arena_purge)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_reset JEMALLOC_N(arena_reset)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
#define arenas JEMALLOC_N(arenas)
#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
#define atomic_add_p JEMALLOC_N(atomic_add_p)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define atomic_write_p JEMALLOC_N(atomic_write_p)
#define atomic_write_u JEMALLOC_N(atomic_write_u)
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
#define atomic_write_z JEMALLOC_N(atomic_write_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define base_stats_get JEMALLOC_N(base_stats_get)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
#define bootstrap_free JEMALLOC_N(bootstrap_free)
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
#define chunk_deregister JEMALLOC_N(chunk_deregister)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_lookup JEMALLOC_N(chunk_lookup)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
#define extent_node_init JEMALLOC_N(extent_node_init)
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
#define ffs_llu JEMALLOC_N(ffs_llu)
#define ffs_lu JEMALLOC_N(ffs_lu)
#define ffs_u JEMALLOC_N(ffs_u)
#define ffs_u32 JEMALLOC_N(ffs_u32)
#define ffs_u64 JEMALLOC_N(ffs_u64)
#define ffs_zu JEMALLOC_N(ffs_zu)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_aalloc JEMALLOC_N(huge_aalloc)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iaalloc JEMALLOC_N(iaalloc)
#define ialloc JEMALLOC_N(ialloc)
#define iallocztm JEMALLOC_N(iallocztm)
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define idalloc JEMALLOC_N(idalloc)
#define idalloctm JEMALLOC_N(idalloctm)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
#define index2size_lookup JEMALLOC_N(index2size_lookup)
#define index2size_tab JEMALLOC_N(index2size_tab)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define ipallocztm JEMALLOC_N(ipallocztm)
#define iqalloc JEMALLOC_N(iqalloc)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isdalloct JEMALLOC_N(isdalloct)
#define isqalloc JEMALLOC_N(isqalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define large_maxclass JEMALLOC_N(large_maxclass)
#define lg_floor JEMALLOC_N(lg_floor)
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define nhclasses JEMALLOC_N(nhclasses)
#define nlclasses JEMALLOC_N(nlclasses)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
#define nstime_divide JEMALLOC_N(nstime_divide)
#define nstime_idivide JEMALLOC_N(nstime_idivide)
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
#define nstime_init JEMALLOC_N(nstime_init)
#define nstime_init2 JEMALLOC_N(nstime_init2)
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
#define nstime_ns JEMALLOC_N(nstime_ns)
#define nstime_nsec JEMALLOC_N(nstime_nsec)
#define nstime_sec JEMALLOC_N(nstime_sec)
#define nstime_subtract JEMALLOC_N(nstime_subtract)
#define nstime_update JEMALLOC_N(nstime_update)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_decay_time JEMALLOC_N(opt_decay_time)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
#define opt_junk_free JEMALLOC_N(opt_junk_free)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
#define opt_purge JEMALLOC_N(opt_purge)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_thp JEMALLOC_N(opt_thp)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_boot JEMALLOC_N(pages_boot)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_huge JEMALLOC_N(pages_huge)
#define pages_map JEMALLOC_N(pages_map)
#define pages_nohuge JEMALLOC_N(pages_nohuge)
#define pages_purge JEMALLOC_N(pages_purge)
#define pages_trim JEMALLOC_N(pages_trim)
#define pages_unmap JEMALLOC_N(pages_unmap)
#define pind2sz JEMALLOC_N(pind2sz)
#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
#define prng_range_u32 JEMALLOC_N(prng_range_u32)
#define prng_range_u64 JEMALLOC_N(prng_range_u64)
#define prng_range_zu JEMALLOC_N(prng_range_zu)
#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
#define prof_active JEMALLOC_N(prof_active)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
#define psz2ind JEMALLOC_N(psz2ind)
#define psz2u JEMALLOC_N(psz2u)
#define purge_mode_names JEMALLOC_N(purge_mode_names)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define rtree_child_read JEMALLOC_N(rtree_child_read)
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
#define rtree_set JEMALLOC_N(rtree_set)
#define rtree_start_level JEMALLOC_N(rtree_start_level)
#define rtree_subkey JEMALLOC_N(rtree_subkey)
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
#define rtree_val_read JEMALLOC_N(rtree_val_read)
#define rtree_val_write JEMALLOC_N(rtree_val_write)
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
#define s2u JEMALLOC_N(s2u)
#define s2u_compute JEMALLOC_N(s2u_compute)
#define s2u_lookup JEMALLOC_N(s2u_lookup)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define size2index JEMALLOC_N(size2index)
#define size2index_compute JEMALLOC_N(size2index_compute)
#define size2index_lookup JEMALLOC_N(size2index_lookup)
#define size2index_tab JEMALLOC_N(size2index_tab)
#define spin_adaptive JEMALLOC_N(spin_adaptive)
#define spin_init JEMALLOC_N(spin_init)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
#define tcache_prefork JEMALLOC_N(tcache_prefork)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcaches JEMALLOC_N(tcaches)
#define tcaches_create JEMALLOC_N(tcaches_create)
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
#define tcaches_flush JEMALLOC_N(tcaches_flush)
#define tcaches_get JEMALLOC_N(tcaches_get)
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
#define ticker_copy JEMALLOC_N(ticker_copy)
#define ticker_init JEMALLOC_N(ticker_init)
#define ticker_read JEMALLOC_N(ticker_read)
#define ticker_tick JEMALLOC_N(ticker_tick)
#define ticker_ticks JEMALLOC_N(ticker_ticks)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
#define tsdn_null JEMALLOC_N(tsdn_null)
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
#define witness_depth_error JEMALLOC_N(witness_depth_error)
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
#define witness_init JEMALLOC_N(witness_init)
#define witness_lock JEMALLOC_N(witness_lock)
#define witness_lock_error JEMALLOC_N(witness_lock_error)
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
#define witness_owner JEMALLOC_N(witness_owner)
#define witness_owner_error JEMALLOC_N(witness_owner_error)
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
#define witness_prefork JEMALLOC_N(witness_prefork)
#define witness_unlock JEMALLOC_N(witness_unlock)
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
#define zone_register JEMALLOC_N(zone_register)

View File

@ -0,0 +1,5 @@
#!/bin/sh
for symbol in `cat $1` ; do
echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
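
A minimal sketch (not part of this commit) of what the generated header accomplishes, assuming the default je_ prefix behind JEMALLOC_N; the actual prefix is chosen at configure time, so the first define below is an illustrative assumption only:

#define JEMALLOC_N(n) je_##n                       /* illustrative assumption */
#define malloc_printf JEMALLOC_N(malloc_printf)    /* as emitted by the script */
/* After preprocessing, internal calls to malloc_printf resolve to
 * je_malloc_printf, keeping library-internal symbols out of the public
 * namespace. */

The script is typically run over a one-symbol-per-line list such as the one added below, with its output redirected into the namespacing header; the #undef counterpart further below reverses the mapping.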

View File

@ -0,0 +1,639 @@
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
arena_bitselm_get_const
arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbitsp_get_const
arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get_const
arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arenas
arenas_tdata_bypass_cleanup
arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_mergeable
chunk_dss_prec_get
chunk_dss_prec_set
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_purge_wrapper
chunk_register
chunks_rtree
chunksize
chunksize_mask
ckh_count
ckh_delete
ckh_insert
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_size_quantize_ceil
extent_size_quantize_floor
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u
ffs_u32
ffs_u64
ffs_zu
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
ialloc
iallocztm
iarena_cleanup
idalloc
idalloctm
in_valgrind
index2size
index2size_compute
index2size_lookup
index2size_tab
ipalloc
ipalloct
ipallocztm
iqalloc
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
narenas_auto
narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy
nstime_divide
nstime_idivide
nstime_imultiply
nstime_init
nstime_init2
nstime_monotonic
nstime_ns
nstime_nsec
nstime_sec
nstime_subtract
nstime_update
opt_abort
opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_purge
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_thp
opt_utrace
opt_xmalloc
opt_zero
p2rz
pages_boot
pages_commit
pages_decommit
pages_huge
pages_map
pages_nohuge
pages_purge
pages_trim
pages_unmap
pind2sz
pind2sz_compute
pind2sz_lookup
pind2sz_tab
pow2_ceil_u32
pow2_ceil_u64
pow2_ceil_zu
prng_lg_range_u32
prng_lg_range_u64
prng_lg_range_zu
prng_range_u32
prng_range_u64
prng_range_zu
prng_state_next_u32
prng_state_next_u64
prng_state_next_zu
prof_active
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_dump_header
prof_dump_open
prof_free
prof_free_sampled_object
prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork0
prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
psz2ind
psz2u
purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_new
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
run_quantize_ceil
run_quantize_floor
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
size2index
size2index_compute
size2index_lookup
size2index_tab
spin_adaptive
spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot
tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_enabled_cleanup
tcache_enabled_get
tcache_enabled_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcache_postfork_child
tcache_postfork_parent
tcache_prefork
tcache_salloc
tcache_stats_merge
tcaches
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
thread_allocated_cleanup
thread_deallocated_cleanup
ticker_copy
ticker_init
ticker_read
ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
tsd_arenap_get
tsd_arenas_tdata_bypass_get
tsd_arenas_tdata_bypass_set
tsd_arenas_tdata_bypassp_get
tsd_arenas_tdata_get
tsd_arenas_tdata_set
tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_fetch_impl
tsd_get
tsd_get_allocates
tsd_iarena_get
tsd_iarena_set
tsd_iarenap_get
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
tsd_tcachep_get
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
tsd_tsdn
tsd_witness_fork_get
tsd_witness_fork_set
tsd_witness_forkp_get
tsd_witnesses_get
tsd_witnesses_set
tsd_witnessesp_get
tsdn_fetch
tsdn_null
tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_depth
witness_assert_depth_to_rank
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_depth_error
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_not_owner_error
witness_owner
witness_owner_error
witness_postfork_child
witness_postfork_parent
witness_prefork
witness_unlock
witnesses_cleanup
zone_register

View File

@ -0,0 +1,639 @@
#undef a0dalloc
#undef a0get
#undef a0malloc
#undef arena_aalloc
#undef arena_alloc_junk_small
#undef arena_basic_stats_merge
#undef arena_bin_index
#undef arena_bin_info
#undef arena_bitselm_get_const
#undef arena_bitselm_get_mutable
#undef arena_boot
#undef arena_choose
#undef arena_choose_hard
#undef arena_choose_impl
#undef arena_chunk_alloc_huge
#undef arena_chunk_cache_maybe_insert
#undef arena_chunk_cache_maybe_remove
#undef arena_chunk_dalloc_huge
#undef arena_chunk_ralloc_huge_expand
#undef arena_chunk_ralloc_huge_shrink
#undef arena_chunk_ralloc_huge_similar
#undef arena_cleanup
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_junked_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_junked_locked
#undef arena_dalloc_small
#undef arena_decay_tick
#undef arena_decay_ticks
#undef arena_decay_time_default_get
#undef arena_decay_time_default_set
#undef arena_decay_time_get
#undef arena_decay_time_set
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_extent_sn_next
#undef arena_get
#undef arena_ichoose
#undef arena_init
#undef arena_lg_dirty_mult_default_get
#undef arena_lg_dirty_mult_default_set
#undef arena_lg_dirty_mult_get
#undef arena_lg_dirty_mult_set
#undef arena_malloc
#undef arena_malloc_hard
#undef arena_malloc_large
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_decommitted_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_internal_set
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_size_decode
#undef arena_mapbits_size_encode
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbitsp_get_const
#undef arena_mapbitsp_get_mutable
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_maxrun
#undef arena_maybe_purge
#undef arena_metadata_allocated_add
#undef arena_metadata_allocated_get
#undef arena_metadata_allocated_sub
#undef arena_migrate
#undef arena_miscelm_get_const
#undef arena_miscelm_get_mutable
#undef arena_miscelm_to_pageind
#undef arena_miscelm_to_rpages
#undef arena_new
#undef arena_node_alloc
#undef arena_node_dalloc
#undef arena_nthreads_dec
#undef arena_nthreads_get
#undef arena_nthreads_inc
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork0
#undef arena_prefork1
#undef arena_prefork2
#undef arena_prefork3
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_promoted
#undef arena_prof_tctx_get
#undef arena_prof_tctx_reset
#undef arena_prof_tctx_set
#undef arena_ptr_small_binind_get
#undef arena_purge
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_rd_to_miscelm
#undef arena_redzone_corruption
#undef arena_reset
#undef arena_run_regind
#undef arena_run_to_miscelm
#undef arena_salloc
#undef arena_sdalloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arena_tdata_get
#undef arena_tdata_get_hard
#undef arenas
#undef arenas_tdata_bypass_cleanup
#undef arenas_tdata_cleanup
#undef atomic_add_p
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_cas_p
#undef atomic_cas_u
#undef atomic_cas_uint32
#undef atomic_cas_uint64
#undef atomic_cas_z
#undef atomic_sub_p
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef atomic_write_p
#undef atomic_write_u
#undef atomic_write_uint32
#undef atomic_write_uint64
#undef atomic_write_z
#undef base_alloc
#undef base_boot
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef base_stats_get
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bootstrap_calloc
#undef bootstrap_free
#undef bootstrap_malloc
#undef bt_init
#undef buferror
#undef chunk_alloc_base
#undef chunk_alloc_cache
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_alloc_wrapper
#undef chunk_boot
#undef chunk_dalloc_cache
#undef chunk_dalloc_mmap
#undef chunk_dalloc_wrapper
#undef chunk_deregister
#undef chunk_dss_boot
#undef chunk_dss_mergeable
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_hooks_default
#undef chunk_hooks_get
#undef chunk_hooks_set
#undef chunk_in_dss
#undef chunk_lookup
#undef chunk_npages
#undef chunk_purge_wrapper
#undef chunk_register
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_count
#undef ckh_delete
#undef ckh_insert
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef decay_ticker_get
#undef dss_prec_names
#undef extent_node_achunk_get
#undef extent_node_achunk_set
#undef extent_node_addr_get
#undef extent_node_addr_set
#undef extent_node_arena_get
#undef extent_node_arena_set
#undef extent_node_committed_get
#undef extent_node_committed_set
#undef extent_node_dirty_insert
#undef extent_node_dirty_linkage_init
#undef extent_node_dirty_remove
#undef extent_node_init
#undef extent_node_prof_tctx_get
#undef extent_node_prof_tctx_set
#undef extent_node_size_get
#undef extent_node_size_set
#undef extent_node_sn_get
#undef extent_node_sn_set
#undef extent_node_zeroed_get
#undef extent_node_zeroed_set
#undef extent_size_quantize_ceil
#undef extent_size_quantize_floor
#undef extent_tree_ad_destroy
#undef extent_tree_ad_destroy_recurse
#undef extent_tree_ad_empty
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szsnad_destroy
#undef extent_tree_szsnad_destroy_recurse
#undef extent_tree_szsnad_empty
#undef extent_tree_szsnad_first
#undef extent_tree_szsnad_insert
#undef extent_tree_szsnad_iter
#undef extent_tree_szsnad_iter_recurse
#undef extent_tree_szsnad_iter_start
#undef extent_tree_szsnad_last
#undef extent_tree_szsnad_new
#undef extent_tree_szsnad_next
#undef extent_tree_szsnad_nsearch
#undef extent_tree_szsnad_prev
#undef extent_tree_szsnad_psearch
#undef extent_tree_szsnad_remove
#undef extent_tree_szsnad_reverse_iter
#undef extent_tree_szsnad_reverse_iter_recurse
#undef extent_tree_szsnad_reverse_iter_start
#undef extent_tree_szsnad_search
#undef ffs_llu
#undef ffs_lu
#undef ffs_u
#undef ffs_u32
#undef ffs_u64
#undef ffs_zu
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_aalloc
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_palloc
#undef huge_prof_tctx_get
#undef huge_prof_tctx_reset
#undef huge_prof_tctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef iaalloc
#undef ialloc
#undef iallocztm
#undef iarena_cleanup
#undef idalloc
#undef idalloctm
#undef in_valgrind
#undef index2size
#undef index2size_compute
#undef index2size_lookup
#undef index2size_tab
#undef ipalloc
#undef ipalloct
#undef ipallocztm
#undef iqalloc
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isdalloct
#undef isqalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef large_maxclass
#undef lg_floor
#undef lg_prof_sample
#undef malloc_cprintf
#undef malloc_mutex_assert_not_owner
#undef malloc_mutex_assert_owner
#undef malloc_mutex_boot
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot0
#undef malloc_tsd_boot1
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef map_misc_offset
#undef mb_write
#undef narenas_auto
#undef narenas_tdata_cleanup
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef nhclasses
#undef nlclasses
#undef nstime_add
#undef nstime_compare
#undef nstime_copy
#undef nstime_divide
#undef nstime_idivide
#undef nstime_imultiply
#undef nstime_init
#undef nstime_init2
#undef nstime_monotonic
#undef nstime_ns
#undef nstime_nsec
#undef nstime_sec
#undef nstime_subtract
#undef nstime_update
#undef opt_abort
#undef opt_decay_time
#undef opt_dss
#undef opt_junk
#undef opt_junk_alloc
#undef opt_junk_free
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_prof_thread_active_init
#undef opt_purge
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_thp
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_boot
#undef pages_commit
#undef pages_decommit
#undef pages_huge
#undef pages_map
#undef pages_nohuge
#undef pages_purge
#undef pages_trim
#undef pages_unmap
#undef pind2sz
#undef pind2sz_compute
#undef pind2sz_lookup
#undef pind2sz_tab
#undef pow2_ceil_u32
#undef pow2_ceil_u64
#undef pow2_ceil_zu
#undef prng_lg_range_u32
#undef prng_lg_range_u64
#undef prng_lg_range_zu
#undef prng_range_u32
#undef prng_range_u64
#undef prng_range_zu
#undef prng_state_next_u32
#undef prng_state_next_u64
#undef prng_state_next_zu
#undef prof_active
#undef prof_active_get
#undef prof_active_get_unlocked
#undef prof_active_set
#undef prof_alloc_prep
#undef prof_alloc_rollback
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_dump_header
#undef prof_dump_open
#undef prof_free
#undef prof_free_sampled_object
#undef prof_gdump
#undef prof_gdump_get
#undef prof_gdump_get_unlocked
#undef prof_gdump_set
#undef prof_gdump_val
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_sample_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork0
#undef prof_prefork1
#undef prof_realloc
#undef prof_reset
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tctx_get
#undef prof_tctx_reset
#undef prof_tctx_set
#undef prof_tdata_cleanup
#undef prof_tdata_count
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_reinit
#undef prof_thread_active_get
#undef prof_thread_active_init_get
#undef prof_thread_active_init_set
#undef prof_thread_active_set
#undef prof_thread_name_get
#undef prof_thread_name_set
#undef psz2ind
#undef psz2u
#undef purge_mode_names
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_alloc_hook_work
#undef quarantine_cleanup
#undef rtree_child_read
#undef rtree_child_read_hard
#undef rtree_child_tryread
#undef rtree_delete
#undef rtree_get
#undef rtree_new
#undef rtree_node_valid
#undef rtree_set
#undef rtree_start_level
#undef rtree_subkey
#undef rtree_subtree_read
#undef rtree_subtree_read_hard
#undef rtree_subtree_tryread
#undef rtree_val_read
#undef rtree_val_write
#undef run_quantize_ceil
#undef run_quantize_floor
#undef s2u
#undef s2u_compute
#undef s2u_lookup
#undef sa2u
#undef set_errno
#undef size2index
#undef size2index_compute
#undef size2index_lookup
#undef size2index_tab
#undef spin_adaptive
#undef spin_init
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_reassociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot
#undef tcache_cleanup
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_enabled_cleanup
#undef tcache_enabled_get
#undef tcache_enabled_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_maxclass
#undef tcache_postfork_child
#undef tcache_postfork_parent
#undef tcache_prefork
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcaches
#undef tcaches_create
#undef tcaches_destroy
#undef tcaches_flush
#undef tcaches_get
#undef thread_allocated_cleanup
#undef thread_deallocated_cleanup
#undef ticker_copy
#undef ticker_init
#undef ticker_read
#undef ticker_tick
#undef ticker_ticks
#undef tsd_arena_get
#undef tsd_arena_set
#undef tsd_arenap_get
#undef tsd_arenas_tdata_bypass_get
#undef tsd_arenas_tdata_bypass_set
#undef tsd_arenas_tdata_bypassp_get
#undef tsd_arenas_tdata_get
#undef tsd_arenas_tdata_set
#undef tsd_arenas_tdatap_get
#undef tsd_boot
#undef tsd_boot0
#undef tsd_boot1
#undef tsd_booted
#undef tsd_booted_get
#undef tsd_cleanup
#undef tsd_cleanup_wrapper
#undef tsd_fetch
#undef tsd_fetch_impl
#undef tsd_get
#undef tsd_get_allocates
#undef tsd_iarena_get
#undef tsd_iarena_set
#undef tsd_iarenap_get
#undef tsd_initialized
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef tsd_init_head
#undef tsd_narenas_tdata_get
#undef tsd_narenas_tdata_set
#undef tsd_narenas_tdatap_get
#undef tsd_wrapper_get
#undef tsd_wrapper_set
#undef tsd_nominal
#undef tsd_prof_tdata_get
#undef tsd_prof_tdata_set
#undef tsd_prof_tdatap_get
#undef tsd_quarantine_get
#undef tsd_quarantine_set
#undef tsd_quarantinep_get
#undef tsd_set
#undef tsd_tcache_enabled_get
#undef tsd_tcache_enabled_set
#undef tsd_tcache_enabledp_get
#undef tsd_tcache_get
#undef tsd_tcache_set
#undef tsd_tcachep_get
#undef tsd_thread_allocated_get
#undef tsd_thread_allocated_set
#undef tsd_thread_allocatedp_get
#undef tsd_thread_deallocated_get
#undef tsd_thread_deallocated_set
#undef tsd_thread_deallocatedp_get
#undef tsd_tls
#undef tsd_tsd
#undef tsd_tsdn
#undef tsd_witness_fork_get
#undef tsd_witness_fork_set
#undef tsd_witness_forkp_get
#undef tsd_witnesses_get
#undef tsd_witnesses_set
#undef tsd_witnessesp_get
#undef tsdn_fetch
#undef tsdn_null
#undef tsdn_tsd
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef witness_assert_depth
#undef witness_assert_depth_to_rank
#undef witness_assert_lockless
#undef witness_assert_not_owner
#undef witness_assert_owner
#undef witness_depth_error
#undef witness_fork_cleanup
#undef witness_init
#undef witness_lock
#undef witness_lock_error
#undef witness_not_owner_error
#undef witness_owner
#undef witness_owner_error
#undef witness_postfork_child
#undef witness_postfork_parent
#undef witness_prefork
#undef witness_unlock
#undef witnesses_cleanup
#undef zone_register

View File

@ -0,0 +1,5 @@
#!/bin/sh
for symbol in `cat $1` ; do
echo "#undef ${symbol}"
done

View File

@ -0,0 +1,207 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(x) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t prng_state_next_u32(uint32_t state);
uint64_t prng_state_next_u64(uint64_t state);
size_t prng_state_next_zu(size_t state);
uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
bool atomic);
uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
uint64_t prng_range_u64(uint64_t *state, uint64_t range);
size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
return ((state * PRNG_A_32) + PRNG_C_32);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
return ((state * PRNG_A_64) + PRNG_C_64);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
return ((state * PRNG_A_64) + PRNG_C_64);
#else
#error Unsupported pointer size
#endif
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
uint32_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
if (atomic) {
uint32_t state0;
do {
state0 = atomic_read_uint32(state);
state1 = prng_state_next_u32(state0);
} while (atomic_cas_uint32(state, state0, state1));
} else {
state1 = prng_state_next_u32(*state);
*state = state1;
}
ret = state1 >> (32 - lg_range);
return (ret);
}
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
size_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
if (atomic) {
size_t state0;
do {
state0 = atomic_read_z(state);
state1 = prng_state_next_zu(state0);
} while (atomic_cas_z(state, state0, state1));
} else {
state1 = prng_state_next_zu(*state);
*state = state1;
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
uint32_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
uint64_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
size_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
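
A standalone sketch (not part of jemalloc) of the generator and rejection loop defined above: the constants mirror PRNG_A_64/PRNG_C_64, the result is taken from the upper bits for the reasons given in the header comment, and out-of-range draws are retried exactly as prng_range_u64() does. All names here are illustrative.

#include <stdint.h>
#include <stdio.h>

/* One step of the 64-bit LCG, using the same constants as PRNG_A_64/PRNG_C_64. */
static uint64_t demo_state_next(uint64_t s)
{
    return (s * 6364136223846793005ULL + 1442695040888963407ULL);
}

/* Draw a value in [0, range) by taking the top lg_range bits (lg_range is the
 * ceiling of lg(range)) and retrying any result that falls outside the range. */
static uint64_t demo_range(uint64_t *state, uint64_t range, unsigned lg_range)
{
    uint64_t r;

    do {
        *state = demo_state_next(*state);
        r = *state >> (64 - lg_range); /* upper bits have the longest period */
    } while (r >= range);
    return r;
}

int main(void)
{
    uint64_t state = 42;
    int i;

    /* Five draws in [0, 10); ceil(lg(10)) == 4 bits. */
    for (i = 0; i < 5; i++)
        printf("%llu\n", (unsigned long long)demo_range(&state, 10, 4));
    return 0;
}

Because range exceeds half of 2^lg_range when lg_range is the ceiling of lg(range), each iteration of the retry loop succeeds with probability greater than 1/2, so the rejection sampling terminates quickly in expectation.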

View File

@ -0,0 +1,547 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (tdata);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (unlikely(tdata == NULL))
return (true);
if (likely(tdata->bytes_until_sample >= usize)) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{
bool sampled, old_sampled;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
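/*
 * Illustrative sketch (not part of this header): how an allocation path would
 * typically drive the API above.  example_prof_malloc(), example_prof_free()
 * and example_arena_malloc() are hypothetical names; usize is assumed to
 * already be the rounded usable size, since prof_alloc_prep() asserts
 * usize == s2u(usize).  Wrapped in #if 0 so it is never compiled.
 */
#if 0
static void *
example_prof_malloc(tsd_t *tsd, size_t usize)
{
	void *ptr;
	prof_tctx_t *tctx;

	/* Returns a real tctx if sampled, else (prof_tctx_t *)(uintptr_t)1U. */
	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	ptr = example_arena_malloc(tsd, usize);	/* hypothetical allocator call */
	if (ptr == NULL) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	/* Records either the sampled tctx or the "not sampled" sentinel. */
	prof_malloc(tsd_tsdn(tsd), ptr, usize, tctx);
	return (ptr);
}

static void
example_prof_free(tsd_t *tsd, void *ptr, size_t usize)
{
	/* Undoes the sampled object's accounting before the actual free. */
	prof_free(tsd, ptr, usize);
}
#endif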

View File

@ -0,0 +1,20 @@
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_sdallocx JEMALLOC_N(sdallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)

View File

@ -0,0 +1,6 @@
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#define je_${n} JEMALLOC_N(${n})"
done
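# Usage sketch (the file names below are assumptions; the real names are
# elided in this diff):
#   sh namespace.sh public_symbols.txt > namespace.h
# Each "name:mangled" input line (see the symbol list in the next file) yields
# one "#define je_<name> JEMALLOC_N(<name>)" line on stdout, as in the
# preceding header.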

View File

@ -0,0 +1,20 @@
malloc_conf:je_malloc_conf
malloc_message:je_malloc_message
malloc:je_malloc
calloc:je_calloc
posix_memalign:je_posix_memalign
aligned_alloc:je_aligned_alloc
realloc:je_realloc
free:je_free
mallocx:je_mallocx
rallocx:je_rallocx
xallocx:je_xallocx
sallocx:je_sallocx
dallocx:je_dallocx
sdallocx:je_sdallocx
nallocx:je_nallocx
mallctl:je_mallctl
mallctlnametomib:je_mallctlnametomib
mallctlbymib:je_mallctlbymib
malloc_stats_print:je_malloc_stats_print
malloc_usable_size:je_malloc_usable_size

View File

@ -0,0 +1,20 @@
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_sdallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_malloc_stats_print
#undef je_malloc_usable_size

View File

@ -0,0 +1,6 @@
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#undef je_${n}"
done

View File

@ -0,0 +1,81 @@
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
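/*
 * Usage sketch (illustrative only; widget_t and the field names are
 * hypothetical): a doubly linked list built from the macros above.  Kept
 * under #if 0 so it is never compiled.
 */
#if 0
typedef struct widget_s widget_t;
struct widget_s {
	int		id;
	ql_elm(widget_t) link;		/* embedded list linkage */
};

static void
example_ql(widget_t *a, widget_t *b)
{
	ql_head(widget_t) head;
	widget_t *w;

	ql_new(&head);
	ql_elm_new(a, link);
	ql_elm_new(b, link);
	ql_tail_insert(&head, a, link);
	ql_tail_insert(&head, b, link);
	ql_foreach(w, &head, link) {
		/* Visits a, then b. */
	}
	ql_remove(&head, a, link);
}
#endif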

View File

@ -0,0 +1,69 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
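/*
 * Usage sketch (illustrative; node_t is hypothetical): qr() builds an
 * intrusive circular list, so a newly initialized element is its own
 * next/prev.  Kept under #if 0 so it is never compiled.
 */
#if 0
typedef struct node_s node_t;
struct node_s {
	int	val;
	qr(node_t) link;
};

static void
example_qr(node_t *a, node_t *b)
{
	node_t *n;

	qr_new(a, link);
	qr_new(b, link);
	qr_after_insert(a, b, link);	/* ring is now a <-> b */
	qr_foreach(n, a, link) {
		/* Visits a, then b, then stops upon wrapping back to a. */
	}
	qr_remove(b, link);		/* b becomes a singleton ring again */
}
#endif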

View File

@ -0,0 +1,60 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
void *ptr;
size_t usize;
};
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
tsd_t *tsd;
assert(config_fill && opt_quarantine);
tsd = tsd_fetch();
if (tsd_quarantine_get(tsd) == NULL)
quarantine_alloc_hook_work(tsd);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
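/*
 * Illustrative sketch (not part of this header): a deallocation path that
 * defers reuse through the quarantine instead of freeing immediately.
 * example_dalloc_direct() is a hypothetical direct-free routine; config_fill
 * and opt_quarantine are jemalloc globals assumed to be visible here.
 */
#if 0
static void
example_dalloc(tsd_t *tsd, void *ptr)
{
	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);	/* parks ptr; evicts an old object if full */
	else
		example_dalloc_direct(tsd, ptr);
}
#endif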

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,366 @@
/*
* This radix tree implementation is tailored to the singular purpose of
* associating metadata with chunks that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;
/*
* RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
/*
* The node allocation callback function's argument is the number of contiguous
* rtree_node_elm_t structures to allocate, and the resulting memory must be
* zeroed.
*/
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_node_elm_s {
union {
void *pun;
rtree_node_elm_t *child;
extent_node_t *val;
};
};
struct rtree_level_s {
/*
* A non-NULL subtree points to a subtree rooted along the hypothetical
* path to the leaf node corresponding to key 0. Depending on what keys
* have been used to store to the tree, an arbitrary combination of
* subtree pointers may remain NULL.
*
* Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
* This results in a 3-level tree, and the leftmost leaf can be directly
 * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
 * 0x000000000000) can be accessed via subtrees[1], and the remainder of
 * the tree can be accessed via subtrees[0].
*
* levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
*
* levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
*
* levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
*
* This has practical implications on x64, which currently uses only the
* lower 47 bits of virtual address space in userland, thus leaving
* subtrees[0] unused and avoiding a level of tree traversal.
*/
union {
void *subtree_pun;
rtree_node_elm_t *subtree;
};
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
};
struct rtree_s {
rtree_node_alloc_t *alloc;
rtree_node_dalloc_t *dalloc;
unsigned height;
/*
* Precomputed table used to convert from the number of leading 0 key
* bits to which subtree level to start at.
*/
unsigned start_level[RTREE_HEIGHT_MAX];
rtree_level_t levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
if (unlikely(key == 0))
return (rtree->height - 1);
start_level = rtree->start_level[lg_floor(key) >>
LG_RTREE_BITS_PER_LEVEL];
assert(start_level < rtree->height);
return (start_level);
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
rtree->levels[level].cumbits)) & ((ZU(1) <<
rtree->levels[level].bits) - 1));
}
JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
rtree_node_elm_t *child;
/* Double-checked read (first read may be stale). */
child = elm->child;
if (!dependent && !rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
bool dependent)
{
rtree_node_elm_t *child;
child = rtree_child_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
if (dependent) {
/*
* Reading a val on behalf of a pointer to a valid allocation is
* guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
*/
return (elm->val);
} else {
/*
* An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
return (atomic_read_p(&elm->pun));
}
}
JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{
atomic_write_p(&elm->pun, val);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level, dependent);
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
unsigned start_level;
rtree_node_elm_t *node;
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
node = rtree_child_tryread(&node[subkey], dependent); \
/* Fall through. */
#define RTREE_GET_LEAF(level) \
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
/* \
* node is a leaf, so it contains values rather than \
* child pointers. \
*/ \
return (rtree_val_read(rtree, &node[subkey], \
dependent));
#if RTREE_HEIGHT_MAX > 1
RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
# error Unsupported RTREE_HEIGHT_MAX
#endif
RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
default: not_reached();
}
#undef RTREE_GET_BIAS
not_reached();
}
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
uintptr_t subkey;
unsigned i, start_level;
rtree_node_elm_t *node, *child;
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_read(rtree, start_level, false);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
subkey = rtree_subkey(rtree, key, i);
if (i == rtree->height - 1) {
/*
* node is a leaf, so it contains values rather than
* child pointers.
*/
rtree_val_write(rtree, &node[subkey], val);
return (false);
}
assert(i + 1 < rtree->height);
child = rtree_child_read(rtree, &node[subkey], i, false);
if (child == NULL)
return (true);
}
not_reached();
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
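/*
 * Usage sketch (illustrative, not part of this header): an rtree keyed by raw
 * addresses at page granularity.  The callback implementations, the use of
 * LG_PAGE for the key width, and the stdlib allocator are assumptions; the
 * node allocator must return zeroed memory.  Kept under #if 0.
 */
#if 0
#include <stdlib.h>

static rtree_node_elm_t *
example_node_alloc(size_t nelms)
{
	return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
}

static void
example_node_dalloc(rtree_node_elm_t *node)
{
	free(node);
}

static bool
example_rtree(rtree_t *rtree, void *addr, extent_node_t *node)
{
	/*
	 * Distinguish every address bit above the page size; the low LG_PAGE
	 * key bits are then never examined during lookups (assumption based
	 * on how rtree_subkey() shifts from the top of the key).
	 */
	unsigned bits = (1U << (LG_SIZEOF_PTR+3)) - LG_PAGE;

	if (rtree_new(rtree, bits, example_node_alloc, example_node_dalloc))
		return (true);
	if (rtree_set(rtree, (uintptr_t)addr, node))
		return (true);
	/* A dependent read may elide synchronization; see rtree_val_read(). */
	return (rtree_get(rtree, (uintptr_t)addr, true) != node);
}
#endif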

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,318 @@
#!/bin/sh
#
# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
# The following limits are chosen such that they cover all supported platforms.
# Pointer sizes.
lg_zarr="2 3"
# Quanta.
lg_qarr=$1
# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=$2
# Maximum lookup size.
lg_kmax=12
# Page sizes.
lg_parr=`echo $3 | tr ',' ' '`
# Size class group size (number of size classes for each size doubling).
lg_g=$4
pow2() {
e=$1
pow2_result=1
while [ ${e} -gt 0 ] ; do
pow2_result=$((${pow2_result} + ${pow2_result}))
e=$((${e} - 1))
done
}
lg() {
x=$1
lg_result=0
while [ ${x} -gt 1 ] ; do
lg_result=$((${lg_result} + 1))
x=$((${x} / 2))
done
}
size_class() {
index=$1
lg_grp=$2
lg_delta=$3
ndelta=$4
lg_p=$5
lg_kmax=$6
if [ ${lg_delta} -ge ${lg_p} ] ; then
psz="yes"
else
pow2 ${lg_p}; p=${pow2_result}
pow2 ${lg_grp}; grp=${pow2_result}
pow2 ${lg_delta}; delta=${pow2_result}
sz=$((${grp} + ${delta} * ${ndelta}))
npgs=$((${sz} / ${p}))
if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
psz="yes"
else
psz="no"
fi
fi
lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
if [ ${pow2_result} -lt ${ndelta} ] ; then
rem="yes"
else
rem="no"
fi
lg_size=${lg_grp}
if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
lg_size=$((${lg_grp} + 1))
else
lg_size=${lg_grp}
rem="yes"
fi
if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
bin="yes"
else
bin="no"
fi
if [ ${lg_size} -lt ${lg_kmax} \
-o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
lg_delta_lookup=${lg_delta}
else
lg_delta_lookup="no"
fi
printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
# Defined upon return:
# - psz ("yes" or "no")
# - bin ("yes" or "no")
# - lg_delta_lookup (${lg_delta} or "no")
}
sep_line() {
echo " \\"
}
size_classes() {
lg_z=$1
lg_q=$2
lg_t=$3
lg_p=$4
lg_g=$5
pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
pow2 ${lg_g}; g=${pow2_result}
echo "#define SIZE_CLASSES \\"
echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
ntbins=0
nlbins=0
lg_tiny_maxclass='"NA"'
nbins=0
npsizes=0
# Tiny size classes.
ndelta=0
index=0
lg_grp=${lg_t}
lg_delta=${lg_grp}
while [ ${lg_grp} -lt ${lg_q} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
fi
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
fi
ntbins=$((${ntbins} + 1))
lg_tiny_maxclass=${lg_grp} # Final written value is correct.
index=$((${index} + 1))
lg_delta=${lg_grp}
lg_grp=$((${lg_grp} + 1))
done
# First non-tiny group.
if [ ${ntbins} -gt 0 ] ; then
sep_line
# The first size class has an unusual encoding, because the size has to be
# split between grp and delta*ndelta.
lg_grp=$((${lg_grp} - 1))
ndelta=1
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
fi
while [ ${ndelta} -lt ${g} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
done
# All remaining groups.
lg_grp=$((${lg_grp} + ${lg_g}))
while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
sep_line
ndelta=1
if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
ndelta_limit=$((${g} - 1))
else
ndelta_limit=${g}
fi
while [ ${ndelta} -le ${ndelta_limit} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
# Final written value is correct:
lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
fi
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
# Final written value is correct:
small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
if [ ${lg_g} -gt 0 ] ; then
lg_large_minclass=$((${lg_grp} + 1))
else
lg_large_minclass=$((${lg_grp} + 2))
fi
fi
# Final written value is correct:
huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
done
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
done
echo
nsizes=${index}
# Defined upon completion:
# - ntbins
# - nlbins
# - nbins
# - nsizes
# - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
# - huge_maxclass
}
cat <<EOF
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
* bin, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
* HUGE_MAXCLASS: Maximum (huge) size class.
*/
#define LG_SIZE_CLASS_GROUP ${lg_g}
EOF
for lg_z in ${lg_zarr} ; do
for lg_q in ${lg_qarr} ; do
lg_t=${lg_tmin}
while [ ${lg_t} -le ${lg_q} ] ; do
# Iterate through page sizes and compute how many bins there are.
for lg_p in ${lg_parr} ; do
echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
echo "#define SIZE_CLASSES_DEFINED"
echo "#define NTBINS ${ntbins}"
echo "#define NLBINS ${nlbins}"
echo "#define NBINS ${nbins}"
echo "#define NSIZES ${nsizes}"
echo "#define NPSIZES ${npsizes}"
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
echo "#define HUGE_MAXCLASS ${huge_maxclass}"
echo "#endif"
echo
done
lg_t=$((${lg_t} + 1))
done
done
done
cat <<EOF
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_bits_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
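# Worked example (illustrative; assumes lg_g=2 and LG_PAGE=12): a tuple
# SC(index, lg_grp=13, lg_delta=11, ndelta=3, ...) encodes
#   size = 2^13 + 3 * 2^11 = 8192 + 6144 = 14336 bytes,
# the third of the four classes between 8 KiB and 16 KiB.  14336 is not a
# multiple of the 4 KiB page, so psz is "no" for this class.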

View File

@ -0,0 +1,246 @@
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
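/*
 * Verification sketch (not part of the generated header): each h column above
 * is simply smootherstep(x) scaled into SMOOTHSTEP_BFP fixed point.  For
 * example, STEP(100) has x = 0.500 and smootherstep(0.5) = 0.5, so
 * h = 0.5 * 2^24 = 0x800000, matching the table entry.  Kept under #if 0.
 */
#if 0
#include <math.h>
#include <stdint.h>

static uint64_t
example_smootherstep_fixed(double x)
{
	double y = 6.0*pow(x, 5) - 15.0*pow(x, 4) + 10.0*pow(x, 3);

	return ((uint64_t)llround(y * (double)(1ULL << SMOOTHSTEP_BFP)));
}
#endif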

Some files were not shown because too many files have changed in this diff