mirror of
https://github.com/freebsd/freebsd-src
synced 2024-10-15 04:43:53 +00:00
Update jemalloc to version 4.0.0.
This commit is contained in:
parent 2cb71df183
commit d0e79aa362

Notes:
svn2git  2020-12-20 02:59:44 +00:00
svn path=/head/; revision=286866

@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2015 Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

@@ -1,10 +1,166 @@
Following are change highlights associated with official releases. Important
-bug fixes are all mentioned, but internal enhancements are omitted here for
-brevity (even though they are more fun to write about). Much more detail can be
-found in the git revision history:
+bug fixes are all mentioned, but some internal enhancements are omitted here for
+brevity. Much more detail can be found in the git revision history:

    https://github.com/jemalloc/jemalloc

* 4.0.0 (August 17, 2015)

  This version contains many speed and space optimizations, both minor and
  major. The major themes are generalization, unification, and simplification.
  Although many of these optimizations cause no visible behavior change, their
  cumulative effect is substantial.

  New features:
  - Normalize size class spacing to be consistent across the complete size
    range. By default there are four size classes per size doubling, but this
    is now configurable via the --with-lg-size-class-group option. Also add the
    --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
    --with-lg-tiny-min options, which can be used to tweak page and size class
    settings. Impacts:
    + Worst case performance for incrementally growing/shrinking reallocation
      is improved because there are far fewer size classes, and therefore
      copying happens less often.
    + Internal fragmentation is limited to 20% for all but the smallest size
      classes (those less than four times the quantum). (1B + 4 KiB)
      and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
    + Chunk fragmentation tends to be lower because there are fewer distinct run
      sizes to pack.
  - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
    "tcache.destroy" mallctls control tcache lifetime and flushing, and the
    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
    control which tcache is used for each operation. (An editorial usage sketch
    follows this release entry.)
  - Implement per thread heap profiling, as well as the ability to
    enable/disable heap profiling on a per thread basis. Add the "prof.reset",
    "prof.lg_sample", "thread.prof.name", "thread.prof.active",
    "opt.prof_thread_active_init", "prof.thread_active_init", and
    "thread.prof.active" mallctls.
  - Add support for per arena application-specified chunk allocators, configured
    via the "arena.<i>.chunk_hooks" mallctl.
  - Refactor huge allocation to be managed by arenas, so that arenas now
    function as general purpose independent allocators. This is important in
    the context of user-specified chunk allocators, aside from the scalability
    benefits. Related new statistics:
    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
      "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
      mallctls provide high level per arena huge allocation statistics.
    + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
      "stats.arenas.<i>.hchunks.<j>.nmalloc",
      "stats.arenas.<i>.hchunks.<j>.ndalloc",
      "stats.arenas.<i>.hchunks.<j>.nrequests", and
      "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
      statistics.
  - Add the 'util' column to malloc_stats_print() output, which reports the
    proportion of available regions that are currently in use for each small
    size class.
  - Add "alloc" and "free" modes for junk filling (see the "opt.junk"
    mallctl), so that it is possible to separately enable junk filling for
    allocation versus deallocation.
  - Add the jemalloc-config script, which provides information about how
    jemalloc was configured, and how to integrate it into application builds.
  - Add metadata statistics, which are accessible via the "stats.metadata",
    "stats.arenas.<i>.metadata.mapped", and
    "stats.arenas.<i>.metadata.allocated" mallctls.
  - Add the "stats.resident" mallctl, which reports the upper limit of
    physically resident memory mapped by the allocator.
  - Add per arena control over unused dirty page purging, via the
    "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
    "stats.arenas.<i>.lg_dirty_mult" mallctls.
  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
    feature on/off during program execution.
  - Add sdallocx(), which implements sized deallocation. The primary
    optimization over dallocx() is the removal of a metadata read, which often
    suffers an L1 cache miss.
  - Add missing header includes in jemalloc/jemalloc.h, so that applications
    only have to #include <jemalloc/jemalloc.h>.
  - Add support for additional platforms:
    + Bitrig
    + Cygwin
    + DragonFlyBSD
    + iOS
    + OpenBSD
    + OpenRISC/or1k

  Optimizations:
  - Maintain dirty runs in per arena LRUs rather than in per arena trees of
    dirty-run-containing chunks. In practice this change significantly reduces
    dirty page purging volume.
  - Integrate whole chunks into the unused dirty page purging machinery. This
    reduces the cost of repeated huge allocation/deallocation, because it
    effectively introduces a cache of chunks.
  - Split the arena chunk map into two separate arrays, in order to increase
    cache locality for the frequently accessed bits.
  - Move small run metadata out of runs, into arena chunk headers. This reduces
    run fragmentation, smaller runs reduce external fragmentation for small size
    classes, and packed (less uniformly aligned) metadata layout improves CPU
    cache set distribution.
  - Randomly distribute large allocation base pointer alignment relative to page
    boundaries in order to more uniformly utilize CPU cache sets. This can be
    disabled via the --disable-cache-oblivious configure option, and queried via
    the "config.cache_oblivious" mallctl.
  - Micro-optimize the fast paths for the public API functions.
  - Refactor thread-specific data to reside in a single structure. This assures
    that only a single TLS read is necessary per call into the public API.
  - Implement in-place huge allocation growing and shrinking.
  - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
    additional optimizations that reduce maximum lookup depth to one or two
    levels. This resolves what was a concurrency bottleneck for per arena huge
    allocation, because a global data structure is critical for determining
    which arenas own which huge allocations.

  Incompatible changes:
  - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
    warnings by default.
  - Assure that the constness of malloc_usable_size()'s return type matches that
    of the system implementation.
  - Change the heap profile dump format to support per thread heap profiling,
    rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
    result, the bundled jeprof must now be used rather than the upstream
    (gperftools) pprof.
  - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
    internally deadlock on some platforms.
  - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
  - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
    "stats.arenas.<i>.bins.<j>.curregs".
  - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
  - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.

  Removed features:
  - Remove the *allocm() API, which is superseded by the *allocx() API.
  - Remove the --enable-dss options, and make dss non-optional on all platforms
    which support sbrk(2).
  - Remove the "arenas.purge" mallctl, which was obsoleted by the
    "arena.<i>.purge" mallctl in 3.1.0.
  - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
    detects whether it is running inside Valgrind.
  - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
    "stats.huge.ndalloc" mallctls.
  - Remove the --enable-mremap option.
  - Remove the "stats.chunks.current", "stats.chunks.total", and
    "stats.chunks.high" mallctls.

  Bug fixes:
  - Fix the cactive statistic to decrease (rather than increase) when active
    memory decreases. This regression was first released in 3.5.0.
  - Fix OOM handling in memalign() and valloc(). A variant of this bug existed
    in all releases since 2.0.0, which introduced these functions.
  - Fix an OOM-related regression in arena_tcache_fill_small(), which could
    cause cache corruption on OOM. This regression was present in all releases
    from 2.2.0 through 3.6.0.
  - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
    calloc(), and realloc() when profiling is enabled.
  - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
    "secondary" precedence is specified, but sbrk(2) is not supported.
  - Fix fallback lg_floor() implementations to handle extremely large inputs.
  - Ensure the default purgeable zone is after the default zone on OS X.
  - Fix latent bugs in atomic_*().
  - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
  - Fix tls_model configuration to enable the initial-exec model when possible.
  - Mark malloc_conf as a weak symbol so that the application can override it.
  - Correctly detect glibc's adaptive pthread mutexes.
  - Fix the --without-export configure option.
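(The sketch below is editorial, not part of the upstream ChangeLog: it shows how
the explicit-tcache mallctls, MALLOCX_TCACHE(tc), and sdallocx() described above
fit together. It assumes a stock jemalloc 4.0.0 build whose public symbols are
unprefixed; error handling is minimal.)

    #include <jemalloc/jemalloc.h>

    static void
    tcache_example(void)
    {
        unsigned tc;
        size_t sz = sizeof(tc);

        /* Create an explicit tcache; its index is returned through oldp. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
            return;

        /* Allocate 4 KiB from that tcache... */
        void *p = mallocx(4096, MALLOCX_TCACHE(tc));
        if (p != NULL) {
            /* ...and free it with sized deallocation. */
            sdallocx(p, 4096, MALLOCX_TCACHE(tc));
        }

        /* Destroy the tcache once it is no longer needed. */
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    }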

* 3.6.0 (March 31, 2014)

  This version contains a critical bug fix for a regression present in 3.5.0 and

@@ -21,7 +177,7 @@ found in the git revision history:
    backtracing to be reliable.
  - Use dss allocation precedence for huge allocations as well as small/large
    allocations.
- - Fix test assertion failure message formatting. This bug did not manifect on
+ - Fix test assertion failure message formatting. This bug did not manifest on
    x86_64 systems because of implementation subtleties in va_list.
  - Fix inconsequential test failures for hash and SFMT code.

@@ -516,7 +672,7 @@ found in the git revision history:
  - Make it possible for the application to manually flush a thread's cache, via
    the "tcache.flush" mallctl.
  - Base maximum dirty page count on proportion of active memory.
- - Compute various addtional run-time statistics, including per size class
+ - Compute various additional run-time statistics, including per size class
    statistics for large objects.
  - Expose malloc_stats_print(), which can be called repeatedly by the
    application.

@@ -1,6 +1,6 @@
$FreeBSD$
-.git
-.gitignore
+.autom4te.cfg
+.git*
FREEBSD-*
INSTALL
Makefile*

@@ -40,7 +40,10 @@ include/jemalloc/jemalloc_protos.h
include/jemalloc/jemalloc_protos.h.in
include/jemalloc/jemalloc_rename.h
include/jemalloc/jemalloc_rename.sh
+include/jemalloc/jemalloc_typedefs.h.in
include/msvc_compat/
install-sh
+jemalloc.pc*
+src/valgrind.c
src/zone.c
test/

@@ -1,15 +1,14 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index d8e2e71..330ba2a 100644
+index 8fc774b..47b453c 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
-@@ -57,12 +57,23 @@
+@@ -53,6 +53,17 @@
  <para>This manual describes jemalloc @jemalloc_version@. More information
  can be found at the <ulink
  url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para>
 +
 +  <para>The following configuration options are enabled in libc's built-in
-+  jemalloc: <option>--enable-dss</option>,
-+  <option>--enable-experimental</option>, <option>--enable-fill</option>,
++  jemalloc: <option>--enable-fill</option>,
 +  <option>--enable-lazy-lock</option>, <option>--enable-munmap</option>,
 +  <option>--enable-stats</option>, <option>--enable-tcache</option>,
 +  <option>--enable-tls</option>, <option>--enable-utrace</option>, and

@@ -17,17 +16,11 @@ index d8e2e71..330ba2a 100644
 +  <option>--enable-debug</option> is enabled in development versions of
 +  FreeBSD (controlled by the <constant>MALLOC_PRODUCTION</constant> make
 +  variable).</para>
 +
  </refsect1>
  <refsynopsisdiv>
    <title>SYNOPSIS</title>
    <funcsynopsis>
      <funcsynopsisinfo>#include <<filename class="headerfile">stdlib.h</filename>>
-#include <<filename class="headerfile">jemalloc/jemalloc.h</filename>></funcsynopsisinfo>
+#include <<filename class="headerfile">malloc_np.h</filename>></funcsynopsisinfo>
    <refsect2>
      <title>Standard API</title>
      <funcprototype>

-@@ -2342,4 +2353,19 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2759,4 +2770,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
  <para>The <function>posix_memalign<parameter/></function> function conforms
  to IEEE Std 1003.1-2001 (“POSIX.1”).</para>
  </refsect1>

@@ -38,9 +31,8 @@ index d8e2e71..330ba2a 100644
 +  FreeBSD 7.0.</para>
 +
 +  <para>The <function>aligned_alloc<parameter/></function>,
-+  <function>malloc_stats_print<parameter/></function>,
-+  <function>mallctl*<parameter/></function>, and
-+  <function>*allocm<parameter/></function> functions first appeared in
++  <function>malloc_stats_print<parameter/></function>, and
++  <function>mallctl*<parameter/></function> functions first appeared in
 +  FreeBSD 10.0.</para>
 +
 +  <para>The <function>*allocx<parameter/></function> functions first appeared

@@ -48,20 +40,11 @@ index d8e2e71..330ba2a 100644
 +  </refsect1>
 </refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 574bbb1..e3eafdf 100644
+index 7a137b6..b0001e9 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_H
 #define JEMALLOC_INTERNAL_H
+#include "libc_private.h"
+#include "namespace.h"
+
 #include <math.h>
 #ifdef _WIN32
 # include <windows.h>
-@@ -65,6 +68,9 @@ typedef intptr_t ssize_t;
-#include <valgrind/memcheck.h>
+@@ -8,6 +8,9 @@
+#include <sys/ktrace.h>
 #endif

+#include "un-namespace.h"

@@ -70,7 +53,7 @@ index 574bbb1..e3eafdf 100644
 #define JEMALLOC_NO_DEMANGLE
 #ifdef JEMALLOC_JET
 # define JEMALLOC_N(n) jet_##n
-@@ -99,13 +105,7 @@ static const bool config_fill =
+@@ -42,13 +45,7 @@
 false
 #endif
 ;

@@ -85,11 +68,25 @@ index 574bbb1..e3eafdf 100644
 static const bool config_prof =
 #ifdef JEMALLOC_PROF
 true
diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
index a601d6e..e7094b2 100644
--- a/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,6 +1,9 @@
 #ifndef JEMALLOC_INTERNAL_DECLS_H
 #define JEMALLOC_INTERNAL_DECLS_H

+#include "libc_private.h"
+#include "namespace.h"
+
 #include <math.h>
 #ifdef _WIN32
 # include <windows.h>
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index de44e14..564d604 100644
+index f051f29..561378f 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
-@@ -43,9 +43,6 @@ struct malloc_mutex_s {
+@@ -47,15 +47,13 @@ struct malloc_mutex_s {

 #ifdef JEMALLOC_LAZY_LOCK
 extern bool isthreaded;

@@ -99,24 +96,31 @@ index de44e14..564d604 100644
 #endif

 bool malloc_mutex_init(malloc_mutex_t *mutex);
 void malloc_mutex_prefork(malloc_mutex_t *mutex);
 void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
 void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool malloc_mutex_first_thread(void);
 bool mutex_boot(void);

 #endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 93516d2..22f9af9 100644
+index dbf6aa7..f87dba8 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
-@@ -226,7 +226,6 @@ iralloc
-iralloct
-iralloct_realign
+@@ -277,7 +277,6 @@ iralloct_realign
+isalloc
+isdalloct
+isqalloc
 -isthreaded
 ivsalloc
 ixalloc
 jemalloc_postfork_child
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..94554bc
+index 0000000..66d6da5
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,134 @@
+@@ -0,0 +1,137 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */

@@ -192,6 +196,7 @@ index 0000000..94554bc
+#undef je_realloc
+#undef je_free
+#undef je_posix_memalign
+#undef je_aligned_alloc
+#undef je_malloc_usable_size
+#undef je_mallocx
+#undef je_rallocx

@@ -209,6 +214,7 @@ index 0000000..94554bc
+#define je_realloc __realloc
+#define je_free __free
+#define je_posix_memalign __posix_memalign
+#define je_aligned_alloc __aligned_alloc
+#define je_malloc_usable_size __malloc_usable_size
+#define je_mallocx __mallocx
+#define je_rallocx __rallocx

@@ -238,6 +244,7 @@ index 0000000..94554bc
+__weak_reference(__realloc, realloc);
+__weak_reference(__free, free);
+__weak_reference(__posix_memalign, posix_memalign);
+__weak_reference(__aligned_alloc, aligned_alloc);
+__weak_reference(__malloc_usable_size, malloc_usable_size);
+__weak_reference(__mallocx, mallocx);
+__weak_reference(__rallocx, rallocx);

@@ -263,32 +270,142 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
 EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 204778b..9e5f2df 100644
+index ed7863b..d078a1f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
-@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
-    THREAD_ALLOCATED_INITIALIZER)
+@@ -4,6 +4,10 @@
+/******************************************************************************/
+/* Data. */

+/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
+const char *__malloc_options_1_0 = NULL;
+__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
+
 /* Runtime configuration options. */
-const char *je_malloc_conf;
+const char *je_malloc_conf JEMALLOC_ATTR(weak);
 bool opt_abort =

@@ -457,7 +461,8 @@ malloc_conf_init(void)
 #endif
 ;
@@ -2475,6 +2479,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 */
 /******************************************************************************/
 /*
+ * Begin compatibility functions.
+ */
+
+#define ALLOCM_LG_ALIGN(la)    (la)
+#define ALLOCM_ALIGN(a)    (ffsl(a)-1)
+#define ALLOCM_ZERO    ((int)0x40)
+#define ALLOCM_NO_MOVE    ((int)0x80)
+
+#define ALLOCM_SUCCESS    0
+#define ALLOCM_ERR_OOM    1
+#define ALLOCM_ERR_NOT_MOVED    2
+
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+    void *p;
+
+    assert(ptr != NULL);
+
+    p = je_mallocx(size, flags);
+    if (p == NULL)
+        return (ALLOCM_ERR_OOM);
+    if (rsize != NULL)
+        *rsize = isalloc(p, config_prof);
+    *ptr = p;
+    return (ALLOCM_SUCCESS);
+}
+
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+    int ret;
+    bool no_move = flags & ALLOCM_NO_MOVE;
+
+    assert(ptr != NULL);
+    assert(*ptr != NULL);
+    assert(size != 0);
+    assert(SIZE_T_MAX - size >= extra);
+
+    if (no_move) {
+        size_t usize = je_xallocx(*ptr, size, extra, flags);
+        ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
+        if (rsize != NULL)
+            *rsize = usize;
+    } else {
+        void *p = je_rallocx(*ptr, size+extra, flags);
+        if (p != NULL) {
+            *ptr = p;
+            ret = ALLOCM_SUCCESS;
+        } else
+            ret = ALLOCM_ERR_OOM;
+        if (rsize != NULL)
+            *rsize = isalloc(*ptr, config_prof);
+    }
+    return (ret);
+}
+
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+
+    assert(rsize != NULL);
+    *rsize = je_sallocx(ptr, flags);
+    return (ALLOCM_SUCCESS);
+}
+
+int
+je_dallocm(void *ptr, int flags)
+{
+
+    je_dallocx(ptr, flags);
+    return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+    size_t usize;
+
+    usize = je_nallocx(size, flags);
+    if (usize == 0)
+        return (ALLOCM_ERR_OOM);
+    if (rsize != NULL)
+        *rsize = usize;
+    return (ALLOCM_SUCCESS);
+}
+
+#undef ALLOCM_LG_ALIGN
+#undef ALLOCM_ALIGN
+#undef ALLOCM_ZERO
+#undef ALLOCM_NO_MOVE
+
+#undef ALLOCM_SUCCESS
+#undef ALLOCM_ERR_OOM
+#undef ALLOCM_ERR_NOT_MOVED
+
+/*
+ * End compatibility functions.
+ */
+/******************************************************************************/
+/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */
@@ -2575,4 +2680,11 @@ jemalloc_postfork_child(void)
 ctl_postfork_child();
}

-    if ((opts = getenv(envname)) != NULL) {
+    if (issetugid() == 0 && (opts = getenv(envname)) !=
+        NULL) {
 /*
 * Do nothing; opts is already initialized to
 * the value of the MALLOC_CONF environment
+void
+_malloc_first_thread(void)
+{
+
+    (void)malloc_mutex_first_thread();
+}
+
/******************************************************************************/
diff --git a/src/mutex.c b/src/mutex.c
-index 788eca3..6f5954e 100644
+index 2d47af9..934d5aa 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,

@@ -296,21 +413,45 @@ index 788eca3..6f5954e 100644
 JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
     void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+    _pthread_mutex_init_calloc_cb);
+
+#pragma weak _pthread_mutex_init_calloc_cb
+int
-+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
++_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t))
+{
+
-+    return (0);
++    return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
++        __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
++        calloc_cb));
+}
 #endif

 bool
@@ -137,7 +148,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
 }

 bool
-mutex_boot(void)
+malloc_mutex_first_thread(void)
 {

 #ifdef JEMALLOC_MUTEX_INIT_CB
@@ -151,3 +162,14 @@ mutex_boot(void)
 #endif
     return (false);
 }
+
+bool
+mutex_boot(void)
+{
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+    return (malloc_mutex_first_thread());
+#else
+    return (false);
+#endif
+}
diff --git a/src/util.c b/src/util.c
-index 93a19fd..70b3e45 100644
+index 4cb0d6c..25b61c2 100644
--- a/src/util.c
+++ b/src/util.c
@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)
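(Editorial note on the compatibility shims above: they keep FreeBSD's historical
*allocm() ABI working on top of the new *allocx() primitives, even though the
*allocm() API is otherwise removed in 4.0.0. A hedged usage sketch, assuming the
legacy declarations that FreeBSD 10 shipped in <malloc_np.h>; the helper name is
hypothetical.)

    #include <malloc_np.h> /* assumed: legacy allocm()/dallocm() declarations */

    static void
    legacy_caller(void)
    {
        void *p;
        size_t usize;

        /* The shim forwards to mallocx() and reports the usable size. */
        if (allocm(&p, &usize, 4096, ALLOCM_ZERO) == ALLOCM_SUCCESS)
            dallocm(p, 0);
    }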
@@ -80,7 +80,13 @@ do_extract() {
 }

 do_diff() {
-    (cd ${work}; git add -A; git diff --cached) > FREEBSD-diffs
+    (
+        cd ${work}
+        find . -name '*.orig' -delete
+        find . -name '*.rej' -delete
+        git add -A
+        git diff --cached
+    ) > FREEBSD-diffs
 }

 command=$1
@@ -1 +1 @@
-3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
+4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -11,6 +11,7 @@

#define atomic_read_uint64(p)    atomic_add_uint64(p, 0)
#define atomic_read_uint32(p)    atomic_add_uint32(p, 0)
+#define atomic_read_p(p)    atomic_add_p(p, NULL)
#define atomic_read_z(p)    atomic_add_z(p, 0)
#define atomic_read_u(p)    atomic_add_u(p, 0)

@@ -18,89 +19,139 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

/*
 * All arithmetic functions return the arithmetic result of the atomic
 * operation. Some atomic operation APIs return the value prior to mutation, in
 * which case the following functions must redundantly compute the result so
 * that it can be returned. These functions are normally inlined, so the extra
 * operations can be optimized away if the return values aren't used by the
 * callers.
 *
 *   <t> atomic_read_<t>(<t> *p) { return (*p); }
 *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
 *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
 *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
 *   {
 *       if (*p != c)
 *           return (true);
 *       *p = s;
 *       return (false);
 *   }
 *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
 */

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
-# elif (defined(__amd64__) || defined(__x86_64__))
+# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t = x;

    asm volatile (
        "lock; xaddq %0, %1;"
-        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
    );

-    return (x);
+    return (t + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t;

    x = (uint64_t)(-(int64_t)x);
    t = x;
    asm volatile (
        "lock; xaddq %0, %1;"
-        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
    );

-    return (x);
+    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgq %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory" /* Clobbers. */
    );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    asm volatile (
        "xchgq %1, %0;" /* Lock is implied by xchgq. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
    );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t

@@ -124,7 +175,88 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)

    return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    uint64_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint64(p);
    } while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint64_t o;

    o = InterlockedCompareExchange64(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

@@ -138,6 +270,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    __sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif

@@ -145,74 +291,91 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)

/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
-#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t = x;

    asm volatile (
        "lock; xaddl %0, %1;"
-        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
    );

-    return (x);
+    return (t + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t;

    x = (uint32_t)(-(int32_t)x);
    t = x;
    asm volatile (
        "lock; xaddl %0, %1;"
-        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
    );

-    return (x);
+    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgl %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory"
    );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    asm volatile (
        "xchgl %1, %0;" /* Lock is implied by xchgl. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
    );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t

@@ -228,7 +391,84 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)

    return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!atomic_cmpset_32(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    uint32_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint32(p);
    } while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint32_t o;

    o = InterlockedCompareExchange(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

@@ -242,10 +482,72 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    __sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t

@@ -272,6 +574,28 @@ atomic_sub_z(size_t *p, size_t x)
#endif
}

JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned

@@ -297,6 +621,29 @@ atomic_sub_u(unsigned *p, unsigned x)
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{

#if (LG_SIZEOF_INT == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
#endif
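(Editorial sketch of the semantics documented in the header's comment block:
atomic_add_*() returns the post-update value and atomic_cas_*() returns false on
success, so a capped-increment retry loop built on this API looks as follows.
The counter_try_incr() helper is hypothetical.)

    static bool
    counter_try_incr(uint64_t *counter, uint64_t max)
    {
        uint64_t old;

        do {
            old = atomic_read_uint64(counter);
            if (old >= max)
                return (true); /* Failure, mirroring atomic_cas_*(). */
        } while (atomic_cas_uint64(counter, old, old + 1));
        return (false); /* Success. */
    }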
@@ -10,9 +10,7 @@
#ifdef JEMALLOC_H_EXTERNS

void *base_alloc(size_t size);
-void *base_calloc(size_t number, size_t size);
-extent_node_t *base_node_alloc(void);
-void base_node_dealloc(extent_node_t *node);
+void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);

@@ -3,6 +3,7 @@

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS    LG_RUN_MAXREGS
+#define BITMAP_MAXBITS    (ZU(1) << LG_BITMAP_MAXBITS)

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;

@@ -14,6 +15,51 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS    (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK    (BITMAP_GROUP_NBITS-1)

/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
    ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
 * Number of groups required at a particular level for a given number of bits.
 */
#define BITMAP_GROUPS_L0(nbits) \
    BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
        BITMAP_BITS2GROUPS((nbits)))))

/*
 * Assuming the number of levels, number of groups required for a given number
 * of bits.
 */
#define BITMAP_GROUPS_1_LEVEL(nbits) \
    BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
    (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
    (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
    (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))

/*
 * Maximum number of groups required to support LG_BITMAP_MAXBITS.
 */
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX    BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX    BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX    BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX    BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif

/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
    (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \

@@ -93,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
    bitmap_t g;

    assert(bit < binfo->nbits);
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;

@@ -126,15 +172,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
    bitmap_t g;
    unsigned i;

-    assert(bitmap_full(bitmap, binfo) == false);
+    assert(!bitmap_full(bitmap, binfo));

    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
-    bit = ffsl(g) - 1;
+    bit = jemalloc_ffsl(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
-        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+        bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
    }

    bitmap_set(bitmap, binfo, bit);

@@ -158,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
    assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;

@@ -172,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
        == 0);
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
-    if (propagate == false)
+    if (!propagate)
        break;
    }
}
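(Editorial worked example of the group arithmetic above, assuming 64-bit groups,
i.e. LG_BITMAP_GROUP_NBITS == 6: 512 bits need (512 + 63) >> 6 = 8 level-0
groups, those 8 group-summary bits need one level-1 group, so
BITMAP_GROUPS_2_LEVEL(512) = 9. The standalone check below re-derives only that
arithmetic; its macro names are local to the example.)

    #include <assert.h>

    #define LG_GROUP_NBITS 6 /* assumed 64-bit bitmap_t groups */
    #define GROUP_NBITS_MASK ((1U << LG_GROUP_NBITS) - 1)
    #define BITS2GROUPS(nbits) (((nbits) + GROUP_NBITS_MASK) >> LG_GROUP_NBITS)

    int
    main(void)
    {
        assert(BITS2GROUPS(512) == 8);              /* level 0 */
        assert(BITS2GROUPS(BITS2GROUPS(512)) == 1); /* level 1 */
        assert(BITS2GROUPS(512) + BITS2GROUPS(BITS2GROUPS(512)) == 9);
        return (0);
    }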
@@ -5,7 +5,7 @@
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
-#define LG_CHUNK_DEFAULT    22
+#define LG_CHUNK_DEFAULT    21

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \

@@ -19,6 +19,16 @@
#define CHUNK_CEILING(s) \
    (((s) + chunksize_mask) & ~chunksize_mask)

#define CHUNK_HOOKS_INITIALIZER { \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL \
}

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

@@ -30,23 +40,36 @@
extern size_t opt_lg_chunk;
extern const char *opt_dss;

-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t stats_chunks;

-extern rtree_t *chunks_rtree;
+extern rtree_t chunks_rtree;

extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
-extern size_t map_bias; /* Number of arena chunk header pages. */
-extern size_t arena_maxclass; /* Max size class for arenas. */

-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
-    dss_prec_t dss_prec);
-void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(void *chunk, size_t size, bool unmap);
+extern const chunk_hooks_t chunk_hooks_default;
+
+chunk_hooks_t chunk_hooks_get(arena_t *arena);
+chunk_hooks_t chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+
+bool chunk_register(const void *chunk, const extent_node_t *node);
+void chunk_deregister(const void *chunk, const extent_node_t *node);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+    size_t length);
+bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);

@@ -56,6 +79,19 @@ void chunk_postfork_child(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{

    return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif

#endif /* JEMALLOC_H_INLINES */
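(Editorial sketch: the chunk_hooks_t machinery declared above is what the new
"arena.<i>.chunk_hooks" mallctl reads and writes; the struct's seven members
match CHUNK_HOOKS_INITIALIZER. The wrapper below uses the public 4.0.0
chunk_alloc_t signature, captures the current hooks for arena 0, and delegates;
my_chunk_alloc/install_hooks are hypothetical names.)

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    static chunk_hooks_t old_hooks;

    static void *
    my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
        bool *commit, unsigned arena_ind)
    {
        /* Observe the request, then delegate to the original hook. */
        return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
            arena_ind));
    }

    static void
    install_hooks(void)
    {
        chunk_hooks_t hooks;
        size_t sz = sizeof(old_hooks);

        /* Read the current hooks for arena 0, swap in alloc, write back. */
        if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
            return;
        hooks = old_hooks;
        hooks.alloc = my_chunk_alloc;
        mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
    }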
|
||||
/******************************************************************************/
|
||||
|
||||
|
|
|
@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
|
|||
|
||||
dss_prec_t chunk_dss_prec_get(void);
|
||||
bool chunk_dss_prec_set(dss_prec_t dss_prec);
|
||||
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
|
||||
void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool *commit);
|
||||
bool chunk_in_dss(void *chunk);
|
||||
bool chunk_dss_boot(void);
|
||||
void chunk_dss_prefork(void);
|
||||
|
|
|
@ -9,10 +9,9 @@
|
|||
/******************************************************************************/
|
||||
#ifdef JEMALLOC_H_EXTERNS
|
||||
|
||||
bool pages_purge(void *addr, size_t length);
|
||||
|
||||
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
|
||||
bool chunk_dealloc_mmap(void *chunk, size_t size);
|
||||
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
|
||||
bool *commit);
|
||||
bool chunk_dalloc_mmap(void *chunk, size_t size);
|
||||
|
||||
#endif /* JEMALLOC_H_EXTERNS */
|
||||
/******************************************************************************/
|
||||
|
|
|
@@ -66,13 +66,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
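The declarations above are jemalloc's internal cuckoo hash; the 4.0.0 change threads a tsd_t through the allocating operations. A hedged sketch of how the pieces fit together (internal-only code; ckh_string_keycomp is the string comparator paired with ckh_string_hash, and the functions return true on failure):

ckh_t ckh;
tsd_t *tsd = tsd_fetch();
void *k, *v;

if (ckh_new(tsd, &ckh, 64, ckh_string_hash, ckh_string_keycomp)) {
    /* Allocation failure while creating the table. */
}
if (ckh_insert(tsd, &ckh, "key", "value")) {
    /* Insertion failed (out of memory). */
}
if (!ckh_search(&ckh, "key", &k, &v)) {
    /* Found: k and v now hold the stored key/data pointers. */
}
ckh_delete(tsd, &ckh);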
@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
    bool initialized;
    unsigned nthreads;
    const char *dss;
    ssize_t lg_dirty_mult;
    size_t pactive;
    size_t pdirty;
    arena_stats_t astats;
@@ -46,22 +47,15 @@ struct ctl_arena_stats_s {

    malloc_bin_stats_t bstats[NBINS];
    malloc_large_stats_t *lstats; /* nlclasses elements. */
    malloc_huge_stats_t *hstats; /* nhclasses elements. */
};

struct ctl_stats_s {
    size_t allocated;
    size_t active;
    size_t metadata;
    size_t resident;
    size_t mapped;
    struct {
        size_t current;   /* stats_chunks.curchunks */
        uint64_t total;   /* stats_chunks.nchunks */
        size_t high;      /* stats_chunks.highchunks */
    } chunks;
    struct {
        size_t allocated; /* huge_allocated */
        uint64_t nmalloc; /* huge_nmalloc */
        uint64_t ndalloc; /* huge_ndalloc */
    } huge;
    unsigned narenas;
    ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -7,25 +7,53 @@ typedef struct extent_node_s extent_node_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents. */
/* Tree of extents.  Use accessor functions for en_* fields. */
struct extent_node_s {
    /* Linkage for the size/address-ordered tree. */
    rb_node(extent_node_t) link_szad;

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) link_ad;

    /* Profile counters, used for huge objects. */
    prof_ctx_t *prof_ctx;
    /* Arena from which this extent came, if any. */
    arena_t *en_arena;

    /* Pointer to the extent that this tree node is responsible for. */
    void *addr;
    void *en_addr;

    /* Total region size. */
    size_t size;
    size_t en_size;

    /* True if zero-filled; used by chunk recycling code. */
    bool zeroed;
    /*
     * The zeroed flag is used by chunk recycling code to track whether
     * memory is zero-filled.
     */
    bool en_zeroed;

    /*
     * True if physical memory is committed to the extent, whether
     * explicitly or implicitly as on a system that overcommits and
     * satisfies physical memory needs on demand via soft page faults.
     */
    bool en_committed;

    /*
     * The achunk flag is used to validate that huge allocation lookups
     * don't return arena chunks.
     */
    bool en_achunk;

    /* Profile counters, used for huge objects. */
    prof_tctx_t *en_prof_tctx;

    /* Linkage for arena's runs_dirty and chunks_cache rings. */
    arena_runs_dirty_link_t rd;
    qr(extent_node_t) cc_link;

    union {
        /* Linkage for the size/address-ordered tree. */
        rb_node(extent_node_t) szad_link;

        /* Linkage for arena's huge and node_cache lists. */
        ql_elm(extent_node_t) ql_link;
    };

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;

@@ -41,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
    size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{

    return (node->en_arena);
}

JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{

    return (node->en_addr);
}

JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{

    return (node->en_size);
}

JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{

    return (node->en_zeroed);
}

JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{

    assert(!node->en_achunk);
    return (node->en_committed);
}

JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{

    return (node->en_achunk);
}

JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{

    return (node->en_prof_tctx);
}

JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{

    node->en_arena = arena;
}

JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{

    node->en_addr = addr;
}

JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{

    node->en_size = size;
}

JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{

    node->en_zeroed = zeroed;
}

JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{

    node->en_committed = committed;
}

JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{

    node->en_achunk = achunk;
}

JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{

    node->en_prof_tctx = tctx;
}

JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
    bool zeroed, bool committed)
{

    extent_node_arena_set(node, arena);
    extent_node_addr_set(node, addr);
    extent_node_size_set(node, size);
    extent_node_zeroed_set(node, zeroed);
    extent_node_committed_set(node, committed);
    extent_node_achunk_set(node, false);
    if (config_prof)
        extent_node_prof_tctx_set(node, NULL);
}

JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{

    qr_new(&node->rd, rd_link);
    qr_new(node, cc_link);
}

JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{

    qr_meld(runs_dirty, &node->rd, rd_link);
    qr_meld(chunks_dirty, node, cc_link);
}

JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{

    qr_remove(&node->rd, rd_link);
    qr_remove(node, cc_link);
}

#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
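An illustrative use of the accessor API above (a sketch, not part of the diff; arena, addr, and chunksize are assumed to be in scope): initialize a node for a freshly mapped, committed, zero-filled extent and query it back.

extent_node_t node;

extent_node_init(&node, arena, addr, chunksize, true, true);
assert(extent_node_zeroed_get(&node));
assert(extent_node_committed_get(&node));
assert(extent_node_size_get(&node) == chunksize);
/* The achunk flag is already false after extent_node_init(). */
assert(!extent_node_achunk_get(&node));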
@@ -35,13 +35,14 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{

    return (x << r) | (x >> (32 - r));
    return ((x << r) | (x >> (32 - r)));
}

JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));

    return ((x << r) | (x >> (64 - r)));
}

JEMALLOC_INLINE uint32_t

@@ -76,9 +77,9 @@ hash_fmix_64(uint64_t k)
{

    k ^= k >> 33;
    k *= QU(0xff51afd7ed558ccdLLU);
    k *= KQU(0xff51afd7ed558ccd);
    k ^= k >> 33;
    k *= QU(0xc4ceb9fe1a85ec53LLU);
    k *= KQU(0xc4ceb9fe1a85ec53);
    k ^= k >> 33;

    return (k);

@@ -247,8 +248,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t h1 = seed;
    uint64_t h2 = seed;

    const uint64_t c1 = QU(0x87c37b91114253d5LLU);
    const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
    const uint64_t c1 = KQU(0x87c37b91114253d5);
    const uint64_t c2 = KQU(0x4cf5ad432745937f);

    /* body */
    {
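The rotate helpers are unchanged in behavior; the extra parentheses only bring the returns in line with jemalloc's return-expression style. A worked example of what hash_rotl_32() computes:

uint32_t x = 0x80000001U;
/* (x << 1) == 0x00000002, (x >> 31) == 0x00000001, OR == 0x00000003 */
uint32_t r = hash_rotl_32(x, 1);
assert(r == 0x3U);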
@@ -9,34 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;

/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;

void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
    dss_prec_t dss_prec);
void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
    size_t extra, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t extra, size_t alignment, bool zero,
    tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
void huge_prefork(void);
void huge_postfork_parent(void);
void huge_postfork_child(void);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
[File diff suppressed because it is too large.]
@@ -0,0 +1,67 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H

#include "libc_private.h"
#include "namespace.h"

#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  include "msvc_compat/windows_extra.h"

#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  if !defined(__pnacl__) && !defined(__native_client__)
#    include <sys/syscall.h>
#    if !defined(SYS_write) && defined(__NR_write)
#      define SYS_write __NR_write
#    endif
#    include <sys/uio.h>
#  endif
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
#  ifdef JEMALLOC_HAS_RESTRICT
#    define restrict __restrict
#  endif
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{

    return (c == '\t' || c == ' ');
}
#endif
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#endif /* JEMALLOC_INTERNAL_DECLS_H */
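The offsetof() fallback above computes a member's byte offset from a hypothetical object at address NULL, a common pre-C11 idiom (real compilers provide offsetof in <stddef.h>). A small illustration of what it yields:

struct example {
    int a;    /* offset 0 */
    char b;   /* offset 4 on typical ABIs */
    double c; /* offset 8 after padding, on typical ABIs */
};

size_t off = offsetof(struct example, c); /* typically 8 */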
@@ -23,6 +23,9 @@
 */
#define CPU_SPINWAIT __asm__ volatile("pause")

/* Defined if C11 atomics are available. */
/* #undef JEMALLOC_C11ATOMICS */

/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#define JEMALLOC_ATOMIC9 1

@@ -36,7 +39,7 @@
 * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
 * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
 * functions are defined in libgcc instead of being inlines)
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */

@@ -44,16 +47,36 @@
 * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
 * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
 * functions are defined in libgcc instead of being inlines)
 * functions are defined in libgcc instead of being inlines).
 */
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if madvise(2) is available.
 */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if OSSpin*() functions are available, as provided by Darwin, and
 * documented in the spinlock(3) manual page.
 */
/* #undef JEMALLOC_OSSPIN */

/*
 * Defined if secure_getenv(3) is available.
 */
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
 * Defined if issetugid(2) is available.
 */
#define JEMALLOC_HAVE_ISSETUGID

/*
 * Defined if _malloc_thread_cleanup() exists.  At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc

@@ -77,9 +100,6 @@
 */
#define JEMALLOC_MUTEX_INIT_CB 1

/* Defined if sbrk() is supported. */
#define JEMALLOC_HAVE_SBRK

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

@@ -138,8 +158,26 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK

/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#define STATIC_PAGE_SHIFT 12
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#define LG_TINY_MIN 3

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, use munmap() to unmap freed chunks, rather than storing them for

@@ -148,22 +186,28 @@
 */
#define JEMALLOC_MUNMAP

/*
 * If defined, use mremap(...MREMAP_FIXED...) for huge realloc().  This is
 * disabled by default because it is Linux-specific and it will cause virtual
 * memory map holes, much like munmap(2) does.
 */
/* #undef JEMALLOC_MREMAP */

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * ffs()/ffsl() functions to use for bitmapping.  Don't use these directly;
 * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
 * within jemalloc-owned chunks before dereferencing them.
 */
/* #undef JEMALLOC_IVSALLOC */

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */

@@ -183,9 +227,7 @@
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
#define JEMALLOC_PURGE_MADVISE_FREE

/*
 * Define if operating system has alloca.h header.
 */
/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */

@@ -203,4 +245,19 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
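These definitions express configuration values as base-2 logarithms, which the internal headers expand into sizes and masks. Roughly how LG_PAGE is consumed (a sketch; jemalloc's own PAGE/PAGE_CEILING macros are essentially this):

#define PAGE      ((size_t)(1U << LG_PAGE)) /* 1 << 12 == 4096 bytes */
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Round s up to a page boundary, e.g. PAGE_CEILING(5000) == 8192. */
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)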
@@ -39,9 +39,15 @@
#endif

#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)

#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
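The new K-prefixed macros let 64-bit constants be written without literal-suffix noise: the macro token-pastes the suffix on before the cast. The hash.h change earlier in this diff relies on exactly this expansion:

/* KQU(0xff51afd7ed558ccd)
 *   -> QU(0xff51afd7ed558ccdULL)
 *   -> ((uint64_t)0xff51afd7ed558ccdULL)
 */
uint64_t c = KQU(0xff51afd7ed558ccd);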
@@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
#  if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
    defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
#    define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}

@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;

struct malloc_mutex_s {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
    SRWLOCK lock;
#  else
    CRITICAL_SECTION lock;
#  endif
#elif (defined(JEMALLOC_OSSPIN))
    OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))

@@ -68,7 +72,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)

    if (isthreaded) {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
        AcquireSRWLockExclusive(&mutex->lock);
#  else
        EnterCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockLock(&mutex->lock);
#else

@@ -83,7 +91,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)

    if (isthreaded) {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
        ReleaseSRWLockExclusive(&mutex->lock);
#  else
        LeaveCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockUnlock(&mutex->lock);
#else
contrib/jemalloc/include/jemalloc/internal/pages.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *pages_map(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
    size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
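The new pages.h factors raw OS page operations out of the chunk code, so chunk_mmap.c and the chunk hooks can share them. A hedged sketch of what a Unix pages_map() might look like (the function name here is hypothetical; the real implementation in src/pages.c additionally handles fixed addresses, Windows VirtualAlloc(), and alignment trimming):

#include <stddef.h>
#include <sys/mman.h>

static void *
sketch_pages_map(void *addr, size_t size)
{
    /* MAP_ANON is spelled MAP_ANONYMOUS on some systems. */
    void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);

    return (ret == MAP_FAILED ? NULL : ret);
}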
@ -1,44 +1,75 @@
|
|||
#define a0calloc JEMALLOC_N(a0calloc)
|
||||
#define a0free JEMALLOC_N(a0free)
|
||||
#define a0dalloc JEMALLOC_N(a0dalloc)
|
||||
#define a0get JEMALLOC_N(a0get)
|
||||
#define a0malloc JEMALLOC_N(a0malloc)
|
||||
#define arena_aalloc JEMALLOC_N(arena_aalloc)
|
||||
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
|
||||
#define arena_bin_index JEMALLOC_N(arena_bin_index)
|
||||
#define arena_bin_info JEMALLOC_N(arena_bin_info)
|
||||
#define arena_bitselm_get JEMALLOC_N(arena_bitselm_get)
|
||||
#define arena_boot JEMALLOC_N(arena_boot)
|
||||
#define arena_choose JEMALLOC_N(arena_choose)
|
||||
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
|
||||
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
|
||||
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
|
||||
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
|
||||
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
|
||||
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
|
||||
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
|
||||
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
|
||||
#define arena_cleanup JEMALLOC_N(arena_cleanup)
|
||||
#define arena_dalloc JEMALLOC_N(arena_dalloc)
|
||||
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
|
||||
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
|
||||
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
|
||||
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
|
||||
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
|
||||
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
|
||||
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
|
||||
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
|
||||
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
|
||||
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
|
||||
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
|
||||
#define arena_get JEMALLOC_N(arena_get)
|
||||
#define arena_get_hard JEMALLOC_N(arena_get_hard)
|
||||
#define arena_init JEMALLOC_N(arena_init)
|
||||
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
|
||||
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
|
||||
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
|
||||
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
|
||||
#define arena_malloc JEMALLOC_N(arena_malloc)
|
||||
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
|
||||
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
|
||||
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
|
||||
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
|
||||
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
|
||||
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
|
||||
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
|
||||
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
|
||||
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
|
||||
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
|
||||
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
|
||||
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
|
||||
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
|
||||
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
|
||||
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
|
||||
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
|
||||
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
|
||||
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
|
||||
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
|
||||
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
|
||||
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
|
||||
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
|
||||
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
|
||||
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
|
||||
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
|
||||
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
|
||||
#define arena_maxclass JEMALLOC_N(arena_maxclass)
|
||||
#define arena_maxrun JEMALLOC_N(arena_maxrun)
|
||||
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
|
||||
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
|
||||
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
|
||||
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
|
||||
#define arena_migrate JEMALLOC_N(arena_migrate)
|
||||
#define arena_miscelm_get JEMALLOC_N(arena_miscelm_get)
|
||||
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
|
||||
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
|
||||
#define arena_nbound JEMALLOC_N(arena_nbound)
|
||||
#define arena_new JEMALLOC_N(arena_new)
|
||||
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
|
||||
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
|
||||
#define arena_palloc JEMALLOC_N(arena_palloc)
|
||||
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
|
||||
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
|
||||
|
@ -46,50 +77,46 @@
|
|||
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
|
||||
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
|
||||
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
|
||||
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
|
||||
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
|
||||
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
|
||||
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
|
||||
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
|
||||
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
|
||||
#define arena_purge_all JEMALLOC_N(arena_purge_all)
|
||||
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
|
||||
#define arena_ralloc JEMALLOC_N(arena_ralloc)
|
||||
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
|
||||
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
|
||||
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
|
||||
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
|
||||
#define arena_run_regind JEMALLOC_N(arena_run_regind)
|
||||
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
|
||||
#define arena_salloc JEMALLOC_N(arena_salloc)
|
||||
#define arenas_cache_bypass_cleanup JEMALLOC_N(arenas_cache_bypass_cleanup)
|
||||
#define arenas_cache_cleanup JEMALLOC_N(arenas_cache_cleanup)
|
||||
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
|
||||
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
|
||||
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
|
||||
#define arenas JEMALLOC_N(arenas)
|
||||
#define arenas_booted JEMALLOC_N(arenas_booted)
|
||||
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
|
||||
#define arenas_extend JEMALLOC_N(arenas_extend)
|
||||
#define arenas_initialized JEMALLOC_N(arenas_initialized)
|
||||
#define arenas_lock JEMALLOC_N(arenas_lock)
|
||||
#define arenas_tls JEMALLOC_N(arenas_tls)
|
||||
#define arenas_tsd JEMALLOC_N(arenas_tsd)
|
||||
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
|
||||
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
|
||||
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
|
||||
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
|
||||
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
|
||||
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
|
||||
#define atomic_add_p JEMALLOC_N(atomic_add_p)
|
||||
#define atomic_add_u JEMALLOC_N(atomic_add_u)
|
||||
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
|
||||
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
|
||||
#define atomic_add_z JEMALLOC_N(atomic_add_z)
|
||||
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
|
||||
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
|
||||
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
|
||||
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
|
||||
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
|
||||
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
|
||||
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
|
||||
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
|
||||
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
|
||||
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
|
||||
#define base_alloc JEMALLOC_N(base_alloc)
|
||||
#define base_boot JEMALLOC_N(base_boot)
|
||||
#define base_calloc JEMALLOC_N(base_calloc)
|
||||
#define base_node_alloc JEMALLOC_N(base_node_alloc)
|
||||
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
|
||||
#define base_postfork_child JEMALLOC_N(base_postfork_child)
|
||||
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
|
||||
#define base_prefork JEMALLOC_N(base_prefork)
|
||||
#define base_stats_get JEMALLOC_N(base_stats_get)
|
||||
#define bitmap_full JEMALLOC_N(bitmap_full)
|
||||
#define bitmap_get JEMALLOC_N(bitmap_get)
|
||||
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
|
||||
|
@ -99,49 +126,54 @@
|
|||
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
|
||||
#define bitmap_size JEMALLOC_N(bitmap_size)
|
||||
#define bitmap_unset JEMALLOC_N(bitmap_unset)
|
||||
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
|
||||
#define bootstrap_free JEMALLOC_N(bootstrap_free)
|
||||
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
|
||||
#define bt_init JEMALLOC_N(bt_init)
|
||||
#define buferror JEMALLOC_N(buferror)
|
||||
#define choose_arena JEMALLOC_N(choose_arena)
|
||||
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
|
||||
#define chunk_alloc JEMALLOC_N(chunk_alloc)
|
||||
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
|
||||
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
|
||||
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
|
||||
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
|
||||
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
|
||||
#define chunk_boot JEMALLOC_N(chunk_boot)
|
||||
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
|
||||
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
|
||||
#define chunk_dalloc_arena JEMALLOC_N(chunk_dalloc_arena)
|
||||
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
|
||||
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
|
||||
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
|
||||
#define chunk_deregister JEMALLOC_N(chunk_deregister)
|
||||
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
|
||||
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
|
||||
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
|
||||
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
|
||||
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
|
||||
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
|
||||
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
|
||||
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
|
||||
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
|
||||
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
|
||||
#define chunk_lookup JEMALLOC_N(chunk_lookup)
|
||||
#define chunk_npages JEMALLOC_N(chunk_npages)
|
||||
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
|
||||
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
|
||||
#define chunk_prefork JEMALLOC_N(chunk_prefork)
|
||||
#define chunk_unmap JEMALLOC_N(chunk_unmap)
|
||||
#define chunks_mtx JEMALLOC_N(chunks_mtx)
|
||||
#define chunks_rtree JEMALLOC_N(chunks_rtree)
|
||||
#define chunk_purge_arena JEMALLOC_N(chunk_purge_arena)
|
||||
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
|
||||
#define chunk_register JEMALLOC_N(chunk_register)
|
||||
#define chunksize JEMALLOC_N(chunksize)
|
||||
#define chunksize_mask JEMALLOC_N(chunksize_mask)
|
||||
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
|
||||
#define chunks_rtree JEMALLOC_N(chunks_rtree)
|
||||
#define ckh_count JEMALLOC_N(ckh_count)
|
||||
#define ckh_delete JEMALLOC_N(ckh_delete)
|
||||
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
|
||||
#define ckh_insert JEMALLOC_N(ckh_insert)
|
||||
#define ckh_isearch JEMALLOC_N(ckh_isearch)
|
||||
#define ckh_iter JEMALLOC_N(ckh_iter)
|
||||
#define ckh_new JEMALLOC_N(ckh_new)
|
||||
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
|
||||
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
|
||||
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
|
||||
#define ckh_remove JEMALLOC_N(ckh_remove)
|
||||
#define ckh_search JEMALLOC_N(ckh_search)
|
||||
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
|
||||
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
|
||||
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
|
||||
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
|
||||
#define ctl_boot JEMALLOC_N(ctl_boot)
|
||||
#define ctl_bymib JEMALLOC_N(ctl_bymib)
|
||||
#define ctl_byname JEMALLOC_N(ctl_byname)
|
||||
|
@ -150,6 +182,23 @@
|
|||
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
|
||||
#define ctl_prefork JEMALLOC_N(ctl_prefork)
|
||||
#define dss_prec_names JEMALLOC_N(dss_prec_names)
|
||||
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
|
||||
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
|
||||
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
|
||||
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
|
||||
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
|
||||
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
|
||||
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
|
||||
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
|
||||
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
|
||||
#define extent_node_init JEMALLOC_N(extent_node_init)
|
||||
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
|
||||
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
|
||||
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
|
||||
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
|
||||
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
|
||||
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
|
||||
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
|
||||
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
|
||||
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
|
||||
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
|
||||
|
@ -166,6 +215,7 @@
|
|||
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
|
||||
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
|
||||
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
|
||||
#define extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
|
||||
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
|
||||
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
|
||||
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
|
||||
|
@ -193,44 +243,46 @@
|
|||
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
|
||||
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
|
||||
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
|
||||
#define huge_allocated JEMALLOC_N(huge_allocated)
|
||||
#define huge_boot JEMALLOC_N(huge_boot)
|
||||
#define huge_aalloc JEMALLOC_N(huge_aalloc)
|
||||
#define huge_dalloc JEMALLOC_N(huge_dalloc)
|
||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
|
||||
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
|
||||
#define huge_malloc JEMALLOC_N(huge_malloc)
|
||||
#define huge_mtx JEMALLOC_N(huge_mtx)
|
||||
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
|
||||
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
|
||||
#define huge_palloc JEMALLOC_N(huge_palloc)
|
||||
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
|
||||
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
|
||||
#define huge_prefork JEMALLOC_N(huge_prefork)
|
||||
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
|
||||
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
|
||||
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
|
||||
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
|
||||
#define huge_ralloc JEMALLOC_N(huge_ralloc)
|
||||
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
|
||||
#define huge_salloc JEMALLOC_N(huge_salloc)
|
||||
#define iallocm JEMALLOC_N(iallocm)
|
||||
#define iaalloc JEMALLOC_N(iaalloc)
|
||||
#define iallocztm JEMALLOC_N(iallocztm)
|
||||
#define icalloc JEMALLOC_N(icalloc)
|
||||
#define icalloct JEMALLOC_N(icalloct)
|
||||
#define idalloc JEMALLOC_N(idalloc)
|
||||
#define idalloct JEMALLOC_N(idalloct)
|
||||
#define idalloctm JEMALLOC_N(idalloctm)
|
||||
#define imalloc JEMALLOC_N(imalloc)
|
||||
#define imalloct JEMALLOC_N(imalloct)
|
||||
#define index2size JEMALLOC_N(index2size)
|
||||
#define index2size_compute JEMALLOC_N(index2size_compute)
|
||||
#define index2size_lookup JEMALLOC_N(index2size_lookup)
|
||||
#define index2size_tab JEMALLOC_N(index2size_tab)
|
||||
#define in_valgrind JEMALLOC_N(in_valgrind)
|
||||
#define ipalloc JEMALLOC_N(ipalloc)
|
||||
#define ipalloct JEMALLOC_N(ipalloct)
|
||||
#define ipallocztm JEMALLOC_N(ipallocztm)
|
||||
#define iqalloc JEMALLOC_N(iqalloc)
|
||||
#define iqalloct JEMALLOC_N(iqalloct)
|
||||
#define iralloc JEMALLOC_N(iralloc)
|
||||
#define iralloct JEMALLOC_N(iralloct)
|
||||
#define iralloct_realign JEMALLOC_N(iralloct_realign)
|
||||
#define isalloc JEMALLOC_N(isalloc)
|
||||
#define isdalloct JEMALLOC_N(isdalloct)
|
||||
#define isqalloc JEMALLOC_N(isqalloc)
|
||||
#define ivsalloc JEMALLOC_N(ivsalloc)
|
||||
#define ixalloc JEMALLOC_N(ixalloc)
|
||||
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
|
||||
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
|
||||
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
|
||||
#define lg_floor JEMALLOC_N(lg_floor)
|
||||
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
|
||||
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
|
||||
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
|
||||
|
@ -241,7 +293,8 @@
|
|||
#define malloc_printf JEMALLOC_N(malloc_printf)
|
||||
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
|
||||
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
|
||||
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
|
||||
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
|
||||
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
|
||||
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
|
||||
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
|
||||
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
|
||||
|
@ -250,16 +303,18 @@
|
|||
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
|
||||
#define malloc_write JEMALLOC_N(malloc_write)
|
||||
#define map_bias JEMALLOC_N(map_bias)
|
||||
#define map_misc_offset JEMALLOC_N(map_misc_offset)
|
||||
#define mb_write JEMALLOC_N(mb_write)
|
||||
#define mutex_boot JEMALLOC_N(mutex_boot)
|
||||
#define narenas_auto JEMALLOC_N(narenas_auto)
|
||||
#define narenas_total JEMALLOC_N(narenas_total)
|
||||
#define narenas_cache_cleanup JEMALLOC_N(narenas_cache_cleanup)
|
||||
#define narenas_total_get JEMALLOC_N(narenas_total_get)
|
||||
#define ncpus JEMALLOC_N(ncpus)
|
||||
#define nhbins JEMALLOC_N(nhbins)
|
||||
#define opt_abort JEMALLOC_N(opt_abort)
|
||||
#define opt_dss JEMALLOC_N(opt_dss)
|
||||
#define opt_junk JEMALLOC_N(opt_junk)
|
||||
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
|
||||
#define opt_junk_free JEMALLOC_N(opt_junk_free)
|
||||
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
|
||||
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
|
||||
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
|
||||
|
@ -273,84 +328,98 @@
|
|||
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
|
||||
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
|
||||
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
|
||||
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
|
||||
#define opt_quarantine JEMALLOC_N(opt_quarantine)
|
||||
#define opt_redzone JEMALLOC_N(opt_redzone)
|
||||
#define opt_stats_print JEMALLOC_N(opt_stats_print)
|
||||
#define opt_tcache JEMALLOC_N(opt_tcache)
|
||||
#define opt_utrace JEMALLOC_N(opt_utrace)
|
||||
#define opt_valgrind JEMALLOC_N(opt_valgrind)
|
||||
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
|
||||
#define opt_zero JEMALLOC_N(opt_zero)
|
||||
#define p2rz JEMALLOC_N(p2rz)
|
||||
#define pages_commit JEMALLOC_N(pages_commit)
|
||||
#define pages_decommit JEMALLOC_N(pages_decommit)
|
||||
#define pages_map JEMALLOC_N(pages_map)
|
||||
#define pages_purge JEMALLOC_N(pages_purge)
|
||||
#define pages_trim JEMALLOC_N(pages_trim)
|
||||
#define pages_unmap JEMALLOC_N(pages_unmap)
|
||||
#define pow2_ceil JEMALLOC_N(pow2_ceil)
|
||||
#define prof_active_get JEMALLOC_N(prof_active_get)
|
||||
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
|
||||
#define prof_active_set JEMALLOC_N(prof_active_set)
|
||||
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
|
||||
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
|
||||
#define prof_backtrace JEMALLOC_N(prof_backtrace)
|
||||
#define prof_boot0 JEMALLOC_N(prof_boot0)
|
||||
#define prof_boot1 JEMALLOC_N(prof_boot1)
|
||||
#define prof_boot2 JEMALLOC_N(prof_boot2)
|
||||
#define prof_bt_count JEMALLOC_N(prof_bt_count)
|
||||
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
|
||||
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
|
||||
#define prof_dump_header JEMALLOC_N(prof_dump_header)
|
||||
#define prof_dump_open JEMALLOC_N(prof_dump_open)
|
||||
#define prof_free JEMALLOC_N(prof_free)
|
||||
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
|
||||
#define prof_gdump JEMALLOC_N(prof_gdump)
|
||||
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
|
||||
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
|
||||
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
|
||||
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
|
||||
#define prof_idump JEMALLOC_N(prof_idump)
|
||||
#define prof_interval JEMALLOC_N(prof_interval)
|
||||
#define prof_lookup JEMALLOC_N(prof_lookup)
|
||||
#define prof_malloc JEMALLOC_N(prof_malloc)
|
||||
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
|
||||
#define prof_mdump JEMALLOC_N(prof_mdump)
|
||||
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
|
||||
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
|
||||
#define prof_prefork JEMALLOC_N(prof_prefork)
|
||||
#define prof_promote JEMALLOC_N(prof_promote)
|
||||
#define prof_realloc JEMALLOC_N(prof_realloc)
|
||||
#define prof_reset JEMALLOC_N(prof_reset)
|
||||
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
|
||||
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
|
||||
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
|
||||
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
|
||||
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
|
||||
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
|
||||
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
|
||||
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
|
||||
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
|
||||
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
|
||||
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
|
||||
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
|
||||
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
|
||||
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
|
||||
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
|
||||
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
|
||||
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
|
||||
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
|
||||
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
|
||||
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
|
||||
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
|
||||
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
|
||||
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
|
||||
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
|
||||
#define quarantine JEMALLOC_N(quarantine)
|
||||
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
|
||||
#define quarantine_boot JEMALLOC_N(quarantine_boot)
|
||||
#define quarantine_booted JEMALLOC_N(quarantine_booted)
|
||||
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
|
||||
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
|
||||
#define quarantine_init JEMALLOC_N(quarantine_init)
|
||||
#define quarantine_tls JEMALLOC_N(quarantine_tls)
|
||||
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
|
||||
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
|
||||
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
|
||||
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
|
||||
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
|
||||
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
|
||||
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
|
||||
#define register_zone JEMALLOC_N(register_zone)
|
||||
#define rtree_child_read JEMALLOC_N(rtree_child_read)
|
||||
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
|
||||
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
|
||||
#define rtree_delete JEMALLOC_N(rtree_delete)
|
||||
#define rtree_get JEMALLOC_N(rtree_get)
|
||||
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
|
||||
#define rtree_new JEMALLOC_N(rtree_new)
|
||||
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
|
||||
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
|
||||
#define rtree_prefork JEMALLOC_N(rtree_prefork)
|
||||
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
|
||||
#define rtree_set JEMALLOC_N(rtree_set)
|
||||
#define rtree_start_level JEMALLOC_N(rtree_start_level)
|
||||
#define rtree_subkey JEMALLOC_N(rtree_subkey)
|
||||
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
|
||||
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
|
||||
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
|
||||
#define rtree_val_read JEMALLOC_N(rtree_val_read)
|
||||
#define rtree_val_write JEMALLOC_N(rtree_val_write)
|
||||
#define s2u JEMALLOC_N(s2u)
|
||||
#define s2u_compute JEMALLOC_N(s2u_compute)
|
||||
#define s2u_lookup JEMALLOC_N(s2u_lookup)
|
||||
#define sa2u JEMALLOC_N(sa2u)
|
||||
#define set_errno JEMALLOC_N(set_errno)
|
||||
#define small_size2bin JEMALLOC_N(small_size2bin)
|
||||
#define size2index JEMALLOC_N(size2index)
|
||||
#define size2index_compute JEMALLOC_N(size2index_compute)
|
||||
#define size2index_lookup JEMALLOC_N(size2index_lookup)
|
||||
#define size2index_tab JEMALLOC_N(size2index_tab)
|
||||
#define stats_cactive JEMALLOC_N(stats_cactive)
|
||||
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
|
||||
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
|
||||
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
|
||||
#define stats_chunks JEMALLOC_N(stats_chunks)
|
||||
#define stats_print JEMALLOC_N(stats_print)
|
||||
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
|
||||
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
|
||||
|
@ -358,55 +427,67 @@
|
|||
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
|
||||
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
|
||||
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
|
||||
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
|
||||
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
|
||||
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
|
||||
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
|
||||
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
|
||||
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
|
||||
#define tcache_booted JEMALLOC_N(tcache_booted)
|
||||
#define tcache_boot JEMALLOC_N(tcache_boot)
|
||||
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
|
||||
#define tcache_create JEMALLOC_N(tcache_create)
|
||||
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
|
||||
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
|
||||
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcaches JEMALLOC_N(tcaches)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcaches_create JEMALLOC_N(tcaches_create)
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
#define tcaches_flush JEMALLOC_N(tcaches_flush)
#define tcaches_get JEMALLOC_N(tcaches_get)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
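
The JEMALLOC_N() wrapper above lets every internal symbol be renamed with a
configure-time prefix, keeping jemalloc's internals out of the application's
namespace when the allocator is embedded (as it is in FreeBSD's libc). A
minimal sketch of the mechanism; the "je_" prefix is only illustrative, since
the real prefix is selected by --with-private-namespace:

    /*
     * Sketch of the symbol mangling; "je_" stands in for the
     * configure-time private-namespace prefix.
     */
    #define JEMALLOC_N(n) je_##n
    #define tcache_destroy JEMALLOC_N(tcache_destroy)

    /* After preprocessing, internal references resolve to: */
    void je_tcache_destroy(tcache_t *tcache);
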
@@ -15,7 +15,7 @@
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position. For example. the lowest bit has a cycle of 2,
 * proportional to bit position. For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
 * bits.
 *
@@ -26,22 +26,22 @@
 * const uint32_t a, c : See above discussion.
 */
#define prng32(r, lg_range, state, a, c) do { \
    assert(lg_range > 0); \
    assert(lg_range <= 32); \
    assert((lg_range) > 0); \
    assert((lg_range) <= 32); \
 \
    r = (state * (a)) + (c); \
    state = r; \
    r >>= (32 - lg_range); \
    r >>= (32 - (lg_range)); \
} while (false)

/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
    assert(lg_range > 0); \
    assert(lg_range <= 64); \
    assert((lg_range) > 0); \
    assert((lg_range) <= 64); \
 \
    r = (state * (a)) + (c); \
    state = r; \
    r >>= (64 - lg_range); \
    r >>= (64 - (lg_range)); \
} while (false)

#endif /* JEMALLOC_H_TYPES */

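The hunk above wraps lg_range in parentheses everywhere it is expanded. A
hypothetical caller (assuming the prng32() macro above is in scope; the LCG
constants are arbitrary, not jemalloc's) shows the precedence bug this guards
against when the argument is an expression:

    uint32_t r, state = 42;
    unsigned extra = 4;

    prng32(r, 16 + extra, state, 1103515245U, 12345U);
    /* Old body: r >>= (32 - 16 + extra), i.e. r >> 20 -- wrong range.   */
    /* New body: r >>= (32 - (16 + extra)), i.e. r >> 12 -- as intended. */
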
@@ -3,8 +3,8 @@

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */

@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
 */
#define PROF_BT_MAX 128

/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

@@ -36,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all ctx's. No space is allocated for these
 * Number of mutexes shared among all gctx's. No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's. No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
@@ -63,141 +66,185 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned nignore;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /*
     * Profiling counters. An allocation/deallocation pair can operate on
     * different prof_thr_cnt_t objects that are linked into the same
     * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
     * negative. In principle it is possible for the *bytes counters to
     * overflow/underflow, but a general solution would require something
     * like 128-bit counters; this implementation doesn't bother to solve
     * that problem.
     */
    int64_t curobjs;
    int64_t curbytes;
    /* Profiling counters. */
    uint64_t curobjs;
    uint64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};

struct prof_thr_cnt_s {
    /* Linkage into prof_ctx_t's cnts_ql. */
    ql_elm(prof_thr_cnt_t) cnts_link;
typedef enum {
    prof_tctx_state_initializing,
    prof_tctx_state_nominal,
    prof_tctx_state_dumping,
    prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

    /* Linkage into thread's LRU. */
    ql_elm(prof_thr_cnt_t) lru_link;
struct prof_tctx_s {
    /* Thread data for thread that performed the allocation. */
    prof_tdata_t *tdata;

    /*
     * Associated context. If a thread frees an object that it did not
     * allocate, it is possible that the context is not cached in the
     * thread's hash table, in which case it must be able to look up the
     * context, insert a new prof_thr_cnt_t into the thread's hash table,
     * and link it into the prof_ctx_t's cnts_ql.
     * Copy of tdata->thr_uid, necessary because tdata may be defunct during
     * teardown.
     */
    prof_ctx_t *ctx;
    uint64_t thr_uid;

    /*
     * Threads use memory barriers to update the counters. Since there is
     * only ever one writer, the only challenge is for the reader to get a
     * consistent read of the counters.
     *
     * The writer uses this series of operations:
     *
     * 1) Increment epoch to an odd number.
     * 2) Update counters.
     * 3) Increment epoch to an even number.
     *
     * The reader must assure 1) that the epoch is even while it reads the
     * counters, and 2) that the epoch doesn't change between the time it
     * starts and finishes reading the counters.
     */
    unsigned epoch;

    /* Profiling counters. */
    /* Profiling counters, protected by tdata->lock. */
    prof_cnt_t cnts;

    /* Associated global context. */
    prof_gctx_t *gctx;

    /*
     * UID that distinguishes multiple tctx's created by the same thread,
     * but coexisting in gctx->tctxs. There are two ways that such
     * coexistence can occur:
     * - A dumper thread can cause a tctx to be retained in the purgatory
     *   state.
     * - Although a single "producer" thread must create all tctx's which
     *   share the same thr_uid, multiple "consumers" can each concurrently
     *   execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
     *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
     *   threshold can be hit again before the first consumer finishes
     *   executing prof_tctx_destroy().
     */
    uint64_t tctx_uid;

    /* Linkage into gctx's tctxs. */
    rb_node(prof_tctx_t) tctx_link;

    /*
     * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
     * sample vs destroy race.
     */
    bool prepared;

    /* Current dump-related state, protected by gctx->lock. */
    prof_tctx_state_t state;

    /*
     * Copy of cnts snapshotted during early dump phase, protected by
     * dump_mtx.
     */
    prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

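The removed comment block above describes a seqlock-style protocol: the single
writer makes the epoch odd, updates the counters, then makes it even, and a
reader retries until it sees a stable even epoch. A minimal sketch of the
reader side under that (now removed) scheme; mb_read() is an assumed read
barrier, since jemalloc's mb.h only provides mb_write(), and real code would
need atomics rather than a bare volatile:

    /* Stand-in read barrier (jemalloc's mb.h only has mb_write()). */
    #define mb_read() __sync_synchronize()

    typedef struct {
        unsigned epoch;         /* Odd while an update is in flight. */
        prof_cnt_t cnts;
    } epoch_cnt_t;

    static prof_cnt_t
    epoch_cnt_read(volatile epoch_cnt_t *ec)
    {
        prof_cnt_t snap;
        unsigned e0;

        do {
            while ((e0 = ec->epoch) & 1)
                ;               /* Writer active; spin for even epoch. */
            mb_read();
            snap.curobjs = ec->cnts.curobjs;
            snap.curbytes = ec->cnts.curbytes;
            snap.accumobjs = ec->cnts.accumobjs;
            snap.accumbytes = ec->cnts.accumbytes;
            mb_read();
        } while (ec->epoch != e0);      /* Retry if the epoch moved. */
        return (snap);
    }
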
struct prof_ctx_s {
    /* Associated backtrace. */
    prof_bt_t *bt;

    /* Protects nlimbo, cnt_merged, and cnts_ql. */
struct prof_gctx_s {
    /* Protects nlimbo, cnt_summed, and tctxs. */
    malloc_mutex_t *lock;

    /*
     * Number of threads that currently cause this ctx to be in a state of
     * Number of threads that currently cause this gctx to be in a state of
     * limbo due to one of:
     *   - Initializing per thread counters associated with this ctx.
     *   - Preparing to destroy this ctx.
     *   - Dumping a heap profile that includes this ctx.
     *   - Initializing this gctx.
     *   - Initializing per thread counters associated with this gctx.
     *   - Preparing to destroy this gctx.
     *   - Dumping a heap profile that includes this gctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * ctx.
     * gctx.
     */
    unsigned nlimbo;

    /*
     * Tree of profile counters, one for each thread that has allocated in
     * this context.
     */
    prof_tctx_tree_t tctxs;

    /* Linkage for tree of contexts to be dumped. */
    rb_node(prof_gctx_t) dump_link;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* When threads exit, they merge their stats into cnt_merged. */
    prof_cnt_t cnt_merged;
    /* Associated backtrace. */
    prof_bt_t bt;

    /*
     * List of profile counters, one for each thread that has allocated in
     * this context.
     */
    ql_head(prof_thr_cnt_t) cnts_ql;

    /* Linkage for list of contexts to be dumped. */
    ql_elm(prof_ctx_t) dump_link;
    /* Backtrace vector, variable size, referred to by bt. */
    void *vec[1];
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
    malloc_mutex_t *lock;

    /* Monotonically increasing unique thread identifier. */
    uint64_t thr_uid;

    /*
     * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
     * cache of backtraces, with associated thread-specific prof_thr_cnt_t
     * objects. Other threads may read the prof_thr_cnt_t contents, but no
     * others will ever write them.
     *
     * Upon thread exit, the thread must merge all the prof_thr_cnt_t
     * counter data into the associated prof_ctx_t objects, and unlink/free
     * the prof_thr_cnt_t objects.
     * Monotonically increasing discriminator among tdata structures
     * associated with the same thr_uid.
     */
    ckh_t bt2cnt;
    uint64_t thr_discrim;

    /* LRU for contents of bt2cnt. */
    ql_head(prof_thr_cnt_t) lru_ql;
    /* Included in heap profile dumps if non-NULL. */
    char *thread_name;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void **vec;
    bool attached;
    bool expired;

    rb_node(prof_tdata_t) tdata_link;

    /*
     * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
     * necessary when incrementing this field, because only one thread ever
     * does so.
     */
    uint64_t tctx_uid_next;

    /*
     * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
     * backtraces for which it has non-zero allocation/deallocation counters
     * associated with thread-specific prof_tctx_t objects. Other threads
     * may write to prof_tctx_t contents when freeing associated objects.
     */
    ckh_t bt2tctx;

    /* Sampling state. */
    uint64_t prng_state;
    uint64_t threshold;
    uint64_t accum;
    uint64_t bytes_until_sample;

    /* State used to avoid dumping while operating on prof internals. */
    bool enq;
    bool enq_idump;
    bool enq_gdump;

    /*
     * Set to true during an early dump phase for tdata's which are
     * currently being dumped. New threads' tdata's have this initialized
     * to false so that they aren't accidentally included in later dump
     * phases.
     */
    bool dumping;

    /*
     * True if profiling is active for this tdata's thread
     * (thread.prof.active mallctl).
     */
    bool active;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false. No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
@@ -211,6 +258,12 @@ extern char opt_prof_prefix[
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated. Each arena triggers a
 * profile dump when it reaches this threshold. The effect is that the
@@ -221,391 +274,248 @@ extern char opt_prof_prefix[
extern uint64_t prof_interval;

/*
 * If true, promote small sampled objects to large objects, since small run
 * headers do not have embedded profile context pointers.
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern bool prof_promote;
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#define PROF_ALLOC_PREP(nignore, size, ret) do { \
    prof_tdata_t *prof_tdata; \
    prof_bt_t bt; \
 \
    assert(size == s2u(size)); \
 \
    prof_tdata = prof_tdata_get(true); \
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
        if (prof_tdata != NULL) \
            ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
        else \
            ret = NULL; \
        break; \
    } \
 \
    if (opt_prof_active == false) { \
        /* Sampling is currently inactive, so avoid sampling. */\
        ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
    } else if (opt_lg_prof_sample == 0) { \
        /* Don't bother with sampling logic, since sampling */\
        /* interval is 1. */\
        bt_init(&bt, prof_tdata->vec); \
        prof_backtrace(&bt, nignore); \
        ret = prof_lookup(&bt); \
    } else { \
        if (prof_tdata->threshold == 0) { \
            /* Initialize. Seed the prng differently for */\
            /* each thread. */\
            prof_tdata->prng_state = \
                (uint64_t)(uintptr_t)&size; \
            prof_sample_threshold_update(prof_tdata); \
        } \
 \
        /* Determine whether to capture a backtrace based on */\
        /* whether size is enough for prof_accum to reach */\
        /* prof_tdata->threshold. However, delay updating */\
        /* these variables until prof_{m,re}alloc(), because */\
        /* we don't know for sure that the allocation will */\
        /* succeed. */\
        /* */\
        /* Use subtraction rather than addition to avoid */\
        /* potential integer overflow. */\
        if (size >= prof_tdata->threshold - \
            prof_tdata->accum) { \
            bt_init(&bt, prof_tdata->vec); \
            prof_backtrace(&bt, nignore); \
            ret = prof_lookup(&bt); \
        } else \
            ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
    } \
} while (0)

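The removed macro's comment notes that it compares with subtraction rather
than addition to avoid integer overflow. A standalone illustration with
made-up values (the code's invariant accum < threshold keeps the subtraction
itself from wrapping):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t threshold = UINT64_C(524288);  /* 2^19-byte mean interval. */
        uint64_t accum = UINT64_C(524000);      /* Bytes since last sample. */
        uint64_t size = UINT64_MAX - UINT64_C(1000); /* Absurd request. */

        /* Buggy: the sum wraps to 522999, so no sample is taken. */
        printf("buggy: %d\n", accum + size >= threshold);
        /* As in the macro: threshold - accum == 288 <= size, so sample. */
        printf("safe:  %d\n", size >= threshold - accum);
        return (0);
    }
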
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
    prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
    prof_tdata_cleanup)

JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
    prof_tdata_t *prof_tdata;

    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false. No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return (prof_active);
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{

    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return (prof_gdump_val);
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    prof_tdata = *prof_tdata_tsd_get();
    if (create && prof_tdata == NULL)
        prof_tdata = prof_tdata_init();
    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return (prof_tdata);
    return (tdata);
}

JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
{
    /*
     * The body of this function is compiled out unless heap profiling is
     * enabled, so that it is possible to compile jemalloc with floating
     * point support completely disabled. Avoiding floating point code is
     * important on memory-constrained systems, but it also enables a
     * workaround for versions of glibc that don't properly save/restore
     * floating point registers during dynamic lazy symbol loading (which
     * internally calls into whatever malloc implementation happens to be
     * integrated into the application). Note that some compilers (e.g.
     * gcc 4.8) may use floating point registers for fast memory moves, so
     * jemalloc must be compiled with such optimizations disabled (e.g.
     * -mno-sse) in order for the workaround to be complete.
     */
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    cassert(config_prof);

    /*
     * Compute sample threshold as a geometrically distributed random
     * variable with mean (2^opt_lg_prof_sample).
     *
     *                           __        __
     *                          |  log(u)  |                     1
     * prof_tdata->threshold =  | -------- |, where p = -------------------
     *                          | log(1-p) |             opt_lg_prof_sample
     *                                                  2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    prng64(r, 53, prof_tdata->prng_state,
        UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
    u = (double)r * (1.0/9007199254740992.0L);
    prof_tdata->threshold = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
        + (uint64_t)1U;
#endif
}

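The comment above derives the sample threshold as ceil(log(u)/log(1-p)) with
p = 1/2^opt_lg_prof_sample, a geometric random variable whose mean is
2^opt_lg_prof_sample bytes. A standalone sketch of just that arithmetic; the
fixed u and lg_sample values are stand-ins, and jemalloc's prng64() is
deliberately not used:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned lg_sample = 19;        /* Mean interval of 2^19 bytes. */
        double p = 1.0 / (double)((uint64_t)1 << lg_sample);
        double u = 0.37;                /* Stand-in for a uniform draw. */
        uint64_t threshold = (uint64_t)(log(u) / log(1.0 - p)) + 1;

        /* log(0.37)/log(1 - 2^-19) is on the order of 2^19 (~521000). */
        printf("threshold = %llu\n", (unsigned long long)threshold);
        return (0);
    }
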
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        ret = arena_prof_ctx_get(ptr);
    } else
        ret = huge_prof_ctx_get(ptr);
    return (arena_prof_tctx_get(ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(ptr, tctx);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        tdata = NULL;

    if (tdata_out != NULL)
        *tdata_out = tdata;

    if (tdata == NULL)
        return (true);

    if (tdata->bytes_until_sample >= usize) {
        if (update)
            tdata->bytes_until_sample -= usize;
        return (true);
    } else {
        /* Compute new sample threshold. */
        if (update)
            prof_sample_threshold_update(tdata);
        return (!tdata->active);
    }
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
{
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == s2u(usize));

    if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
        usize, update, &tdata)))
        ret = (prof_tctx_t *)(uintptr_t)1U;
    else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return (ret);
}

JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        arena_prof_ctx_set(ptr, usize, ctx);
    } else
        huge_prof_ctx_set(ptr, ctx);
}

JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);
    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return (true);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}

JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(ptr, true));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(usize)) {
            /*
             * Don't sample. For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the usize passed to
             * PROF_ALLOC_PREP() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}

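Both the old prof_thr_cnt_t path and the new prof_tctx_t path above compare
the pointer against (uintptr_t)1U: values at or below 1 are sentinels, never
object addresses. A sketch of the convention; NOT_SAMPLED and report() are
illustrative names, not jemalloc API:

    /*
     * Near-NULL sentinel encoding, as used by cnt/tctx above and by the
     * tcache/prof_tdata state pointers elsewhere in this diff.
     */
    #define NOT_SAMPLED ((prof_tctx_t *)(uintptr_t)1U)

    static void
    report(const void *ptr, size_t usize, prof_tctx_t *tctx)
    {
        if ((uintptr_t)tctx > (uintptr_t)1U) {
            /* Real pointer: record the sampled allocation. */
            prof_malloc_sample_object(ptr, usize, tctx);
        }
        /* NULL signals an error; NOT_SAMPLED means skip silently. */
    }
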
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx)
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
{
    prof_thr_cnt_t *told_cnt;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (ptr != NULL) {
    if (!updated && ptr != NULL) {
        assert(usize == isalloc(ptr, true));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(usize)) {
                /*
                 * Don't sample. The usize passed to
                 * PROF_ALLOC_PREP() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual usize was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             * Don't sample. The usize passed to PROF_ALLOC_PREP()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            malloc_mutex_lock(old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_usize;
            malloc_mutex_unlock(old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
        } else
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
    }

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);
        cnt->epoch++;
    } else if (ptr != NULL)
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_usize;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
    if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, old_usize, old_tctx);
    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);
    prof_tctx_t *tctx = prof_tctx_get(ptr);

    cassert(config_prof);
    assert(usize == isalloc(ptr, true));

    if ((uintptr_t)ctx > (uintptr_t)1) {
        prof_thr_cnt_t *tcnt;
        assert(size == isalloc(ptr, true));
        tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(ctx->lock);
        }
    }
    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, usize, tctx);
}
#endif

@@ -11,6 +11,7 @@
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_sdallocx JEMALLOC_N(sdallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)

@@ -18,8 +19,3 @@
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
#define je_valloc JEMALLOC_N(valloc)
#define je_allocm JEMALLOC_N(allocm)
#define je_dallocm JEMALLOC_N(dallocm)
#define je_nallocm JEMALLOC_N(nallocm)
#define je_rallocm JEMALLOC_N(rallocm)
#define je_sallocm JEMALLOC_N(sallocm)

@@ -1,6 +1,4 @@
/*
 * List definitions.
 */
/* List definitions. */
#define ql_head(a_type) \
struct { \
    a_type *qlh_first; \

@@ -40,8 +40,10 @@ struct { \
    (a_qr_b)->a_field.qre_prev = t; \
} while (0)

/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code. */
/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_field) \
    qr_meld((a_qr_a), (a_qr_b), a_field)

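The reflowed comment above records a non-obvious property of circular doubly
linked lists: one pointer exchange either melds two rings or splits one,
depending on whether its arguments start on the same ring. A toy version of
qr_meld()'s pointer shuffle, written as a plain function rather than a macro
(illustrative only, not jemalloc code):

    typedef struct ring_s { struct ring_s *next, *prev; } ring_t;

    static void
    ring_meld_or_split(ring_t *a, ring_t *b)
    {
        ring_t *t;

        /* Exchange the predecessors of a and b, refixing next links. */
        a->prev->next = b;
        b->prev->next = a;
        t = a->prev;
        a->prev = b->prev;
        b->prev = t;
        /* Different rings in -> one ring out; same ring in -> two out. */
    }
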
@@ -29,36 +29,29 @@ struct quarantine_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
void quarantine_cleanup(void *arg);
bool quarantine_boot(void);
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)

void quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
malloc_tsd_externs(quarantine, quarantine_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
    quarantine_cleanup)

JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    quarantine_t *quarantine;
    tsd_t *tsd;

    assert(config_fill && opt_quarantine);

    quarantine = *quarantine_tsd_get();
    if (quarantine == NULL)
        quarantine_init(LG_MAXOBJS_INIT);
    tsd = tsd_fetch();
    if (tsd_quarantine_get(tsd) == NULL)
        quarantine_alloc_hook_work(tsd);
}
#endif

@@ -158,6 +158,8 @@ struct { \
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \

@@ -198,7 +200,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   int (a_cmp *)(a_type *a_node, a_type *a_other);
 *                         ^^^^^^
 *                         or a_key
 * Interpretation of comparision function return values:
 * Interpretation of comparison function return values:
 *   -1 : a_node <  a_other
 *    0 : a_node == a_other
 *    1 : a_node >  a_other
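
The comparator contract documented above (-1/0/1, total order) can be
satisfied without branches. A minimal sketch assuming rb.h is in scope;
ex_node_t follows the ex_* naming that rb.h's own documentation uses:

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        int key;
        rb_node(ex_node_t) link;    /* Tree linkage, from rb.h. */
    };

    /* Returns -1, 0, or 1 per the contract above. */
    static int
    ex_cmp(ex_node_t *a_node, ex_node_t *a_other)
    {
        return ((a_node->key > a_other->key) -
            (a_node->key < a_other->key));
    }
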
@@ -224,6 +226,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   Args:
 *     tree: Pointer to an uninitialized red-black tree object.
 *
 * static bool
 * ex_empty(ex_t *tree);
 *   Description: Determine whether tree is empty.
 *   Args:
 *     tree: Pointer to an initialized red-black tree object.
 *   Ret: True if tree is empty, false otherwise.
 *
 * static ex_node_t *
 * ex_first(ex_t *tree);
 * static ex_node_t *

@@ -309,6 +318,10 @@ a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
    rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
    return (rbtree->rbt_root == &rbtree->rbt_nil); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
    a_type *ret; \

@@ -580,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    if (left != &rbtree->rbt_nil) { \
        /* node has no successor, but it has a left child. */\
        /* Splice node out, without losing the left child. */\
        assert(rbtn_red_get(a_type, a_field, node) == false); \
        assert(!rbtn_red_get(a_type, a_field, node)); \
        assert(rbtn_red_get(a_type, a_field, left)); \
        rbtn_black_set(a_type, a_field, left); \
        if (pathp == path) { \

@@ -616,8 +629,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    if (pathp->cmp < 0) { \
        rbtn_left_set(a_type, a_field, pathp->node, \
            pathp[1].node); \
        assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
            == false); \
        assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
        if (rbtn_red_get(a_type, a_field, pathp->node)) { \
            a_type *right = rbtn_right_get(a_type, a_field, \
                pathp->node); \

@@ -681,7 +693,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            rbtn_rotate_left(a_type, a_field, pathp->node, \
                tnode); \
            /* Balance restored, but rotation modified */\
            /* subree root, which may actually be the tree */\
            /* subtree root, which may actually be the tree */\
            /* root. */\
            if (pathp == path) { \
                /* Set root. */ \

@@ -849,7 +861,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    } \
    /* Set root. */ \
    rbtree->rbt_root = path->node; \
    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \

@@ -1,170 +1,292 @@
/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc. This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 * associating metadata with chunks that are currently owned by jemalloc.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2). This impacts tree
 * depth.
 * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
 * machine address width.
 */
#define RTREE_NODESIZE (1U << 16)
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
    ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)

typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)

/*
 * The node allocation callback function's argument is the number of contiguous
 * rtree_node_elm_t structures to allocate, and the resulting memory must be
 * zeroed.
 */
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_node_elm_s {
    union {
        void *pun;
        rtree_node_elm_t *child;
        extent_node_t *val;
    };
};

struct rtree_level_s {
    /*
     * A non-NULL subtree points to a subtree rooted along the hypothetical
     * path to the leaf node corresponding to key 0. Depending on what keys
     * have been used to store to the tree, an arbitrary combination of
     * subtree pointers may remain NULL.
     *
     * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
     * This results in a 3-level tree, and the leftmost leaf can be directly
     * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
     * 0x00000000) can be accessed via subtrees[1], and the remainder of the
     * tree can be accessed via subtrees[0].
     *
     *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
     *
     *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
     *
     *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
     *
     * This has practical implications on x64, which currently uses only the
     * lower 47 bits of virtual address space in userland, thus leaving
     * subtrees[0] unused and avoiding a level of tree traversal.
     */
    union {
        void *subtree_pun;
        rtree_node_elm_t *subtree;
    };
    /* Number of key bits distinguished by this level. */
    unsigned bits;
    /*
     * Cumulative number of key bits distinguished by traversing to
     * corresponding tree level.
     */
    unsigned cumbits;
};

struct rtree_s {
    rtree_alloc_t *alloc;
    rtree_dalloc_t *dalloc;
    malloc_mutex_t mutex;
    void **root;
    unsigned height;
    unsigned level2bits[1]; /* Dynamically sized. */
    rtree_node_alloc_t *alloc;
    rtree_node_dalloc_t *dalloc;
    unsigned height;
    /*
     * Precomputed table used to convert from the number of leading 0 key
     * bits to which subtree level to start at.
     */
    unsigned start_level[RTREE_HEIGHT_MAX];
    rtree_level_t levels[RTREE_HEIGHT_MAX];
};

#endif /* JEMALLOC_H_STRUCTS */
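
The level diagram above is easiest to see numerically. A toy recomputation of
the per-level subkeys for a typical x86-64 userland address, with 4 key bits
per level as in LG_RTREE_BITS_PER_LEVEL; none of this is jemalloc API:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        const unsigned bits_per_level = 4;
        const unsigned height = 64 / bits_per_level;    /* 16 levels max. */
        uint64_t key = 0x00007f1234567000ULL;           /* 47-bit address. */
        unsigned level;

        for (level = 0; level < height; level++) {
            unsigned shift = 64 - bits_per_level * (level + 1);
            uint64_t subkey = (key >> shift) &
                (((uint64_t)1 << bits_per_level) - 1);
            printf("level %2u: subkey 0x%x\n", level, (unsigned)subkey);
        }
        /* The top four subkeys are all 0 here, which is why the */
        /* precomputed start_level[] table can skip those levels. */
        return (0);
    }
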
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
    unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
    rtree_node_elm_t *elm, unsigned level);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);

bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
    bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
    const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);

extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
    uint8_t ret; \
    uintptr_t subkey; \
    unsigned i, lshift, height, bits; \
    void **node, **child; \
 \
    RTREE_LOCK(&rtree->mutex); \
    for (i = lshift = 0, height = rtree->height, node = rtree->root;\
        i < height - 1; \
        i++, lshift += bits, node = child) { \
        bits = rtree->level2bits[i]; \
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
            3)) - bits); \
        child = (void**)node[subkey]; \
        if (child == NULL) { \
            RTREE_UNLOCK(&rtree->mutex); \
            return (0); \
        } \
    } \
 \
    /* \
     * node is a leaf, so it contains values rather than node \
     * pointers. \
     */ \
    bits = rtree->level2bits[i]; \
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
        bits); \
    { \
        uint8_t *leaf = (uint8_t *)node; \
        ret = leaf[subkey]; \
    } \
    RTREE_UNLOCK(&rtree->mutex); \
 \
    RTREE_GET_VALIDATE \
    return (ret); \
JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
    unsigned start_level;

    if (unlikely(key == 0))
        return (rtree->height - 1);

    start_level = rtree->start_level[lg_floor(key) >>
        LG_RTREE_BITS_PER_LEVEL];
    assert(start_level < rtree->height);
    return (start_level);
}

#ifdef JEMALLOC_DEBUG
#  define RTREE_LOCK(l) malloc_mutex_lock(l)
#  define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
#  define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
#  undef RTREE_LOCK
#  undef RTREE_UNLOCK
#  undef RTREE_GET_VALIDATE
#endif
JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{

#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
 * Suppose that it were possible for a jemalloc-allocated chunk to be
 * munmap()ped, followed by a different allocator in another thread re-using
 * overlapping virtual memory, all without invalidating the cached rtree
 * value. The result would be a false positive (the rtree would claim that
 * jemalloc owns memory that it had actually discarded). This scenario
 * seems impossible, but the following assertion is a prudent sanity check.
 */
#  define RTREE_GET_VALIDATE \
    assert(rtree_get_locked(rtree, key) == ret);
#else
#  define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
    return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
        rtree->levels[level].cumbits)) & ((ZU(1) <<
        rtree->levels[level].bits) - 1));
}

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
rtree_node_valid(rtree_node_elm_t *node)
{

    return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm)
{
    rtree_node_elm_t *child;

    /* Double-checked read (first read may be stale). */
    child = elm->child;
    if (!rtree_node_valid(child))
        child = atomic_read_p(&elm->pun);
    return (child);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
    rtree_node_elm_t *child;

    child = rtree_child_tryread(elm);
    if (unlikely(!rtree_node_valid(child)))
        child = rtree_child_read_hard(rtree, elm, level);
    return (child);
}

JEMALLOC_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{

    if (dependent) {
        /*
         * Reading a val on behalf of a pointer to a valid allocation is
         * guaranteed to be a clean read even without synchronization,
         * because the rtree update became visible in memory before the
         * pointer came into existence.
         */
        return (elm->val);
    } else {
        /*
         * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
         * dependent on a previous rtree write, which means a stale read
         * could result if synchronization were omitted here.
         */
        return (atomic_read_p(&elm->pun));
    }
}

JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{

    atomic_write_p(&elm->pun, val);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    /* Double-checked read (first read may be stale). */
    subtree = rtree->levels[level].subtree;
    if (!rtree_node_valid(subtree))
        subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
    return (subtree);
}

JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level)
{
    rtree_node_elm_t *subtree;

    subtree = rtree_subtree_tryread(rtree, level);
    if (unlikely(!rtree_node_valid(subtree)))
        subtree = rtree_subtree_read_hard(rtree, level);
    return (subtree);
}

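rtree_val_read() above distinguishes dependent reads, where the pointer in
hand proves the write already happened, from independent ones, which need
real synchronization. A sketch of the same publish/read idea in portable C11
atomics; jemalloc's atomic_read_p()/atomic_write_p() are its own primitives,
so the mapping is approximate:

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic(int *) slot;         /* Analogous to elm->pun. */

    static void
    publish(int *node)
    {
        *node = 42;                     /* Initialize the payload... */
        atomic_store_explicit(&slot, node,  /* ...then publish it. */
            memory_order_release);
    }

    static int
    read_independent(void)
    {
        /* An arbitrary reader needs an acquiring load to avoid */
        /* observing the pointer before the payload write.      */
        int *node = atomic_load_explicit(&slot, memory_order_acquire);
        return (node != NULL ? *node : -1);
    }
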
JEMALLOC_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
    uintptr_t subkey;
    unsigned i, lshift, height, bits;
    void **node, **child;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    malloc_mutex_lock(&rtree->mutex);
    for (i = lshift = 0, height = rtree->height, node = rtree->root;
        i < height - 1;
        i++, lshift += bits, node = child) {
        bits = rtree->level2bits[i];
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
            bits);
        child = (void**)node[subkey];
        if (child == NULL) {
            size_t size = ((i + 1 < height - 1) ? sizeof(void *)
                : (sizeof(uint8_t))) << rtree->level2bits[i+1];
            child = (void**)rtree->alloc(size);
            if (child == NULL) {
                malloc_mutex_unlock(&rtree->mutex);
                return (true);
            }
            memset(child, 0, size);
            node[subkey] = child;
    start_level = rtree_start_level(rtree, key);

    for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
        /**/; i++, node = child) {
        if (!dependent && unlikely(!rtree_node_valid(node)))
            return (NULL);
        subkey = rtree_subkey(rtree, key, i);
        if (i == rtree->height - 1) {
            /*
             * node is a leaf, so it contains values rather than
             * child pointers.
             */
            return (rtree_val_read(rtree, &node[subkey],
                dependent));
        }
        assert(i < rtree->height - 1);
        child = rtree_child_tryread(&node[subkey]);
    }
    not_reached();
}

    /* node is a leaf, so it contains values rather than node pointers. */
    bits = rtree->level2bits[i];
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
    {
        uint8_t *leaf = (uint8_t *)node;
        leaf[subkey] = val;
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
    uintptr_t subkey;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_read(rtree, start_level);
    if (node == NULL)
        return (true);
    for (i = start_level; /**/; i++, node = child) {
        subkey = rtree_subkey(rtree, key, i);
        if (i == rtree->height - 1) {
            /*
             * node is a leaf, so it contains values rather than
             * child pointers.
             */
            rtree_val_write(rtree, &node[subkey], val);
            return (false);
        }
        assert(i + 1 < rtree->height);
        child = rtree_child_read(rtree, &node[subkey], i);
        if (child == NULL)
            return (true);
    }
    malloc_mutex_unlock(&rtree->mutex);

    return (false);
    not_reached();
}
#endif

[File diff suppressed because it is too large]

@@ -4,6 +4,7 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

@@ -20,12 +21,6 @@ struct tcache_bin_stats_s {
};

struct malloc_bin_stats_s {
    /*
     * Current number of bytes allocated, including objects currently
     * cached by tcache.
     */
    size_t allocated;

    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin. Note that tcache may allocate an object, then recycle it

@@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

@ -78,10 +79,25 @@ struct malloc_large_stats_s {
|
|||
*/
|
||||
uint64_t nrequests;
|
||||
|
||||
/* Current number of runs of this size class. */
|
||||
/*
|
||||
* Current number of runs of this size class, including runs currently
|
||||
* cached by tcache.
|
||||
*/
|
||||
size_t curruns;
|
||||
};
|
||||
|
||||
struct malloc_huge_stats_s {
|
||||
/*
|
||||
* Total number of allocation/deallocation requests served directly by
|
||||
* the arena.
|
||||
*/
|
||||
uint64_t nmalloc;
|
||||
uint64_t ndalloc;
|
||||
|
||||
/* Current number of (multi-)chunk allocations of this size class. */
|
||||
size_t curhchunks;
|
||||
};
|
||||
|
||||
struct arena_stats_s {
|
||||
/* Number of bytes currently mapped. */
|
||||
size_t mapped;
|
||||
|
@ -95,34 +111,28 @@ struct arena_stats_s {
|
|||
uint64_t nmadvise;
|
||||
uint64_t purged;
|
||||
|
||||
/*
|
||||
* Number of bytes currently mapped purely for metadata purposes, and
|
||||
* number of bytes currently allocated for internal metadata.
|
||||
*/
|
||||
size_t metadata_mapped;
|
||||
size_t metadata_allocated; /* Protected via atomic_*_z(). */
|
||||
|
||||
/* Per-size-category statistics. */
|
||||
size_t allocated_large;
|
||||
uint64_t nmalloc_large;
|
||||
uint64_t ndalloc_large;
|
||||
uint64_t nrequests_large;
|
||||
|
||||
/*
|
||||
* One element for each possible size class, including sizes that
|
||||
* overlap with bin size classes. This is necessary because ipalloc()
|
||||
* sometimes has to use such large objects in order to assure proper
|
||||
* alignment.
|
||||
*/
|
||||
size_t allocated_huge;
|
||||
uint64_t nmalloc_huge;
|
||||
uint64_t ndalloc_huge;
|
||||
|
||||
/* One element for each large size class. */
|
||||
malloc_large_stats_t *lstats;
|
||||
};
|
||||
|
||||
struct chunk_stats_s {
|
||||
/* Number of chunks that were allocated. */
|
||||
uint64_t nchunks;
|
||||
|
||||
/* High-water mark for number of chunks allocated. */
|
||||
size_t highchunks;
|
||||
|
||||
/*
|
||||
* Current number of chunks allocated. This value isn't maintained for
|
||||
* any other purpose, so keep track of it in order to be able to set
|
||||
* highchunks.
|
||||
*/
|
||||
size_t curchunks;
|
||||
/* One element for each huge size class. */
|
||||
malloc_huge_stats_t *hstats;
|
||||
};
|
||||
|
||||
#endif /* JEMALLOC_H_STRUCTS */
|
||||
|
|
|
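The counters these structs accumulate surface to applications through the mallctl namespace. A minimal sketch of reading one of them, assuming the standalone library's <jemalloc/jemalloc.h> (on FreeBSD the same functions come from <malloc_np.h>); the "epoch" write refreshes the stats snapshot before reads:

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	uint64_t epoch = 1;
    	size_t sz = sizeof(epoch);
    	size_t allocated;

    	/* Refresh the cached statistics snapshot. */
    	mallctl("epoch", &epoch, &sz, &epoch, sz);

    	sz = sizeof(allocated);
    	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
    		printf("allocated: %zu\n", allocated);
    	return (0);
    }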
@@ -4,6 +4,7 @@
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
@@ -15,6 +16,11 @@ typedef struct tcache_s tcache_t;
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN 20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache. This is an additional constraint beyond that imposed as: twice the
@@ -69,10 +75,9 @@ struct tcache_bin_s {

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t prof_accumbytes;	/* Cleared after arena_prof_accum() */
	arena_t *arena;			/* This thread's arena. */
	uint64_t prof_accumbytes;	/* Cleared after arena_prof_accum(). */
	unsigned ev_cnt;		/* Event count since incremental GC. */
	unsigned next_gc_bin;		/* Next bin to GC. */
	index_t next_gc_bin;		/* Next bin to GC. */
	tcache_bin_t tbins[1];		/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
@@ -82,6 +87,14 @@ struct tcache_s {
	 */
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t	*tcache;
		tcaches_t	*next;
	};
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -95,84 +108,90 @@ extern tcache_bin_info_t *tcache_bin_info;
 * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
 * completely disjoint from this data structure. tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched. This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;
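This explicit-tcache machinery is driven from the public API via the tcache.* mallctls and the MALLOCX_TCACHE() flag described in the 4.0.0 change notes. A minimal usage sketch, assuming jemalloc 4.0.0's public API:

    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	unsigned tc;
    	size_t sz = sizeof(tc);
    	void *p;

    	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
    		return (1);
    	/* Route an allocation/deallocation pair through the explicit tcache. */
    	p = mallocx(128, MALLOCX_TCACHE(tc));
    	dallocx(p, MALLOCX_TCACHE(tc));
    	/* Flush and retire the cache when done. */
    	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    	return (0);
    }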
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, index_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    index_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
void tcache_thread_cleanup(void *arg);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena);
void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcache_boot0(void);
bool tcache_boot1(void);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void tcache_event(tcache_t *tcache);
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, bool zero);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    index_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
	tcache_t *tcache;
	tsd_t *tsd;

	cassert(config_tcache);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
		return;
	tcache_destroy(tcache);
	tcache = NULL;
	tcache_tsd_set(&tcache);
	tsd = tsd_fetch();
	tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tcache_enabled = *tcache_enabled_tsd_get();
	tsd = tsd_fetch();
	tcache_enabled = tsd_tcache_enabled_get(tsd);
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&tcache_enabled);
		tsd_tcache_enabled_set(tsd, tcache_enabled);
	}

	return ((bool)tcache_enabled);
@@ -181,85 +200,41 @@ tcache_enabled_get(void)
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;
	tcache_t *tcache;

	cassert(config_tcache);

	tsd = tsd_fetch();

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tcache = *tcache_tsd_get();
	if (enabled) {
		if (tcache == TCACHE_STATE_DISABLED) {
			tcache = NULL;
			tcache_tsd_set(&tcache);
		}
	} else /* disabled */ {
		if (tcache > TCACHE_STATE_MAX) {
			tcache_destroy(tcache);
			tcache = NULL;
		}
		if (tcache == NULL) {
			tcache = TCACHE_STATE_DISABLED;
			tcache_tsd_set(&tcache);
		}
	}
	tsd_tcache_enabled_set(tsd, tcache_enabled);

	if (!enabled)
		tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
tcache_get(tsd_t *tsd, bool create)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
	if (!config_tcache)
		return (NULL);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (tcache == TCACHE_STATE_DISABLED)
			return (NULL);
		if (tcache == NULL) {
			if (create == false) {
				/*
				 * Creating a tcache here would cause
				 * allocation as a side effect of free().
				 * Ordinarily that would be okay since
				 * tcache_create() failure is a soft failure
				 * that doesn't propagate. However, if TLS
				 * data are freed via free() as in glibc,
				 * subtle corruption could result from setting
				 * a TLS variable after its backing memory is
				 * freed.
				 */
				return (NULL);
			}
			if (tcache_enabled_get() == false) {
				tcache_enabled_set(false); /* Memoize. */
				return (NULL);
			}
			return (tcache_create(choose_arena(NULL)));
		}
		if (tcache == TCACHE_STATE_PURGATORY) {
			/*
			 * Make a note that an allocator function was called
			 * after tcache_thread_cleanup() was called.
			 */
			tcache = TCACHE_STATE_REINCARNATED;
			tcache_tsd_set(&tcache);
			return (NULL);
		}
		if (tcache == TCACHE_STATE_REINCARNATED)
			return (NULL);
		not_reached();
	tcache = tsd_tcache_get(tsd);
	if (!create)
		return (tcache);
	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
		tcache = tcache_get_hard(tsd);
		tsd_tcache_set(tsd, tcache);
	}

	return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
@@ -267,8 +242,8 @@ tcache_event(tcache_t *tcache)

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
	if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
		tcache_event_hard(tsd, tcache);
}
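tcache_event() amortizes cache maintenance: every allocation event bumps a counter, and only every TCACHE_GC_INCR-th event takes the slow path. A stripped-down sketch of that pattern (hypothetical names; the real slow path is tcache_event_hard()):

    /* Toy event-driven incremental GC trigger. */
    #define GC_INCR 228	/* stand-in for TCACHE_GC_INCR */

    struct cache { unsigned ev_cnt; };

    static void
    cache_gc_hard(struct cache *c)
    {
    	c->ev_cnt = 0;	/* ... trim one bin toward its low-water mark ... */
    }

    static inline void
    cache_event(struct cache *c)
    {
    	c->ev_cnt++;
    	if (c->ev_cnt == GC_INCR)	/* rare; unlikely() in jemalloc */
    		cache_gc_hard(c);
    }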
JEMALLOC_ALWAYS_INLINE void *
@@ -276,85 +251,87 @@ tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
	if (unlikely((int)tbin->ncached < tbin->low_water))
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

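tcache_alloc_easy() is a plain LIFO pop from the bin's pointer stack, with a low-water mark recorded for the incremental GC to inspect. The same logic as a self-contained sketch (mini_bin_t is a hypothetical stand-in for tcache_bin_s):

    #include <stddef.h>

    typedef struct {
    	int low_water;		/* min ncached since last GC pass */
    	unsigned ncached;	/* number of cached pointers */
    	void *avail[64];	/* LIFO stack of cached objects */
    } mini_bin_t;

    static void *
    mini_bin_pop(mini_bin_t *tbin)
    {
    	if (tbin->ncached == 0) {
    		tbin->low_water = -1;	/* signal an empty-bin miss */
    		return (NULL);
    	}
    	tbin->ncached--;
    	if ((int)tbin->ncached < tbin->low_water)
    		tbin->low_water = tbin->ncached;	/* GC watermark */
    	return (tbin->avail[tbin->ncached]);
    }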
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    bool zero)
{
	void *ret;
	size_t binind;
	index_t binind;
	size_t usize;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	binind = size2index(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = arena_bin_info[binind].reg_size;
	usize = index2size(binind);
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
	if (unlikely(ret == NULL)) {
		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
	assert(tcache_salloc(ret) == usize);

	if (zero == false) {
	if (likely(!zero)) {
		if (config_fill) {
			if (opt_junk) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	} else {
		if (config_fill && opt_junk) {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
		memset(ret, 0, usize);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
		tcache->prof_accumbytes += usize;
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    bool zero)
{
	void *ret;
	size_t binind;
	index_t binind;
	size_t usize;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = NBINS + (size >> LG_PAGE) - 1;
	binind = size2index(size);
	usize = index2size(binind);
	assert(usize <= tcache_maxclass);
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
	if (unlikely(ret == NULL)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		ret = arena_malloc_large(arena, usize, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && prof_promote && size == PAGE) {
		if (config_prof && usize == LARGE_MINCLASS) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -362,57 +339,54 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
		if (likely(!zero)) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
				if (unlikely(opt_junk_alloc))
					memset(ret, 0xa5, usize);
				else if (unlikely(opt_zero))
					memset(ret, 0, usize);
			}
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		} else {
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
			memset(ret, 0, size);
		}
		} else
			memset(ret, 0, usize);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
			tcache->prof_accumbytes += usize;
	}

	tcache_event(tcache);
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
	if (config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	index_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

@@ -420,22 +394,31 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> LG_PAGE) - 1;
	binind = size2index(size);

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);
	if (config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_large(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL))
		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
	return (elm->tcache);
}
#endif
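Throughout these paths, 4.0.0 replaces ad-hoc bin-index arithmetic such as NBINS + (size >> LG_PAGE) - 1 with the uniform size2index()/index2size() mapping. A toy model of the underlying "four size classes per size doubling" rounding, assuming no tiny/quantum special cases (the real class tables are generated at configure time):

    #include <assert.h>
    #include <stddef.h>

    static unsigned
    toy_lg_floor(size_t x)
    {
    	unsigned lg = 0;

    	while (x >>= 1)
    		lg++;
    	return (lg);
    }

    static size_t
    toy_size_class(size_t size)	/* size >= 1 */
    {
    	unsigned lg = toy_lg_floor(size - 1);	/* group containing size */
    	size_t delta = (size_t)1 << (lg >= 2 ? lg - 2 : 0); /* 4 steps/group */

    	return ((size + delta - 1) & ~(delta - 1));	/* round up a step */
    }

    int
    main(void)
    {
    	assert(toy_size_class(1000) == 1024);
    	assert(toy_size_class(1100) == 1280);	/* 1024,1280,1536,1792,... */
    	return (0);
    }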
@@ -2,7 +2,7 @@
#ifdef JEMALLOC_H_TYPES

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8
#define MALLOC_TSD_CLEANUPS_MAX 2

typedef bool (*malloc_tsd_cleanup_t)(void);

@@ -12,9 +12,18 @@ typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif

typedef struct tsd_s tsd_t;

typedef enum {
	tsd_state_uninitialized,
	tsd_state_nominal,
	tsd_state_purgatory,
	tsd_state_reincarnated
} tsd_state_t;

/*
 * TLS/TSD-agnostic macro-based implementation of thread-specific data. There
 * are four macros that support (at least) three use cases: file-private,
 * are five macros that support (at least) three use cases: file-private,
 * library-private, and library-private inlined. Following is an example
 * library-private tsd variable:
 *
@@ -24,34 +33,36 @@ typedef struct tsd_init_head_s tsd_init_head_t;
 *   int y;
 * } example_t;
 * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
 * malloc_tsd_protos(, example, example_t *)
 * malloc_tsd_externs(example, example_t *)
 * malloc_tsd_types(example_, example_t)
 * malloc_tsd_protos(, example_, example_t)
 * malloc_tsd_externs(example_, example_t)
 * In example.c:
 * malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
 * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
 * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
 * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
 *     example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
 *   example_t **example_tsd_get() {...}
 *   void example_tsd_set(example_t **val) {...}
 *   example_t *example_tsd_get() {...}
 *   void example_tsd_set(example_t *val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
 * cast to (void *). This means that the cleanup function needs to cast *and*
 * dereference the function argument, e.g.:
 * cast to (void *). This means that the cleanup function needs to cast the
 * function argument to (a_type *), then dereference the resulting pointer to
 * access fields, e.g.
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
 *       example_t *example = *(example_t **)arg;
 *       example_t *example = (example_t *)arg;
 *
 *       example->x = 42;
 *       [...]
 *       if ([want the cleanup function to be called again]) {
 *           example_tsd_set(&example);
 *       }
 *       if ([want the cleanup function to be called again])
 *           example_tsd_set(example);
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
@@ -60,63 +71,96 @@ typedef struct tsd_init_head_s tsd_init_head_t;
 * non-NULL.
 */

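Restating the header comment's pattern as one continuous sketch (example_t, EX_INITIALIZER, and example_tsd_cleanup are the header's own illustrative names; this only compiles inside jemalloc, where the malloc_tsd_* macros are defined):

    /* In example.h: */
    typedef struct {
    	int x;
    	int y;
    } example_t;
    #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
    malloc_tsd_types(example_, example_t)
    malloc_tsd_protos(, example_, example_t)
    malloc_tsd_externs(example_, example_t)

    /* In example.c: */
    malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
    malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
        example_tsd_cleanup)

    /* The generated accessors can then be used as: */
    void
    bump_x(void)
    {
    	example_t *ex = example_tsd_get();

    	ex->x++;
    }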
/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##tsd_wrapper_t;
#endif

/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##_tsd_boot(void); \
a_attr a_type * \
a_name##_tsd_get(void); \
a_name##tsd_boot0(void); \
a_attr void \
a_name##_tsd_set(a_type *val);
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr a_type * \
a_name##tsd_get(void); \
a_attr void \
a_name##tsd_set(a_type *val);

/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern __thread bool a_name##_initialized; \
extern bool a_name##_booted;
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif

/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##_initialized = false; \
a_attr bool a_name##_booted = false;
a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \
a_attr bool a_name##_booted = false;
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
	ql_head_initializer(blocks), \
	MALLOC_MUTEX_INITIALIZER \
}; \
a_attr bool a_name##_booted = false;
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
	false, \
	a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif

/* malloc_tsd_funcs(). */
@@ -125,75 +169,100 @@ a_attr bool a_name##_booted = false;
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
	if (a_name##_initialized) { \
		a_name##_initialized = false; \
		a_cleanup(&a_name##_tls); \
	if (a_name##tsd_initialized) { \
		a_name##tsd_initialized = false; \
		a_cleanup(&a_name##tsd_tls); \
	} \
	return (a_name##_initialized); \
	return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##_tsd_cleanup_wrapper); \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##_booted = true; \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1() \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
a_name##tsd_get(void) \
{ \
\
	assert(a_name##_booted); \
	return (&a_name##_tls); \
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##_booted); \
	a_name##_tls = (*val); \
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		a_name##_initialized = true; \
		a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_boot(void) \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
		    0) \
			return (true); \
	} \
	a_name##_booted = true; \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1() \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
a_name##tsd_get(void) \
{ \
\
	assert(a_name##_booted); \
	return (&a_name##_tls); \
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##_booted); \
	a_name##_tls = (*val); \
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_setspecific(a_name##_tsd, \
		    (void *)(&a_name##_tls))) { \
		if (pthread_setspecific(a_name##tsd_tsd, \
		    (void *)(&a_name##tsd_tls))) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
@@ -204,27 +273,21 @@ a_name##_tsd_set(a_type *val) \
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
a_name##tsd_cleanup_wrapper(void) \
{ \
	a_name##_tsd_wrapper_t *wrapper; \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
	if (wrapper == NULL) \
		return (false); \
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		a_type val = wrapper->val; \
		a_type tsd_static_data = a_initializer; \
		wrapper->initialized = false; \
		wrapper->val = tsd_static_data; \
		a_cleanup(&val); \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			return (true); \
@@ -233,63 +296,95 @@ a_name##_tsd_cleanup_wrapper(void) \
	malloc_tsd_dalloc(wrapper); \
	return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	a_name##_tsd = TlsAlloc(); \
	if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
		return (true); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##_tsd_cleanup_wrapper); \
	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	a_name##_booted = true; \
	return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(void) \
{ \
	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
	    TlsGetValue(a_name##_tsd); \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (wrapper == NULL) { \
		wrapper = (a_name##_tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
	if (unlikely(wrapper == NULL)) { \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			static a_type tsd_static_data = a_initializer; \
			wrapper->initialized = false; \
			wrapper->val = tsd_static_data; \
		} \
		if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
			malloc_write("<jemalloc>: Error setting" \
			    " TSD for "#a_name"\n"); \
			abort(); \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
	} \
	return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
	a_name##_tsd_wrapper_t *wrapper; \
\
	assert(a_name##_booted); \
	wrapper = a_name##_tsd_get_wrapper(); \
	a_name##tsd_tsd = TlsAlloc(); \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
		return (true); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1() \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
a_name##tsd_set(a_type *val) \
{ \
	a_name##_tsd_wrapper_t *wrapper; \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##_booted); \
	wrapper = a_name##_tsd_get_wrapper(); \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
@@ -297,16 +392,11 @@ a_name##_tsd_set(a_type *val) \
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
	bool initialized; \
	a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
@@ -314,7 +404,7 @@ a_name##_tsd_cleanup_wrapper(void *arg) \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			if (pthread_setspecific(a_name##_tsd, \
			if (pthread_setspecific(a_name##tsd_tsd, \
			    (void *)wrapper)) { \
				malloc_write("<jemalloc>: Error" \
				    " setting TSD for "#a_name"\n"); \
@@ -326,67 +416,97 @@ a_name##_tsd_cleanup_wrapper(void *arg) \
	} \
	malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (pthread_key_create(&a_name##_tsd, \
	    a_name##_tsd_cleanup_wrapper) != 0) \
		return (true); \
	a_name##_booted = true; \
	return (false); \
	if (pthread_setspecific(a_name##tsd_tsd, \
	    (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(void) \
{ \
	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
	    pthread_getspecific(a_name##_tsd); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    pthread_getspecific(a_name##tsd_tsd); \
\
	if (wrapper == NULL) { \
	if (unlikely(wrapper == NULL)) { \
		tsd_init_block_t block; \
		wrapper = tsd_init_check_recursion( \
		    &a_name##_tsd_init_head, &block); \
		    &a_name##tsd_init_head, &block); \
		if (wrapper) \
			return (wrapper); \
		wrapper = (a_name##_tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		block.data = wrapper; \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			static a_type tsd_static_data = a_initializer; \
			wrapper->initialized = false; \
			wrapper->val = tsd_static_data; \
			wrapper->val = a_initializer; \
		} \
		if (pthread_setspecific(a_name##_tsd, \
		    (void *)wrapper)) { \
			malloc_write("<jemalloc>: Error setting" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} \
		tsd_init_finish(&a_name##_tsd_init_head, &block); \
		a_name##tsd_wrapper_set(wrapper); \
		tsd_init_finish(&a_name##tsd_init_head, &block); \
	} \
	return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
	a_name##_tsd_wrapper_t *wrapper; \
\
	assert(a_name##_booted); \
	wrapper = a_name##_tsd_get_wrapper(); \
	if (pthread_key_create(&a_name##tsd_tsd, \
	    a_name##tsd_cleanup_wrapper) != 0) \
		return (true); \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1() \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
a_name##tsd_set(a_type *val) \
{ \
	a_name##_tsd_wrapper_t *wrapper; \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##_booted); \
	wrapper = a_name##_tsd_get_wrapper(); \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
@@ -410,25 +530,136 @@ struct tsd_init_head_s {
};
#endif

#define MALLOC_TSD \
/*  O(name,			type) */ \
    O(tcache,			tcache_t *) \
    O(thread_allocated,		uint64_t) \
    O(thread_deallocated,	uint64_t) \
    O(prof_tdata,		prof_tdata_t *) \
    O(arena,			arena_t *) \
    O(arenas_cache,		arena_t **) \
    O(narenas_cache,		unsigned) \
    O(arenas_cache_bypass,	bool) \
    O(tcache_enabled,		tcache_enabled_t) \
    O(quarantine,		quarantine_t *) \

#define TSD_INITIALIZER { \
    tsd_state_uninitialized, \
    NULL, \
    0, \
    0, \
    NULL, \
    NULL, \
    NULL, \
    0, \
    false, \
    tcache_enabled_default, \
    NULL \
}

struct tsd_s {
	tsd_state_t state;
#define O(n, t) \
	t n;
MALLOC_TSD
#undef O
};

static const tsd_t tsd_initializer = TSD_INITIALIZER;

malloc_tsd_types(, tsd_t)

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
bool malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
void tsd_cleanup(void *arg);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)

tsd_t *tsd_fetch(void);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
	tsd_t *tsd = tsd_get();

	if (unlikely(tsd->state != tsd_state_nominal)) {
		if (tsd->state == tsd_state_uninitialized) {
			tsd->state = tsd_state_nominal;
			/* Trigger cleanup handler registration. */
			tsd_set(tsd);
		} else if (tsd->state == tsd_state_purgatory) {
			tsd->state = tsd_state_reincarnated;
			tsd_set(tsd);
		} else
			assert(tsd->state == tsd_state_reincarnated);
	}

	return (tsd);
}

JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{

	return (tsd->state == tsd_state_nominal);
}

#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
	return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
	return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
	assert(tsd->state == tsd_state_nominal); \
	tsd->n = n; \
}
MALLOC_TSD
#undef O
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
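tsd_fetch() above walks a small state machine so allocator calls made during or after thread teardown stay safe: a first use promotes to nominal, and use after cleanup is parked in a reincarnated state instead of corrupting freed TLS. The transitions, isolated as a sketch:

    /* Minimal model of the four-state lifecycle tsd_fetch() walks. */
    typedef enum {
    	STATE_UNINITIALIZED,	/* first use: promote to nominal */
    	STATE_NOMINAL,		/* normal operation */
    	STATE_PURGATORY,	/* cleanup ran; next use reincarnates */
    	STATE_REINCARNATED	/* allocator used after thread teardown */
    } state_t;

    static state_t
    fetch_transition(state_t s)
    {
    	switch (s) {
    	case STATE_UNINITIALIZED:
    		return (STATE_NOMINAL);
    	case STATE_PURGATORY:
    		return (STATE_REINCARNATED);
    	default:
    		return (s);	/* nominal/reincarnated persist */
    	}
    }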
@@ -1,6 +1,36 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#ifdef _WIN32
# ifdef _WIN64
#  define FMT64_PREFIX "ll"
#  define FMTPTR_PREFIX "ll"
# else
#  define FMT64_PREFIX "ll"
#  define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

@@ -22,9 +52,33 @@
 * uninitialized.
 */
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif

#define JEMALLOC_GNUC_PREREQ(major, minor) \
    (!defined(__clang__) && \
    (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
#ifndef __has_builtin
# define __has_builtin(builtin) (0)
#endif
#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \
    (defined(__clang__) && __has_builtin(builtin))

#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
# if JEMALLOC_GNUC_PREREQ(4, 6) || \
    JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
#  define unreachable() __builtin_unreachable()
# else
#  define unreachable()
# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
# define unreachable()
#endif
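The likely()/unlikely() wrappers map onto __builtin_expect() under GCC/Clang and collapse to plain truth tests elsewhere; 4.0.0 sprinkles them over the cold paths shown throughout this diff. A minimal usage sketch showing the intended shape:

    #ifdef __GNUC__
    # define likely(x)	__builtin_expect(!!(x), 1)
    # define unlikely(x)	__builtin_expect(!!(x), 0)
    #else
    # define likely(x)	(x)
    # define unlikely(x)	(x)
    #endif

    int
    checked_div(int a, int b, int *out)
    {
    	if (unlikely(b == 0))	/* error path kept off the hot path */
    		return (-1);
    	*out = a / b;
    	return (0);
    }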
/*
@@ -33,7 +87,7 @@
 */
#ifndef assert
#define assert(e) do { \
	if (config_debug && !(e)) { \
	if (unlikely(config_debug && !(e))) { \
		malloc_printf( \
		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
		    __FILE__, __LINE__, #e); \
@@ -50,6 +104,7 @@
		    __FILE__, __LINE__); \
		abort(); \
	} \
	unreachable(); \
} while (0)
#endif

@@ -65,14 +120,14 @@

#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
	if (config_debug && !(e)) \
	if (unlikely(config_debug && !(e))) \
		not_implemented(); \
} while (0)
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
	if ((c) == false) \
	if (unlikely(!(c))) \
		not_reached(); \
} while (0)

@@ -96,25 +151,47 @@ void malloc_write(const char *s);
int malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_ATTR(format(printf, 3, 4));
    JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
    JEMALLOC_ATTR(format(printf, 1, 2));
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
int jemalloc_ffsl(long bitmap);
int jemalloc_ffs(int bitmap);
size_t pow2_ceil(size_t x);
size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))

/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
#endif

JEMALLOC_ALWAYS_INLINE int
jemalloc_ffsl(long bitmap)
{

	return (JEMALLOC_INTERNAL_FFSL(bitmap));
}

JEMALLOC_ALWAYS_INLINE int
jemalloc_ffs(int bitmap)
{

	return (JEMALLOC_INTERNAL_FFS(bitmap));
}

/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
@@ -133,7 +210,82 @@ pow2_ceil(size_t x)
	return (x);
}

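pow2_ceil() (whose body the hunk elides) uses the classic decrement/bit-smear/increment round-up, the same smearing visible in the portable lg_floor() fallback below. A standalone 64-bit sketch of that trick, assuming a uint64_t operand to sidestep width questions:

    #include <assert.h>
    #include <stdint.h>

    /* Round up to the smallest power of 2 that is >= x (x in [1, 2^63]). */
    static uint64_t
    pow2_ceil64(uint64_t x)
    {
    	x--;
    	x |= x >> 1;	/* smear the leading 1 bit downward... */
    	x |= x >> 2;
    	x |= x >> 4;
    	x |= x >> 8;
    	x |= x >> 16;
    	x |= x >> 32;	/* ...across all 64 bits */
    	return (x + 1);
    }

    int
    main(void)
    {
    	assert(pow2_ceil64(1000) == 1024);
    	assert(pow2_ceil64(1024) == 1024);
    	return (0);
    }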
/* Sets error code */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
	size_t ret;

	assert(x != 0);

	asm ("bsr %1, %0"
	    : "=r"(ret) // Outputs.
	    : "r"(x) // Inputs.
	    );
	return (ret);
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
	unsigned long ret;

	assert(x != 0);

#if (LG_SIZEOF_PTR == 3)
	_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
	_BitScanReverse(&ret, x);
#else
#  error "Unsupported type sizes for lg_floor()"
#endif
	return (ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{

	assert(x != 0);

#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
#  error "Unsupported type sizes for lg_floor()"
#endif
}
#else
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{

	assert(x != 0);

	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	x |= (x >> 32);
	if (x == KZU(0xffffffffffffffff))
		return (63);
	x++;
	return (jemalloc_ffsl(x) - 2);
#elif (LG_SIZEOF_PTR == 2)
	if (x == KZU(0xffffffff))
		return (31);
	x++;
	return (jemalloc_ffs(x) - 2);
#else
#  error "Unsupported type sizes for lg_floor()"
#endif
}
#endif

/* Set error code. */
JEMALLOC_INLINE void
set_errno(int errnum)
{
@@ -145,7 +297,7 @@ set_errno(int errnum)
#endif
}

/* Get last error code */
/* Get last error code. */
JEMALLOC_INLINE int
get_errno(void)
{
contrib/jemalloc/include/jemalloc/internal/valgrind.h (new file, 112 lines)
@ -0,0 +1,112 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>

/*
 * The size that is reported to Valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
    if (unlikely(in_valgrind)) \
        valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
    if (unlikely(in_valgrind)) \
        valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
    if (unlikely(in_valgrind)) \
        valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
 * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
 * calls must be embedded in macros rather than in functions so that when
 * Valgrind reports errors, there are no extra stack frames in the backtraces.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
    if (unlikely(in_valgrind && cond)) \
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
    zero) do { \
    if (unlikely(in_valgrind)) { \
        size_t rzsize = p2rz(ptr); \
 \
        if (!maybe_moved || ptr == old_ptr) { \
            VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                usize, rzsize); \
            if (zero && old_usize < usize) { \
                valgrind_make_mem_defined( \
                    (void *)((uintptr_t)ptr + \
                    old_usize), usize - old_usize); \
            } \
        } else { \
            if (!old_ptr_maybe_null || old_ptr != NULL) { \
                valgrind_freelike_block(old_ptr, \
                    old_rzsize); \
            } \
            if (!ptr_maybe_null || ptr != NULL) { \
                size_t copy_size = (old_usize < usize) \
                    ? old_usize : usize; \
                size_t tail_size = usize - copy_size; \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                    rzsize, false); \
                if (copy_size > 0) { \
                    valgrind_make_mem_defined(ptr, \
                        copy_size); \
                } \
                if (zero && tail_size > 0) { \
                    valgrind_make_mem_defined( \
                        (void *)((uintptr_t)ptr + \
                        copy_size), tail_size); \
                } \
            } \
        } \
    } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
    if (unlikely(in_valgrind)) \
        valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
    zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_VALGRIND
void    valgrind_make_mem_noaccess(void *ptr, size_t usize);
void    valgrind_make_mem_undefined(void *ptr, size_t usize);
void    valgrind_make_mem_defined(void *ptr, size_t usize);
void    valgrind_freelike_block(void *ptr, size_t usize);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

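The comment in this header is the key constraint: Valgrind must always be told the usable size, never the request size, so that a later realloc describes the same block consistently. A hedged sketch of the chain these macros enforce, using Valgrind's documented client requests from <valgrind/valgrind.h> (usize0/usize1 are hypothetical usable sizes as the size-class machinery would report them):

    #include <stddef.h>
    #include <valgrind/valgrind.h>

    /* Illustrative only: describe one block across a malloc..realloc chain. */
    void
    valgrind_chain_demo(void *p, size_t usize0, size_t usize1)
    {
        /* Allocation: block of usize0 bytes, no redzone, not zeroed. */
        VALGRIND_MALLOCLIKE_BLOCK(p, usize0, 0, 0);
        /* In-place resize: old and new sizes must both be usable sizes. */
        VALGRIND_RESIZEINPLACE_BLOCK(p, usize0, usize1, 0);
        /* Deallocation: redzone size must match the allocation-time value. */
        VALGRIND_FREELIKE_BLOCK(p, 0);
    }

If the allocation had been registered with the request size but resized with the usable size, Valgrind would see two inconsistent descriptions of the same block, which is exactly what passing usize everywhere avoids.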
@ -7,8 +7,14 @@ extern "C" {
/* Defined if __attribute__((...)) syntax is supported. */
#define JEMALLOC_HAVE_ATTR

/* Support the experimental API. */
#define JEMALLOC_EXPERIMENTAL
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */

/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */

/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

/*
 * Define overrides for non-standard allocator-related functions if they are
@ -26,6 +32,13 @@ extern "C" {
 */
#define JEMALLOC_USABLE_SIZE_CONST const

/*
 * If defined, specify throw() for the public function prototypes when compiling
 * with C++.  The only justification for this is to match the prototypes that
 * glibc defines.
 */
/* #undef JEMALLOC_USE_CXX_THROW */

/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR 3

@ -48,6 +61,7 @@ extern "C" {
#  define je_xallocx xallocx
#  define je_sallocx sallocx
#  define je_dallocx dallocx
#  define je_sdallocx sdallocx
#  define je_nallocx nallocx
#  define je_mallctl mallctl
#  define je_mallctlnametomib mallctlnametomib

@ -55,24 +69,22 @@ extern "C" {
#  define je_malloc_stats_print malloc_stats_print
#  define je_malloc_usable_size malloc_usable_size
#  define je_valloc valloc
#  define je_allocm allocm
#  define je_dallocm dallocm
#  define je_nallocm nallocm
#  define je_rallocm rallocm
#  define je_sallocm sallocm
#endif

#include "jemalloc_FreeBSD.h"

#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>

#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_MAJOR 3
#define JEMALLOC_VERSION_MINOR 6
#define JEMALLOC_VERSION "4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2"
#define JEMALLOC_VERSION_MAJOR 4
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340"
#define JEMALLOC_VERSION_GID "6e98caf8f064482b9ab292ef3638dea67420bbc2"

#  define MALLOCX_LG_ALIGN(la) (la)
#  if LG_SIZEOF_PTR == 2
@ -82,48 +94,86 @@ extern "C" {
        ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#  endif
#  define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
#  define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
/*
 * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
 * encodes MALLOCX_TCACHE_NONE.
 */
#  define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
#  define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
 * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
 */
#  define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))

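With the bias encodings above, a flags word of zero keeps the default behavior, while an explicit tcache or arena index is shifted into its own bit field. A sketch of combining an explicit tcache with other flags, assuming the non-prefixed public names and the "tcache.create" mallctl mentioned in the ChangeLog (error handling abbreviated):

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    void *
    tcache_alloc_demo(void)
    {
        unsigned tc;
        size_t sz = sizeof(tc);

        /* Create an explicit tcache; its index biases the flag bits. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
            return (NULL);
        /* Allocate 4 KiB of zeroed memory through that tcache. */
        return (mallocx(4096, MALLOCX_TCACHE(tc) | MALLOCX_ZERO));
    }

Passing MALLOCX_TCACHE_NONE instead would bypass tcaches entirely for that call, which is why the tcache field reserves two biased values (0 for automatic management, 1 for "none") before real indices begin.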
#ifdef JEMALLOC_EXPERIMENTAL
#  define ALLOCM_LG_ALIGN(la) (la)
#  if LG_SIZEOF_PTR == 2
#    define ALLOCM_ALIGN(a) (ffs(a)-1)
#  else
#    define ALLOCM_ALIGN(a) \
        ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#  endif
#  define ALLOCM_ZERO ((int)0x40)
#  define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#  define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#  define ALLOCM_SUCCESS 0
#  define ALLOCM_ERR_OOM 1
#  define ALLOCM_ERR_NOT_MOVED 2
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
#  define JEMALLOC_CXX_THROW throw()
#else
#  define JEMALLOC_CXX_THROW
#endif

#ifdef JEMALLOC_HAVE_ATTR
#  define JEMALLOC_ATTR(s) __attribute__((s))
#  define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
#  else
#    define JEMALLOC_ALLOC_SIZE(s)
#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  endif
#  ifndef JEMALLOC_EXPORT
#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
#  endif
#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
#  else
#    define JEMALLOC_FORMAT_PRINTF(s, i)
#  endif
#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
#  define JEMALLOC_RESTRICT_RETURN
#  define JEMALLOC_ALLOCATOR
#elif _MSC_VER
#  define JEMALLOC_ATTR(s)
#  ifdef DLLEXPORT
#    define JEMALLOC_EXPORT __declspec(dllexport)
#  else
#    define JEMALLOC_EXPORT __declspec(dllimport)
#  endif
#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
#  define JEMALLOC_ALLOC_SIZE(s)
#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  ifndef JEMALLOC_EXPORT
#    ifdef DLLEXPORT
#      define JEMALLOC_EXPORT __declspec(dllexport)
#    else
#      define JEMALLOC_EXPORT __declspec(dllimport)
#    endif
#  endif
#  define JEMALLOC_FORMAT_PRINTF(s, i)
#  define JEMALLOC_NOINLINE __declspec(noinline)
#  ifdef __cplusplus
#    define JEMALLOC_NOTHROW __declspec(nothrow)
#  else
#    define JEMALLOC_NOTHROW
#  endif
#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
#  if _MSC_VER >= 1900 && !defined(__EDG__)
#    define JEMALLOC_ALLOCATOR __declspec(allocator)
#  else
#    define JEMALLOC_ALLOCATOR
#  endif
#else
#  define JEMALLOC_ATTR(s)
#  define JEMALLOC_EXPORT
#  define JEMALLOC_ALIGNED(s)
#  define JEMALLOC_SECTION(s)
#  define JEMALLOC_ALLOC_SIZE(s)
#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
#  define JEMALLOC_EXPORT
#  define JEMALLOC_FORMAT_PRINTF(s, i)
#  define JEMALLOC_NOINLINE
#  define JEMALLOC_NOTHROW
#  define JEMALLOC_SECTION(s)
#  define JEMALLOC_RESTRICT_RETURN
#  define JEMALLOC_ALLOCATOR
#endif

/*
@ -135,55 +185,121 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
    const char *s);

JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
    size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_malloc(size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
    JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
    JEMALLOC_CXX_THROW;

JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags)
    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size,
    int flags) JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size,
    size_t extra, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr,
    int flags) JEMALLOC_ATTR(pure);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size,
    int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags)
    JEMALLOC_ATTR(pure);

JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
    size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
    const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name,
    size_t *mibp, size_t *miblenp);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
    void (*write_cb)(void *, const char *), void *je_cbopaque,
    const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
    JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
    void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
    JEMALLOC_ATTR(malloc);
#endif

#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
    int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
    JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
/*
 * void *
 * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
 *     bool *commit, unsigned arena_ind);
 */
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);

/*
 * bool
 * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
 */
typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);

/*
 * bool
 * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);

/*
 * bool
 * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);

typedef struct {
    chunk_alloc_t       *alloc;
    chunk_dalloc_t      *dalloc;
    chunk_commit_t      *commit;
    chunk_decommit_t    *decommit;
    chunk_purge_t       *purge;
    chunk_split_t       *split;
    chunk_merge_t       *merge;
} chunk_hooks_t;

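The chunk_hooks_t table above is the plug-in point for the per-arena application-specified chunk allocators mentioned in the ChangeLog. A hedged sketch of wrapping just the alloc hook while chaining to the saved defaults; it assumes the "arena.<i>.chunk_hooks" mallctl that jemalloc 4.0.0 exposes for this purpose, and logging_chunk_alloc/install_hooks_demo are hypothetical names (error handling elided):

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static chunk_hooks_t default_hooks;

    /* Log every chunk allocation, then defer to the saved default hook. */
    static void *
    logging_chunk_alloc(void *new_addr, size_t size, size_t alignment,
        bool *zero, bool *commit, unsigned arena_ind)
    {
        fprintf(stderr, "chunk_alloc: %zu bytes for arena %u\n", size,
            arena_ind);
        return (default_hooks.alloc(new_addr, size, alignment, zero, commit,
            arena_ind));
    }

    void
    install_hooks_demo(void)
    {
        chunk_hooks_t hooks;
        size_t sz = sizeof(chunk_hooks_t);

        /* Read arena 0's current hooks so the wrapper can chain to them. */
        mallctl("arena.0.chunk_hooks", &default_hooks, &sz, NULL, 0);
        hooks = default_hooks;
        hooks.alloc = logging_chunk_alloc;
        mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sz);
    }

Replacing one hook while copying the rest is the intended pattern: each hook is read and written as a unit, so an application never has to reimplement commit/decommit/purge just to customize allocation.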
/*
 * By default application code must explicitly refer to mangled symbol names,
@ -209,6 +325,7 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#  define xallocx je_xallocx
#  define sallocx je_sallocx
#  define dallocx je_dallocx
#  define sdallocx je_sdallocx
#  define nallocx je_nallocx
#  define mallctl je_mallctl
#  define mallctlnametomib je_mallctlnametomib

@ -216,11 +333,6 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#  define malloc_stats_print je_malloc_stats_print
#  define malloc_usable_size je_malloc_usable_size
#  define valloc je_valloc
#  define allocm je_allocm
#  define dallocm je_dallocm
#  define nallocm je_nallocm
#  define rallocm je_rallocm
#  define sallocm je_sallocm
#endif

/*

@ -244,6 +356,7 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#  undef je_xallocx
#  undef je_sallocx
#  undef je_dallocx
#  undef je_sdallocx
#  undef je_nallocx
#  undef je_mallctl
#  undef je_mallctlnametomib

@ -251,14 +364,9 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#  undef je_malloc_stats_print
#  undef je_malloc_usable_size
#  undef je_valloc
#  undef je_allocm
#  undef je_dallocm
#  undef je_nallocm
#  undef je_rallocm
#  undef je_sallocm
#endif

#ifdef __cplusplus
};
}
#endif
#endif /* JEMALLOC_H_ */

@ -40,9 +40,6 @@
#ifdef __arm__
#  define LG_SIZEOF_PTR 2
#endif
#ifdef __aarch64__
#  define LG_SIZEOF_PTR 3
#endif
#ifdef __mips__
#ifdef __mips_n64
#  define LG_SIZEOF_PTR 3

@ -76,6 +73,7 @@ extern int __isthreaded;
#undef je_realloc
#undef je_free
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_malloc_usable_size
#undef je_mallocx
#undef je_rallocx

@ -93,6 +91,7 @@ extern int __isthreaded;
#define je_realloc __realloc
#define je_free __free
#define je_posix_memalign __posix_memalign
#define je_aligned_alloc __aligned_alloc
#define je_malloc_usable_size __malloc_usable_size
#define je_mallocx __mallocx
#define je_rallocx __rallocx

@ -122,6 +121,7 @@ __weak_reference(__calloc, calloc);
__weak_reference(__realloc, realloc);
__weak_reference(__free, free);
__weak_reference(__posix_memalign, posix_memalign);
__weak_reference(__aligned_alloc, aligned_alloc);
__weak_reference(__malloc_usable_size, malloc_usable_size);
__weak_reference(__mallocx, mallocx);
__weak_reference(__rallocx, rallocx);

contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h  (new file, 57 lines)
@ -0,0 +1,57 @@
/*
 * void *
 * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
 *     bool *commit, unsigned arena_ind);
 */
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);

/*
 * bool
 * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
 */
typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);

/*
 * bool
 * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
 *     unsigned arena_ind);
 */
typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);

/*
 * bool
 * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);

/*
 * bool
 * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 *     bool committed, unsigned arena_ind);
 */
typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);

typedef struct {
    chunk_alloc_t       *alloc;
    chunk_dalloc_t      *dalloc;
    chunk_commit_t      *commit;
    chunk_decommit_t    *decommit;
    chunk_purge_t       *purge;
    chunk_split_t       *split;
    chunk_merge_t       *merge;
} chunk_hooks_t;

File diff suppressed because it is too large
@ -5,107 +5,138 @@
/* Data. */

static malloc_mutex_t   base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void     *base_pages;
static void     *base_next_addr;
static void     *base_past_addr; /* Addr immediately past base_pages. */
static extent_tree_t    base_avail_szad;
static extent_node_t    *base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);
static size_t   base_allocated;
static size_t   base_resident;
static size_t   base_mapped;

/******************************************************************************/

static bool
base_pages_alloc(size_t minsize)
/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
{
    size_t csize;
    bool zero;
    extent_node_t *node;

    assert(minsize != 0);
    csize = CHUNK_CEILING(minsize);
    zero = false;
    base_pages = chunk_alloc(csize, chunksize, true, &zero,
        chunk_dss_prec_get());
    if (base_pages == NULL)
        return (true);
    base_next_addr = base_pages;
    base_past_addr = (void *)((uintptr_t)base_pages + csize);

    return (false);
    if (base_nodes == NULL)
        return (NULL);
    node = base_nodes;
    base_nodes = *(extent_node_t **)node;
    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
    return (node);
}

/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
    *(extent_node_t **)node = base_nodes;
    base_nodes = node;
}

/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
{
    extent_node_t *node;
    size_t csize, nsize;
    void *addr;

    assert(minsize != 0);
    node = base_node_try_alloc();
    /* Allocate enough space to also carve a node out if necessary. */
    nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
    csize = CHUNK_CEILING(minsize + nsize);
    addr = chunk_alloc_base(csize);
    if (addr == NULL) {
        if (node != NULL)
            base_node_dalloc(node);
        return (NULL);
    }
    base_mapped += csize;
    if (node == NULL) {
        node = (extent_node_t *)addr;
        addr = (void *)((uintptr_t)addr + nsize);
        csize -= nsize;
        if (config_stats) {
            base_allocated += nsize;
            base_resident += PAGE_CEILING(nsize);
        }
    }
    extent_node_init(node, NULL, addr, csize, true, true);
    return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(size_t size)
{
    void *ret;
    size_t csize;
    size_t csize, usize;
    extent_node_t *node;
    extent_node_t key;

    /* Round size up to nearest multiple of the cacheline size. */
    /*
     * Round size up to nearest multiple of the cacheline size, so that
     * there is no chance of false cache line sharing.
     */
    csize = CACHELINE_CEILING(size);

    usize = s2u(csize);
    extent_node_init(&key, NULL, NULL, usize, false, false);
    malloc_mutex_lock(&base_mtx);
    /* Make sure there's enough space for the allocation. */
    if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
        if (base_pages_alloc(csize)) {
            malloc_mutex_unlock(&base_mtx);
            return (NULL);
        }
    }
    /* Allocate. */
    ret = base_next_addr;
    base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
    malloc_mutex_unlock(&base_mtx);
    VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

    return (ret);
}

void *
base_calloc(size_t number, size_t size)
{
    void *ret = base_alloc(number * size);

    if (ret != NULL)
        memset(ret, 0, number * size);

    return (ret);
}

extent_node_t *
base_node_alloc(void)
{
    extent_node_t *ret;

    malloc_mutex_lock(&base_mtx);
    if (base_nodes != NULL) {
        ret = base_nodes;
        base_nodes = *(extent_node_t **)ret;
        malloc_mutex_unlock(&base_mtx);
        VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
    node = extent_tree_szad_nsearch(&base_avail_szad, &key);
    if (node != NULL) {
        /* Use existing space. */
        extent_tree_szad_remove(&base_avail_szad, node);
    } else {
        malloc_mutex_unlock(&base_mtx);
        ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
        /* Try to allocate more space. */
        node = base_chunk_alloc(csize);
    }
    if (node == NULL) {
        ret = NULL;
        goto label_return;
    }

    ret = extent_node_addr_get(node);
    if (extent_node_size_get(node) > csize) {
        extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
        extent_node_size_set(node, extent_node_size_get(node) - csize);
        extent_tree_szad_insert(&base_avail_szad, node);
    } else
        base_node_dalloc(node);
    if (config_stats) {
        base_allocated += csize;
        /*
         * Add one PAGE to base_resident for every page boundary that is
         * crossed by the new allocation.
         */
        base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
            PAGE_CEILING((uintptr_t)ret);
    }
    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
    malloc_mutex_unlock(&base_mtx);
    return (ret);
}

void
base_node_dealloc(extent_node_t *node)
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{

    VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
    malloc_mutex_lock(&base_mtx);
    *(extent_node_t **)node = base_nodes;
    base_nodes = node;
    assert(base_allocated <= base_resident);
    assert(base_resident <= base_mapped);
    *allocated = base_allocated;
    *resident = base_resident;
    *mapped = base_mapped;
    malloc_mutex_unlock(&base_mtx);
}

@ -113,9 +144,10 @@ bool
base_boot(void)
{

    base_nodes = NULL;
    if (malloc_mutex_init(&base_mtx))
        return (true);
    extent_tree_szad_new(&base_avail_szad);
    base_nodes = NULL;

    return (false);
}

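base_alloc() above rounds every request with CACHELINE_CEILING() before carving it from the size/address-ordered tree, which is what prevents false cache line sharing between internal allocations. A sketch of that rounding, assuming the usual 64-byte cacheline and the power-of-two mask form jemalloc uses (the constants here are illustrative, not copied from the import):

    #include <stddef.h>

    #define CACHELINE       64
    #define CACHELINE_MASK  (CACHELINE - 1)
    /* Round s up to the next multiple of the cacheline size. */
    #define CACHELINE_CEILING(s) \
        (((s) + CACHELINE_MASK) & ~(size_t)CACHELINE_MASK)

    /*
     * CACHELINE_CEILING(1) == 64, CACHELINE_CEILING(64) == 64,
     * CACHELINE_CEILING(65) == 128.
     */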
@ -2,19 +2,6 @@
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t   bits2groups(size_t nbits);

/******************************************************************************/

static size_t
bits2groups(size_t nbits)
{

    return ((nbits >> LG_BITMAP_GROUP_NBITS) +
        !!(nbits & BITMAP_GROUP_NBITS_MASK));
}

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)

@ -31,15 +18,16 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
     * that requires only one group.
     */
    binfo->levels[0].group_offset = 0;
    group_count = bits2groups(nbits);
    group_count = BITMAP_BITS2GROUPS(nbits);
    for (i = 1; group_count > 1; i++) {
        assert(i < BITMAP_MAX_LEVELS);
        binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
            + group_count;
        group_count = bits2groups(group_count);
        group_count = BITMAP_BITS2GROUPS(group_count);
    }
    binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
        + group_count;
    assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
    binfo->nlevels = i;
    binfo->nbits = nbits;
}

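The diff replaces the runtime bits2groups() helper with a BITMAP_BITS2GROUPS() macro that can also be evaluated at compile time; both count how many machine-word groups are needed to hold nbits. A sketch with 64-bit groups (LG_BITMAP_GROUP_NBITS == 6 is an assumption matching an LP64 target, not quoted from the import):

    #include <stddef.h>

    #define LG_BITMAP_GROUP_NBITS   6   /* 64-bit groups. */
    #define BITMAP_GROUP_NBITS      (1U << LG_BITMAP_GROUP_NBITS)
    #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS - 1)
    /* One group per full 64 bits, plus one if a remainder exists. */
    #define BITMAP_BITS2GROUPS(nbits) \
        (((nbits) >> LG_BITMAP_GROUP_NBITS) + \
            !!((nbits) & BITMAP_GROUP_NBITS_MASK))

    /* BITMAP_BITS2GROUPS(64) == 1, BITMAP_BITS2GROUPS(100) == 2. */

bitmap_info_init() then stacks these group counts into levels: each level summarizes the one below it until a single group remains at the top.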
@ -5,129 +5,315 @@
/* Data. */

const char  *opt_dss = DSS_DEFAULT;
size_t      opt_lg_chunk = LG_CHUNK_DEFAULT;
size_t      opt_lg_chunk = 0;

malloc_mutex_t  chunks_mtx;
chunk_stats_t   stats_chunks;
/* Used exclusively for gdump triggering. */
static size_t   curchunks;
static size_t   highchunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t    chunks_szad_mmap;
static extent_tree_t    chunks_ad_mmap;
static extent_tree_t    chunks_szad_dss;
static extent_tree_t    chunks_ad_dss;

rtree_t     *chunks_rtree;
rtree_t     chunks_rtree;

/* Various chunk-related settings. */
size_t      chunksize;
size_t      chunksize_mask; /* (chunksize - 1). */
size_t      chunk_npages;
size_t      map_bias;
size_t      arena_maxclass; /* Max size class for arenas. */

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void *chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do { \
    union { \
        chunk_##n##_t   **n; \
        void        **v; \
    } u; \
    u.n = &arena->chunk_hooks.n; \
    atomic_write_p(u.v, chunk_hooks->n); \
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
    static const chunk_hooks_t uninitialized_hooks =
        CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
        0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

bool
chunk_register(const void *chunk, const extent_node_t *node)
{

    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump();
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

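The highchunks update in chunk_register() is the classic lock-free maximum-update loop: jemalloc's atomic_cas_z() returns true on failure, so the loop retries until either the CAS lands or another thread has already published a larger value. A standalone sketch of the same pattern using C11 atomics (atomic_max_demo is a hypothetical name):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Raise *high to at least cur without a lock; retry on CAS failure. */
    static void
    atomic_max_demo(_Atomic size_t *high, size_t cur)
    {
        size_t h = atomic_load(high);

        while (cur > h &&
            !atomic_compare_exchange_weak(high, &h, cur)) {
            /*
             * CAS failed: h now holds the freshly observed value;
             * loop again unless it already exceeds cur.
             */
        }
    }

The comment in chunk_register() about not refreshing cur is the subtle part: cur is this thread's own contribution to the running total, so only the published maximum is reread on each retry.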
/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    extent_node_init(&key, arena, NULL, size, false, false);
    return (extent_tree_szad_nsearch(chunks_szad, &key));
}

static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    extent_node_t key;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed;
    bool zeroed, committed;

    if (base) {
        /*
         * This function may need to call base_node_{,de}alloc(), but
         * the current chunk allocation request is on behalf of the
         * base allocator.  Avoid deadlock (and if that weren't an
         * issue, potential for infinite recursion) by returning NULL.
         */
        return (NULL);
    }
    assert(new_addr == NULL || alignment == chunksize);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = size + alignment - chunksize;
    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    key.addr = NULL;
    key.size = alloc_size;
    malloc_mutex_lock(&chunks_mtx);
    node = extent_tree_szad_nsearch(chunks_szad, &key);
    if (node == NULL) {
        malloc_mutex_unlock(&chunks_mtx);
    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, false,
            false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
            alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
        (uintptr_t)node->addr;
    assert(node->size >= leadsize + size);
    trailsize = node->size - leadsize - size;
    ret = (void *)((uintptr_t)node->addr + leadsize);
    zeroed = node->zeroed;
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        node->size = leadsize;
        extent_node_size_set(node, leadsize);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size,
            trailsize, false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(arena, node);
            malloc_mutex_unlock(&arena->chunks_mtx);
            chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
                cache, ret, size + trailsize, zeroed, committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            /*
             * An additional node is required, but
             * base_node_alloc() can cause a new base chunk to be
             * allocated.  Drop chunks_mtx in order to avoid
             * deadlock, and if node allocation fails, deallocate
             * the result before returning an error.
             */
            malloc_mutex_unlock(&chunks_mtx);
            node = base_node_alloc();
            node = arena_node_alloc(arena);
            if (node == NULL) {
                chunk_dealloc(ret, size, true);
                malloc_mutex_unlock(&arena->chunks_mtx);
                chunk_record(arena, chunk_hooks, chunks_szad,
                    chunks_ad, cache, ret, size + trailsize,
                    zeroed, committed);
                return (NULL);
            }
            malloc_mutex_lock(&chunks_mtx);
        }
        node->addr = (void *)((uintptr_t)(ret) + size);
        node->size = trailsize;
        node->zeroed = zeroed;
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, zeroed, committed);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    malloc_mutex_unlock(&chunks_mtx);
    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
            ret, size, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(&arena->chunks_mtx);

    if (node != NULL)
        base_node_dealloc(node);
    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(arena, node);
    if (*zero) {
        if (zeroed == false)
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            VALGRIND_MAKE_MEM_DEFINED(ret, size);
            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }

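The leadsize computation in chunk_recycle() relies on ALIGNMENT_CEILING() to find the first suitably aligned address inside a recycled extent; everything before it becomes the lead, everything after the requested size becomes the trail. A sketch of the rounding, assuming the power-of-two mask form jemalloc uses (the worked values are hypothetical):

    #include <stdint.h>

    /* Round s up to the next multiple of alignment (a power of two). */
    #define ALIGNMENT_CEILING(s, alignment) \
        (((s) + ((alignment) - 1)) & ((~(uintptr_t)(alignment)) + 1))

    /*
     * For an extent starting at 0x5000 and alignment 0x4000 (16 KiB):
     * ALIGNMENT_CEILING(0x5000, 0x4000) == 0x8000, so leadsize == 0x3000
     * and the caller's chunk begins at 0x8000.
     */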
@ -136,138 +322,214 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, but taking
 * advantage of them if they are returned.
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* Retained. */
    if ((ret = chunk_recycle(arena, &chunk_hooks,
        &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
        new_addr, size, alignment, zero, commit, true)) != NULL)
        return (ret);

    /* "primary" dss. */
    if (config_dss && dss_prec == dss_prec_primary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }
    /* mmap. */
    if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
        alignment, base, zero)) != NULL)
        goto label_return;
    if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
        goto label_return;
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /*
     * mmap.  Requesting an address is not implemented for
     * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
     */
    if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
        commit)) != NULL)
        return (ret);
    /* "secondary" dss. */
    if (config_dss && dss_prec == dss_prec_secondary) {
        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
            alignment, base, zero)) != NULL)
            goto label_return;
        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
            goto label_return;
    }
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);

    /* All strategies for allocation failed. */
    ret = NULL;
label_return:
    if (ret != NULL) {
        if (config_ivsalloc && base == false) {
            if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
                chunk_dealloc(ret, size, true);
                return (NULL);
            }
        }
        if (config_stats || config_prof) {
            bool gdump;
            malloc_mutex_lock(&chunks_mtx);
            if (config_stats)
                stats_chunks.nchunks += (size / chunksize);
            stats_chunks.curchunks += (size / chunksize);
            if (stats_chunks.curchunks > stats_chunks.highchunks) {
                stats_chunks.highchunks =
                    stats_chunks.curchunks;
                if (config_prof)
                    gdump = true;
            } else if (config_prof)
                gdump = false;
            malloc_mutex_unlock(&chunks_mtx);
            if (config_prof && opt_prof && opt_prof_gdump && gdump)
                prof_gdump();
        }
        if (config_valgrind)
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    }
    assert(CHUNK_ADDR2BASE(ret) == ret);
    return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

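chunk_alloc_core() tries sources in a fixed order: retained chunks first, then the primary dss, then mmap, then the secondary dss. Whether dss is primary, secondary, or disabled is governed by opt_dss. A sketch of selecting it at startup through the malloc_conf mechanism jemalloc documents (the je_malloc_conf extern appears earlier in this diff; whether the application-visible symbol is malloc_conf or a mangled name depends on the build, so treat the name as an assumption):

    /*
     * Compiled into the application; jemalloc reads this string during
     * bootstrap.  "dss:primary" prefers sbrk-backed chunks over mmap.
     */
    const char *malloc_conf = "dss:primary";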
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
    void *ret;
    bool commit;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    commit = true;
    ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
        &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
        &commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    assert(commit);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
    arena_t *arena;

    /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
    arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
        false, true);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    void *ret;
    arena_t *arena;

    arena = chunk_arena_get(arena_ind);
    ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
        arena->ind);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    return (ret);
}

static void
|
||||
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
|
||||
size_t size)
|
||||
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, bool zeroed, bool committed)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *xnode, *node, *prev, *xprev, key;
|
||||
extent_node_t *node, *prev;
|
||||
extent_node_t key;
|
||||
|
||||
unzeroed = pages_purge(chunk, size);
|
||||
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
assert(!cache || !zeroed);
|
||||
unzeroed = cache || !zeroed;
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
/*
|
||||
* Allocate a node before acquiring chunks_mtx even though it might not
|
||||
* be needed, because base_node_alloc() may cause a new base chunk to
|
||||
* be allocated, which could cause deadlock if chunks_mtx were already
|
||||
* held.
|
||||
*/
|
||||
xnode = base_node_alloc();
|
||||
/* Use xprev to implement conditional deferred deallocation of prev. */
|
||||
xprev = NULL;
|
||||
|
||||
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
if (node != NULL && extent_node_addr_get(node) ==
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
committed && !chunk_hooks->merge(chunk, size,
extent_node_addr_get(node), extent_node_size_get(node), false,
arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
node = arena_node_alloc(arena);
if (node == NULL) {
/*
* base_node_alloc() failed, which is an exceedingly
* unlikely failure. Leak chunk; its pages have
* already been purged, so this is only a virtual
* memory leak.
* Node allocation failed, which is an exceedingly
* unlikely failure. Leak chunk after making sure its
* pages have already been purged, so that this is only
* a virtual memory leak.
*/
if (cache) {
chunk_purge_wrapper(arena, chunk_hooks, chunk,
size, 0, size);
}
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
extent_node_init(node, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}

/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
extent_node_size_get(prev)) == chunk &&
extent_node_committed_get(prev) == committed &&
!chunk_hooks->merge(extent_node_addr_get(prev),
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only

@ -275,44 +537,27 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,

*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);

arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
node->zeroed = (node->zeroed && prev->zeroed);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);

xprev = prev;
arena_node_dalloc(arena, prev);
}

label_return:
malloc_mutex_unlock(&chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dealloc(xnode);
if (xprev != NULL)
base_node_dealloc(xprev);
malloc_mutex_unlock(&arena->chunks_mtx);
}
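chunk_record() above keeps freed extents in an address-ordered tree and merges a newly freed range with its immediate neighbors before inserting it. For illustration only, a minimal stand-alone sketch of the same forward/backward coalescing over a sorted array (the extent_t type and extent_record() helper here are hypothetical, not jemalloc's):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical extent record covering [addr, addr + size). */
typedef struct { uintptr_t addr; size_t size; } extent_t;

/*
 * Insert [addr, addr + size) into exts (address-sorted, n live entries,
 * caller guarantees capacity for one more), merging with the following
 * and/or previous extent when they are exactly adjacent.  Returns the
 * new entry count.
 */
static size_t
extent_record(extent_t *exts, size_t n, uintptr_t addr, size_t size)
{
	size_t i;

	/* Find the first extent at or after the freed range. */
	for (i = 0; i < n && exts[i].addr < addr; i++)
		;
	/* Try to coalesce forward. */
	if (i < n && addr + size == exts[i].addr) {
		exts[i].addr = addr;
		exts[i].size += size;
	} else {
		/* No forward neighbor: shift and insert a new record. */
		for (size_t j = n; j > i; j--)
			exts[j] = exts[j - 1];
		exts[i].addr = addr;
		exts[i].size = size;
		n++;
	}
	/* Try to coalesce backward. */
	if (i > 0 && exts[i - 1].addr + exts[i - 1].size == exts[i].addr) {
		exts[i - 1].size += exts[i].size;
		for (size_t j = i; j + 1 < n; j++)
			exts[j] = exts[j + 1];
		n--;
	}
	return (n);
}

The real code must additionally keep the size/address tree (chunks_szad) in sync and consult the arena's merge hook, which is why each merge above is bracketed by remove/insert calls.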

void
chunk_unmap(void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);

if (config_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
else if (chunk_dealloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{

assert(chunk != NULL);

@ -320,22 +565,164 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)

assert(size != 0);
assert((size & chunksize_mask) == 0);

if (config_ivsalloc)
rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
arena_maybe_purge(arena);
}

void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool zeroed, bool committed)
{

assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);

chunk_hooks_assure_initialized(arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
/* Try to decommit; purge if that fails. */
if (committed) {
committed = chunk_hooks->decommit(chunk, size, 0, size,
arena->ind);
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

if (unmap)
chunk_unmap(chunk, size);
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{

if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{

chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{

return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{

return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}

bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);

return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{

return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, size_t offset, size_t length)
{

chunk_hooks_assure_initialized(arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
bool committed, unsigned arena_ind)
{

if (!maps_coalesce)
return (true);
return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{

if (!maps_coalesce)
return (true);
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true);

return (false);
}
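These *_default functions form the built-in chunk_hooks_t set that 4.0.0 lets applications replace per arena through the "arena.<i>.chunk_hooks" mallctl (mentioned in the ChangeLog above). A sketch of wrapping one hook, assuming the chunk_hooks_t typedefs that jemalloc 4.0.0 exposes in its public header; error handling is deliberately minimal:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static chunk_hooks_t old_hooks;

/* Log each chunk deallocation, then defer to the original hook. */
static bool
logging_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
{
	fprintf(stderr, "dalloc %p (%zu bytes), arena %u\n", chunk, size,
	    arena_ind);
	return (old_hooks.dalloc(chunk, size, committed, arena_ind));
}

int
main(void)
{
	chunk_hooks_t new_hooks;
	size_t sz = sizeof(old_hooks);

	/* Read arena 0's current hooks, then install a wrapped copy. */
	if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
		return (1);
	new_hooks = old_hooks;
	new_hooks.dalloc = logging_dalloc;
	if (mallctl("arena.0.chunk_hooks", NULL, NULL, &new_hooks,
	    sizeof(new_hooks)) != 0)
		return (1);
	return (0);
}

Note how chunk_dalloc_wrapper() above compares the installed dalloc against chunk_dalloc_default before poisoning memory for Valgrind: custom hooks may retain the range, so jemalloc only marks it inaccessible when a non-default hook is in place.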

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
SYSTEM_INFO info;
GetSystemInfo(&info);

/*
* Verify actual page size is equal to or an integral multiple of
* configured page size.
*/
if (info.dwPageSize & ((1U << LG_PAGE) - 1))
return (true);

/*
* Configure chunksize (if not set) to match granularity (usually 64K),
* so pages_map will always take fast path.
*/
if (!opt_lg_chunk) {
opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
- 1;
}
#else
if (!opt_lg_chunk)
opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);

@ -343,23 +730,11 @@ chunk_boot(void)

chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);

if (config_stats || config_prof) {
if (malloc_mutex_init(&chunks_mtx))
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
}
if (config_dss && chunk_dss_boot())
if (have_dss && chunk_dss_boot())
return (true);
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
extent_tree_szad_new(&chunks_szad_mmap);
extent_tree_ad_new(&chunks_ad_mmap);
extent_tree_szad_new(&chunks_szad_dss);
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, base_alloc, NULL);
if (chunks_rtree == NULL)
return (true);
}

return (false);
}

@ -368,9 +743,6 @@ void

chunk_prefork(void)
{

malloc_mutex_prefork(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
}

@ -379,9 +751,6 @@ chunk_postfork_parent(void)

{

chunk_dss_postfork_parent();
if (config_ivsalloc)
rtree_postfork_parent(chunks_rtree);
malloc_mutex_postfork_parent(&chunks_mtx);
}

void

@ -389,7 +758,4 @@ chunk_postfork_child(void)

{

chunk_dss_postfork_child();
if (config_ivsalloc)
rtree_postfork_child(chunks_rtree);
malloc_mutex_postfork_child(&chunks_mtx);
}
@ -32,7 +32,7 @@ static void *

chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_HAVE_SBRK
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();

@ -45,7 +45,7 @@ chunk_dss_prec_get(void)

{
dss_prec_t ret;

if (config_dss == false)
if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;

@ -57,8 +57,8 @@ bool

chunk_dss_prec_set(dss_prec_t dss_prec)
{

if (config_dss == false)
return (true);
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);

@ -66,11 +66,12 @@ chunk_dss_prec_set(dss_prec_t dss_prec)

}

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit)
{
void *ret;

cassert(config_dss);
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);

@ -93,8 +94,17 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)

* malloc.
*/
do {
/* Avoid an unnecessary system call. */
if (new_addr != NULL && dss_max != new_addr)
break;

/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);

/* Make sure the earlier condition still holds. */
if (new_addr != NULL && dss_max != new_addr)
break;

/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.

@ -123,12 +133,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)

/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_unmap(cpad, cpad_size);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size,
true);
}
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}
} while (dss_prev != (void *)-1);
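The loop above grows the data segment with sbrk() and must absorb whatever slack lies between the current break and the next chunk boundary; the cpad range it frees afterwards is exactly that slack. A sketch of the padding arithmetic, using a generic ALIGN_UP helper rather than jemalloc's CHUNK_CEILING (hypothetical names, for illustration):

#include <stddef.h>
#include <stdint.h>

#define ALIGN_UP(a, align) (((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

/*
 * Given the current break dss_max and a chunk-aligned request of `size`
 * bytes, compute the sbrk() increment: padding up to the next chunk
 * boundary, plus the request itself.
 */
static intptr_t
dss_increment(uintptr_t dss_max, size_t size, size_t chunksize)
{
	uintptr_t ret = ALIGN_UP(dss_max, chunksize); /* aligned result addr */
	size_t cpad_size = ret - dss_max;             /* bytes lost to alignment */

	return ((intptr_t)(cpad_size + size));
}

Since another allocator (or the application) may also move the break, the real code re-reads dss_max and retries until sbrk() either succeeds at the expected address or reports failure, which is what the dss_prev != (void *)-1 condition checks.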

@ -143,7 +161,7 @@ chunk_in_dss(void *chunk)

{
bool ret;

cassert(config_dss);
cassert(have_dss);

malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base

@ -160,7 +178,7 @@ bool

chunk_dss_boot(void)
{

cassert(config_dss);
cassert(have_dss);

if (malloc_mutex_init(&dss_mtx))
return (true);

@ -175,7 +193,7 @@ void

chunk_dss_prefork(void)
{

if (config_dss)
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}

@ -183,7 +201,7 @@ void

chunk_dss_postfork_parent(void)
{

if (config_dss)
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}

@ -191,7 +209,7 @@ void

chunk_dss_postfork_child(void)
{

if (config_dss)
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
@ -1,146 +1,10 @@

#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
void *ret;

assert(size != 0);

#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);

if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];

buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in munmap(): %s\n",
buf);
if (opt_abort)
abort();
}
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];

buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}

static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);

assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;

pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;

if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}
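pages_trim() is the second half of the classic trick for obtaining an aligned mapping without OS support: over-allocate by the alignment, then unmap the leading and trailing slop. A self-contained POSIX sketch of the whole pattern (assumes size and alignment are multiples of the page size; not jemalloc's actual slow path):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Map `size` bytes aligned to `alignment` (a power of two). */
static void *
map_aligned(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment - 1; /* slack to align within */
	char *addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);
	/* Distance from addr up to the next multiple of alignment. */
	uintptr_t lead = (-(uintptr_t)addr) & (alignment - 1);
	size_t trail = alloc_size - lead - size;
	if (lead != 0)
		munmap(addr, lead);
	if (trail != 0)
		munmap(addr + lead + size, trail);
	return (addr + lead);
}

Windows cannot unmap part of a VirtualAlloc region, which is why the _WIN32 branch of pages_trim() above instead releases the whole reservation and retries at the computed address, tolerating the race that this opens.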

bool
pages_purge(void *addr, size_t length)
{
bool unzeroed;

#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#else
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error "No method defined for purging unused dirty pages."
# endif
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#endif
return (unzeroed);
}

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret, *pages;
size_t alloc_size, leadsize;

@ -160,11 +24,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)

assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t offset;

@ -191,20 +57,22 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)

offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
}

assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}

bool
chunk_dealloc_mmap(void *chunk, size_t size)
chunk_dalloc_mmap(void *chunk, size_t size)
{

if (config_munmap)
pages_unmap(chunk, size);

return (config_munmap == false);
return (!config_munmap);
}
@ -40,8 +40,8 @@

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);

/******************************************************************************/

@ -185,7 +185,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,

}

bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
}
}

@ -201,12 +201,12 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)

/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);

/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);

/*

@ -243,7 +243,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)

}

static bool
ckh_grow(ckh_t *ckh)
ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;

@ -270,7 +270,8 @@ ckh_grow(ckh_t *ckh)

ret = true;
goto label_return;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
true, NULL);
if (tab == NULL) {
ret = true;
goto label_return;

@ -281,13 +282,13 @@ ckh_grow(ckh_t *ckh)

tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true);
break;
}

/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}

@ -298,7 +299,7 @@ ckh_grow(ckh_t *ckh)

}

static void
ckh_shrink(ckh_t *ckh)
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;

@ -313,7 +314,8 @@ ckh_shrink(ckh_t *ckh)

usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't

@ -327,8 +329,8 @@ ckh_shrink(ckh_t *ckh)

tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif

@ -336,7 +338,7 @@ ckh_shrink(ckh_t *ckh)

}

/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT

@ -345,7 +347,8 @@ ckh_shrink(ckh_t *ckh)

}

bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;

@ -366,10 +369,10 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)

ckh->count = 0;

/*
* Find the minimum power of 2 that is large enough to fit aBaseCount
* Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
* factor that will typically allow 2^aLgMinItems to fit without ever
* factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
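A worked restatement of that sizing rule, not the exact source arithmetic: to hold `minitems` entries at no more than a 0.75 load factor, the table needs at least ceil(minitems / 0.75) = ceil(4 * minitems / 3) cells, rounded up to a power of two that spans at least two buckets.

#include <stddef.h>

#define LG_CKH_BUCKET_CELLS 2 /* 4 cells per bucket, as in the (2+,2) scheme */

/* Smallest valid power-of-two cell count for `minitems` entries. */
static size_t
ckh_mincells(size_t minitems)
{
	size_t mincells = (minitems * 4 + 2) / 3; /* ceil(minitems / 0.75) */
	size_t lg = 0;

	while (((size_t)1 << lg) < mincells)
		lg++;
	if (lg < LG_CKH_BUCKET_CELLS + 1)
		lg = LG_CKH_BUCKET_CELLS + 1; /* at least two buckets */
	return ((size_t)1 << lg);
}

For example, minitems = 100 yields mincells = 134, which rounds up to 256 cells (64 buckets), giving an initial load of about 0.39, comfortably under both the 0.75 target and the ~0.86 expected maximum the comment cites.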

@ -388,7 +391,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)

ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
if (ckh->tab == NULL) {
ret = true;
goto label_return;

@ -400,16 +404,16 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)

}

void
ckh_delete(ckh_t *ckh)
ckh_delete(tsd_t *tsd, ckh_t *ckh)
{

assert(ckh != NULL);

#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
"%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
" nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
" nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,

@ -417,7 +421,7 @@ ckh_delete(ckh_t *ckh)

(unsigned long long)ckh->nrelocs);
#endif

idalloc(ckh->tab);
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}

@ -452,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)

}

bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
{
bool ret;

@ -464,7 +468,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)

#endif

while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}

@ -476,7 +480,8 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)

}

bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data)
{
size_t cell;

@ -497,7 +502,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)

+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
ckh_shrink(tsd, ckh);
}

return (false);
File diff suppressed because it is too large
@ -3,17 +3,32 @@

/******************************************************************************/

static inline int
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{

/*
* Round down to the nearest chunk size that can actually be requested
* during normal huge allocation.
*/
return (index2size(size2index(size + 1) - 1));
}
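Quantization makes extents that differ only by unusable slack compare equal, so the szad tree below falls through to the address comparison for them. A toy model of the same expression, with stand-in size-class functions over uniform 1 MiB classes (the toy_* names are hypothetical; jemalloc's real index2size()/size2index() cover its full class table):

#include <stddef.h>

#define MIB ((size_t)1 << 20)

/* Stand-ins: class i holds requests up to i MiB. */
static size_t toy_size2index(size_t size) { return ((size + MIB - 1) / MIB); }
static size_t toy_index2size(size_t index) { return (index * MIB); }

/* Round size down to the largest requestable size not exceeding it. */
static size_t
toy_extent_quantize(size_t size)
{
	return (toy_index2size(toy_size2index(size + 1) - 1));
}

/*
 * toy_extent_quantize(5*MIB)       == 5*MIB  (exact fit)
 * toy_extent_quantize(5*MIB + 123) == 5*MIB  (123 bytes of slack ignored)
 * toy_extent_quantize(5*MIB - 1)   == 4*MIB  (cannot satisfy a 5 MiB request)
 */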

JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
size_t a_size = a->size;
size_t b_size = b->size;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));

ret = (a_size > b_size) - (a_size < b_size);
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

ret = (a_addr > b_addr) - (a_addr < b_addr);
}

@ -22,18 +37,17 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)

}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)

static inline int
JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

return ((a_addr > b_addr) - (a_addr < b_addr));
}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
extent_ad_comp)
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
@ -2,44 +2,68 @@

#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
static extent_node_t *
huge_node_get(const void *ptr)
{
extent_node_t *node;

malloc_mutex_t huge_mtx;
node = chunk_lookup(ptr, true);
assert(!extent_node_achunk_get(node));

/******************************************************************************/
return (node);
}

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;

void *
huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

return (huge_palloc(size, chunksize, zero, dss_prec));
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

chunk_deregister(ptr, node);
}

void *
huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
{
size_t usize;

usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (NULL);
}

return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
size_t csize;
size_t usize;
extent_node_t *node;
bool is_zeroed;

/* Allocate one or more contiguous chunks for this request. */

csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
usize = sa2u(size, alignment);
if (unlikely(usize == 0))
return (NULL);
}
assert(usize >= chunksize);

/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);

@ -48,145 +72,33 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)

* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
if (ret == NULL) {
base_node_dealloc(node);
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
size, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, tcache, true);
return (NULL);
}

extent_node_init(node, arena, ret, size, is_zeroed, true);

if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(arena, ret, size);
idalloctm(tsd, node, tcache, true);
return (NULL);
}

/* Insert node into huge. */
node->addr = ret;
node->size = csize;
malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);

malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
if (config_stats) {
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
}
malloc_mutex_unlock(&huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, 0xa5, size);

if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}

return (ret);
}

bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize > arena_maxclass
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
return (false);
}

/* Reallocation would require a move. */
return (true);
}

void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
void *ret;
size_t copysize;

/* Try to avoid moving the allocation. */
if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
return (ptr);

/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero, dss_prec);
else
ret = huge_malloc(size + extra, zero, dss_prec);

if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero, dss_prec);
else
ret = huge_malloc(size, zero, dss_prec);

if (ret == NULL)
return (NULL);
}

/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in dss.
*/
if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
== false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);

/*
* Remove ptr from the tree of huge allocations before
* performing the remap operation, in order to avoid the
* possibility of another thread acquiring that mapping before
* this one removes it from the tree.
*/
huge_dalloc(ptr, false);
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
ret) == MAP_FAILED) {
/*
* Assuming no chunk management bugs in the allocator,
* the only documented way an error can occur here is
* if the application changed the map type for a
* portion of the old allocation. This is firmly in
* undefined behavior territory, so write a diagnostic
* message, and optionally abort.
*/
char buf[BUFERROR_BUF];

buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
chunk_dealloc_mmap(ptr, oldsize);
} else if (config_fill && zero == false && opt_junk && oldsize
< newsize) {
/*
* mremap(2) clobbers the original mapping, so
* junk/zero filling is not preserved. There is no
* need to zero fill here, since any trailing
* uninitialized memory is demand-zeroed by the
* kernel, but junk filling must be redone.
*/
memset(ret + oldsize, 0xa5, newsize - oldsize);
}
} else
#endif
{
memcpy(ret, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc);
}
return (ret);
}

@ -198,12 +110,12 @@ static void

huge_dalloc_junk(void *ptr, size_t usize)
{

if (config_fill && config_dss && opt_junk) {
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
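The 0x5a free pattern (paired with 0xa5 on allocation, visible throughout this file) exists so that use-after-free reads return recognizable garbage and stray writes are detectable. A toy checker illustrating how such a fill can be audited (hypothetical helper, not part of jemalloc):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define JUNK_FREE 0x5a /* pattern written on free */

/* Report whether a junk-filled buffer was modified after being freed. */
static bool
junk_intact(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (buf[i] != JUNK_FREE)
			return (false); /* stray write detected */
	}
	return (true);
}

As the code above notes, filling is skipped when the pages are about to be unmapped anyway, since touching them would only fault them back in for no diagnostic benefit.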

@ -213,135 +125,317 @@ huge_dalloc_junk(void *ptr, size_t usize)

huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

void
huge_dalloc(void *ptr, bool unmap)
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
size_t size, size_t extra, bool zero)
{
extent_node_t *node, key;
size_t usize_next;
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool zeroed;

malloc_mutex_lock(&huge_mtx);
/* Increase usize to incorporate extra. */
while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
usize = usize_next;

/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
if (oldsize == usize)
return;

if (config_stats) {
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);

/* Fill if necessary (shrinking). */
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
CHUNK_CEILING(usize), usize, sdiff);
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
zeroed = false;
}
} else
zeroed = true;

malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
/* Clear node's zeroed field if zeroing failed above. */
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
malloc_mutex_unlock(&arena->huge_mtx);

arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

/* Fill if necessary (growing). */
if (oldsize < usize) {
if (zero || (config_fill && unlikely(opt_zero))) {
if (!zeroed) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}
}
}

static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks;
size_t cdiff;
bool zeroed;

node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
chunk_hooks = chunk_hooks_get(arena);

/* Split excess chunks. */
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
CHUNK_CEILING(usize), cdiff, true, arena->ind))
return (true);

if (oldsize > usize) {
size_t sdiff = oldsize - usize;
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
usize), sdiff);
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
sdiff);
zeroed = false;
}
} else
zeroed = true;

malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
/* Clear node's zeroed field if zeroing failed above. */
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
malloc_mutex_unlock(&arena->huge_mtx);

/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

return (false);
}

static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
size_t usize;
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;

usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (true);
}

malloc_mutex_unlock(&huge_mtx);
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(&arena->huge_mtx);

if (unmap)
huge_dalloc_junk(node->addr, node->size);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
* that it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed_chunk = zero;

chunk_dealloc(node->addr, node->size, unmap);

base_node_dealloc(node);
}

size_t
huge_salloc(const void *ptr)
{
size_t ret;
extent_node_t *node, key;

malloc_mutex_lock(&huge_mtx);

/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);

ret = node->size;

malloc_mutex_unlock(&huge_mtx);

return (ret);
}

dss_prec_t
huge_dss_prec_get(arena_t *arena)
{

return (arena_dss_prec_get(choose_arena(arena)));
}

prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret;
extent_node_t *node, key;

malloc_mutex_lock(&huge_mtx);

/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);

ret = node->prof_ctx;

malloc_mutex_unlock(&huge_mtx);

return (ret);
}

void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;

malloc_mutex_lock(&huge_mtx);

/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);

node->prof_ctx = ctx;

malloc_mutex_unlock(&huge_mtx);
}

bool
huge_boot(void)
{

/* Initialize chunks data. */
if (malloc_mutex_init(&huge_mtx))
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
extent_tree_ad_new(&huge);

if (config_stats) {
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
malloc_mutex_unlock(&arena->huge_mtx);

if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
CHUNK_CEILING(oldsize) - oldsize);
}
if (!is_zeroed_chunk) {
memset((void *)((uintptr_t)ptr +
CHUNK_CEILING(oldsize)), 0, usize -
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
}

return (false);
}

void
huge_prefork(void)
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
size_t usize;

malloc_mutex_prefork(&huge_mtx);
/* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize)
return (true);

assert(s2u(oldsize) == oldsize);
usize = s2u(size);
if (usize == 0) {
/* size_t overflow. */
return (true);
}

/*
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(s2u(size+extra))) {
huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
zero);
return (false);
}

/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));

/* Attempt to expand the allocation in-place. */
if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
if (extra == 0)
return (true);

/* Try again, this time without extra. */
return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
}
return (false);
}

void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;

/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
return (ptr);

/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize) {
ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
tcache);
} else
ret = huge_malloc(tsd, arena, size + extra, zero, tcache);

if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize) {
ret = huge_palloc(tsd, arena, size, alignment, zero,
tcache);
} else
ret = huge_malloc(tsd, arena, size, zero, tcache);

if (ret == NULL)
return (NULL);
}

/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}

void
huge_postfork_parent(void)
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;

node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);

huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsd, node, tcache, true);
}

arena_t *
huge_aalloc(const void *ptr)
{

malloc_mutex_postfork_parent(&huge_mtx);
return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
arena_t *arena;

node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node);
malloc_mutex_unlock(&arena->huge_mtx);

return (size);
}

prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
arena_t *arena;

node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(&arena->huge_mtx);

return (tctx);
}

void
huge_postfork_child(void)
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;

malloc_mutex_postfork_child(&huge_mtx);
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(&arena->huge_mtx);
}
File diff suppressed because it is too large
@ -74,8 +74,8 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,

{

return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
__libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(
mutex, calloc_cb));
__libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
calloc_cb));
}
#endif

@ -84,9 +84,13 @@ malloc_mutex_init(malloc_mutex_t *mutex)

{

#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
# endif
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))

@ -94,8 +98,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)

mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
0)
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
bootstrap_calloc) != 0)
return (true);
}
#else

@ -151,7 +155,7 @@ malloc_mutex_first_thread(void)

postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
base_calloc) != 0)
bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
contrib/jemalloc/src/pages.c (new file, 173 lines)
@ -0,0 +1,173 @@

#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

void *
pages_map(void *addr, size_t size)
{
void *ret;

assert(size != 0);

#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
assert(ret != NULL);

if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
pages_unmap(ret, size);
ret = NULL;
}
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}

void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
if (munmap(addr, size) == -1)
#endif
{
char buf[BUFERROR_BUF];

buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
#else
"munmap"
#endif
"(): %s\n", buf);
if (opt_abort)
abort();
}
}

void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);

assert(alloc_size >= leadsize + size);
#ifdef _WIN32
{
void *new_addr;

pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
pages_unmap(new_addr, size);
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;

if (leadsize != 0)
pages_unmap(addr, leadsize);
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
return (ret);
}
#endif
}

static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{

#ifndef _WIN32
/*
* The following decommit/commit implementation is functional, but
* always disabled because it doesn't add value beyond improved
* debugging (at the cost of extra system calls) on systems that
* overcommit.
*/
if (false) {
int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
MAP_FIXED, -1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
/*
* We succeeded in mapping memory, but not in the right
* place.
*/
pages_unmap(result, size);
return (true);
}
return (false);
}
#endif
return (true);
}

bool
pages_commit(void *addr, size_t size)
{

return (pages_commit_impl(addr, size, true));
}

bool
pages_decommit(void *addr, size_t size)
{

return (pages_commit_impl(addr, size, false));
}

bool
pages_purge(void *addr, size_t size)
{
bool unzeroed;

#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else
# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#else
/* Last resort no-op. */
unzeroed = true;
#endif
return (unzeroed);
}
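The JEMALLOC_MADV_ZEROS distinction above matters: MADV_DONTNEED guarantees that the next touch of a purged page faults in zeroes, while MADV_FREE may hand the old contents back, so the caller cannot assume the range is zeroed. A minimal stand-alone purge call mirroring that return convention (hypothetical helper, compile-time flag availability varies by OS):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Purge [addr, addr + len) and report whether its contents may be
 * non-zero afterwards (the same convention as pages_purge()).
 */
static bool
purge_pages(void *addr, size_t len)
{
#if defined(MADV_FREE)
	(void)madvise(addr, len, MADV_FREE);
	return (true);	/* MADV_FREE may preserve old contents */
#elif defined(MADV_DONTNEED)
	/* Zeroed iff the call succeeded. */
	return (madvise(addr, len, MADV_DONTNEED) != 0);
#else
	(void)addr;
	(void)len;
	return (true);	/* no purge primitive: assume unzeroed */
#endif
}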
File diff suppressed because it is too large
@ -2,34 +2,33 @@

#include "jemalloc/internal/jemalloc_internal.h"

/*
* quarantine pointers close to NULL are used to encode state information that
* Quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY

/******************************************************************************/
/* Data. */

malloc_tsd_data(, quarantine, quarantine_t *, NULL)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
size_t upper_bound);

/******************************************************************************/

quarantine_t *
quarantine_init(size_t lg_maxobjs)
static quarantine_t *
quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;

quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
assert(tsd_nominal(tsd));

quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;

@ -37,19 +36,36 @@ quarantine_init(size_t lg_maxobjs)

quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;

quarantine_tsd_set(&quarantine);

return (quarantine);
}

void
quarantine_alloc_hook_work(tsd_t *tsd)
{
quarantine_t *quarantine;

if (!tsd_nominal(tsd))
return;

quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
*/
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
}

static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;

ret = quarantine_init(quarantine->lg_maxobjs + 1);
ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(quarantine);
quarantine_drain_one(tsd, quarantine);
return (quarantine);
}

@ -71,17 +87,18 @@ quarantine_grow(quarantine_t *quarantine)

memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloc(quarantine);
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);

tsd_quarantine_set(tsd, ret);
return (ret);
}

static void
quarantine_drain_one(quarantine_t *quarantine)
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloc(obj->ptr);
idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<

@ -89,15 +106,15 @@ quarantine_drain_one(quarantine_t *quarantine)

}

static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{

while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(tsd, quarantine);
quarantine_drain_one(quarantine);
}
|
||||
|
||||
void
|
||||
quarantine(void *ptr)
|
||||
quarantine(tsd_t *tsd, void *ptr)
|
||||
{
|
||||
quarantine_t *quarantine;
|
||||
size_t usize = isalloc(ptr, config_prof);
|
||||
|
@ -105,17 +122,8 @@ quarantine(void *ptr)
|
|||
cassert(config_fill);
|
||||
assert(opt_quarantine);
|
||||
|
||||
quarantine = *quarantine_tsd_get();
|
||||
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
|
||||
if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* Make a note that quarantine() was called after
|
||||
* quarantine_cleanup() was called.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_REINCARNATED;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
}
|
||||
idalloc(ptr);
|
||||
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
|
||||
idalloctm(tsd, ptr, NULL, false);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
|
@ -125,11 +133,11 @@ quarantine(void *ptr)
|
|||
if (quarantine->curbytes + usize > opt_quarantine) {
|
||||
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
|
||||
- usize : 0;
|
||||
quarantine_drain(quarantine, upper_bound);
|
||||
quarantine_drain(tsd, quarantine, upper_bound);
|
||||
}
|
||||
/* Grow the quarantine ring buffer if it's full. */
|
||||
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
|
||||
quarantine = quarantine_grow(quarantine);
|
||||
quarantine = quarantine_grow(tsd, quarantine);
|
||||
/* quarantine_grow() must free a slot if it fails to grow. */
|
||||
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
|
||||
/* Append ptr if its size doesn't exceed the quarantine size. */
|
||||
|
@ -141,12 +149,12 @@ quarantine(void *ptr)
|
|||
obj->usize = usize;
|
||||
quarantine->curbytes += usize;
|
||||
quarantine->curobjs++;
|
||||
if (config_fill && opt_junk) {
|
||||
if (config_fill && unlikely(opt_junk_free)) {
|
||||
/*
|
||||
* Only do redzone validation if Valgrind isn't in
|
||||
* operation.
|
||||
*/
|
||||
if ((config_valgrind == false || opt_valgrind == false)
|
||||
if ((!config_valgrind || likely(!in_valgrind))
|
||||
&& usize <= SMALL_MAXCLASS)
|
||||
arena_quarantine_junk_small(ptr, usize);
|
||||
else
|
||||
|
@ -154,46 +162,22 @@ quarantine(void *ptr)
|
|||
}
|
||||
} else {
|
||||
assert(quarantine->curbytes == 0);
|
||||
idalloc(ptr);
|
||||
idalloctm(tsd, ptr, NULL, false);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
quarantine_cleanup(void *arg)
|
||||
quarantine_cleanup(tsd_t *tsd)
|
||||
{
|
||||
quarantine_t *quarantine = *(quarantine_t **)arg;
|
||||
quarantine_t *quarantine;
|
||||
|
||||
if (quarantine == QUARANTINE_STATE_REINCARNATED) {
|
||||
/*
|
||||
* Another destructor deallocated memory after this destructor
|
||||
* was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
|
||||
* in order to receive another callback.
|
||||
*/
|
||||
quarantine = QUARANTINE_STATE_PURGATORY;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
|
||||
/*
|
||||
* The previous time this destructor was called, we set the key
|
||||
* to QUARANTINE_STATE_PURGATORY so that other destructors
|
||||
* wouldn't cause re-creation of the quarantine. This time, do
|
||||
* nothing, so that the destructor will not be called again.
|
||||
*/
|
||||
} else if (quarantine != NULL) {
|
||||
quarantine_drain(quarantine, 0);
|
||||
idalloc(quarantine);
|
||||
quarantine = QUARANTINE_STATE_PURGATORY;
|
||||
quarantine_tsd_set(&quarantine);
|
||||
if (!config_fill)
|
||||
return;
|
||||
|
||||
quarantine = tsd_quarantine_get(tsd);
|
||||
if (quarantine != NULL) {
|
||||
quarantine_drain(tsd, quarantine, 0);
|
||||
idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
|
||||
tsd_quarantine_set(tsd, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
quarantine_boot(void)
|
||||
{
|
||||
|
||||
cassert(config_fill);
|
||||
|
||||
if (quarantine_tsd_boot())
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
|
|
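The quarantine above is a ring buffer of deferred deallocations: frees are delayed until the buffer is over budget, at which point the oldest entries are drained first. A toy version of the same structure, for illustration only (all names here are hypothetical, not jemalloc API):

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical miniature of the quarantine ring buffer above. */
typedef struct {
	void	*slots[64];	/* Power-of-two capacity. */
	size_t	first;		/* Index of the oldest entry. */
	size_t	count;
} quarantine_ring_t;

static void
ring_delay_free(quarantine_ring_t *q, void *ptr)
{
	size_t cap = sizeof(q->slots) / sizeof(q->slots[0]);

	if (q->count == cap) {
		/* Full: really free the oldest entry to make room. */
		free(q->slots[q->first]);
		q->first = (q->first + 1) & (cap - 1);
		q->count--;
	}
	q->slots[(q->first + q->count) & (cap - 1)] = ptr;
	q->count++;
}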
@@ -1,73 +1,74 @@
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"

rtree_t *
rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
static unsigned
hmin(unsigned ha, unsigned hb)
{
	rtree_t *ret;
	unsigned bits_per_level, bits_in_leaf, height, i;

	return (ha < hb ? ha : hb);
}

/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
bool
rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc)
{
	unsigned bits_in_leaf, height, i;

	assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

	bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
	bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
	bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
	    : (bits % RTREE_BITS_PER_LEVEL);
	if (bits > bits_in_leaf) {
		height = 1 + (bits - bits_in_leaf) / bits_per_level;
		if ((height-1) * bits_per_level + bits_in_leaf != bits)
		height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
		if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
			height++;
	} else {
		height = 1;
	}
	assert((height-1) * bits_per_level + bits_in_leaf >= bits);

	ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
	    (sizeof(unsigned) * height));
	if (ret == NULL)
		return (NULL);
	memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
	    height));

	ret->alloc = alloc;
	ret->dalloc = dalloc;
	if (malloc_mutex_init(&ret->mutex)) {
		if (dalloc != NULL)
			dalloc(ret);
		return (NULL);
	}
	ret->height = height;
	if (height > 1) {
		if ((height-1) * bits_per_level + bits_in_leaf > bits) {
			ret->level2bits[0] = (bits - bits_in_leaf) %
			    bits_per_level;
		} else
			ret->level2bits[0] = bits_per_level;
		for (i = 1; i < height-1; i++)
			ret->level2bits[i] = bits_per_level;
		ret->level2bits[height-1] = bits_in_leaf;
	} else
		ret->level2bits[0] = bits;
		height = 1;
	assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);

	ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
	if (ret->root == NULL) {
		if (dalloc != NULL)
			dalloc(ret);
		return (NULL);
	rtree->alloc = alloc;
	rtree->dalloc = dalloc;
	rtree->height = height;

	/* Root level. */
	rtree->levels[0].subtree = NULL;
	rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
	    bits_in_leaf;
	rtree->levels[0].cumbits = rtree->levels[0].bits;
	/* Interior levels. */
	for (i = 1; i < height-1; i++) {
		rtree->levels[i].subtree = NULL;
		rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
		rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
		    RTREE_BITS_PER_LEVEL;
	}
	/* Leaf level. */
	if (height > 1) {
		rtree->levels[height-1].subtree = NULL;
		rtree->levels[height-1].bits = bits_in_leaf;
		rtree->levels[height-1].cumbits = bits;
	}
	memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);

	return (ret);
	/* Compute lookup table to be used by rtree_start_level(). */
	for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
		rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
		    1);
	}

	return (false);
}

static void
rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{

	if (level < rtree->height - 1) {
	if (level + 1 < rtree->height) {
		size_t nchildren, i;

		nchildren = ZU(1) << rtree->level2bits[level];
		nchildren = ZU(1) << rtree->levels[level].bits;
		for (i = 0; i < nchildren; i++) {
			void **child = (void **)node[i];
			rtree_node_elm_t *child = node[i].child;
			if (child != NULL)
				rtree_delete_subtree(rtree, child, level + 1);
		}
@@ -78,28 +79,49 @@ rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
void
rtree_delete(rtree_t *rtree)
{
	unsigned i;

	rtree_delete_subtree(rtree, rtree->root, 0);
	rtree->dalloc(rtree);
	for (i = 0; i < rtree->height; i++) {
		rtree_node_elm_t *subtree = rtree->levels[i].subtree;
		if (subtree != NULL)
			rtree_delete_subtree(rtree, subtree, i);
	}
}

void
rtree_prefork(rtree_t *rtree)
static rtree_node_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
	rtree_node_elm_t *node;

	if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
		/*
		 * Another thread is already in the process of initializing.
		 * Spin-wait until initialization is complete.
		 */
		do {
			CPU_SPINWAIT;
			node = atomic_read_p((void **)elmp);
		} while (node == RTREE_NODE_INITIALIZING);
	} else {
		node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
		if (node == NULL)
			return (NULL);
		atomic_write_p((void **)elmp, node);
	}

	return (node);
}

rtree_node_elm_t *
rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{

	malloc_mutex_prefork(&rtree->mutex);
	return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}

void
rtree_postfork_parent(rtree_t *rtree)
rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{

	malloc_mutex_postfork_parent(&rtree->mutex);
}

void
rtree_postfork_child(rtree_t *rtree)
{

	malloc_mutex_postfork_child(&rtree->mutex);
	return (rtree_node_init(rtree, level, &elm->child));
}

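rtree_node_init() above lazily creates tree nodes without a lock: the creating thread CASes a sentinel into the slot, and losers spin until the winner publishes the real pointer. A self-contained sketch of the same protocol using C11 atomics (lazy_init() and INITIALIZING are illustrative names, not jemalloc API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <sched.h>

#define INITIALIZING ((void *)(uintptr_t)1)

/* slot starts out NULL; many threads may race to initialize it. */
static void *
lazy_init(_Atomic(void *) *slot, size_t size)
{
	void *expected = NULL;
	void *v;

	if (!atomic_compare_exchange_strong(slot, &expected, INITIALIZING)) {
		/* Lost the race: spin until the winner publishes a value. */
		while ((v = atomic_load(slot)) == INITIALIZING)
			sched_yield();
		return (v);
	}
	v = calloc(1, size);
	/* Publish; NULL signals failure, so a later call may retry. */
	atomic_store(slot, v);
	return (v);
}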
@@ -6,31 +6,22 @@
	xmallctl(n, v, &sz, NULL, 0); \
} while (0)

#define CTL_I_GET(n, v, t) do { \
#define CTL_M2_GET(n, i, v, t) do { \
	size_t mib[6]; \
	size_t miblen = sizeof(mib) / sizeof(size_t); \
	size_t sz = sizeof(t); \
	xmallctlnametomib(n, mib, &miblen); \
	mib[2] = i; \
	mib[2] = (i); \
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_J_GET(n, v, t) do { \
#define CTL_M2_M4_GET(n, i, j, v, t) do { \
	size_t mib[6]; \
	size_t miblen = sizeof(mib) / sizeof(size_t); \
	size_t sz = sizeof(t); \
	xmallctlnametomib(n, mib, &miblen); \
	mib[2] = j; \
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

#define CTL_IJ_GET(n, v, t) do { \
	size_t mib[6]; \
	size_t miblen = sizeof(mib) / sizeof(size_t); \
	size_t sz = sizeof(t); \
	xmallctlnametomib(n, mib, &miblen); \
	mib[2] = i; \
	mib[4] = j; \
	mib[2] = (i); \
	mib[4] = (j); \
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)

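CTL_M2_GET() and CTL_M2_M4_GET() above translate a mallctl name to a mib once, then patch the arena and size-class indices into the mib for each query. The same pattern through jemalloc's public API looks roughly like this (a sketch; error handling is reduced to early returns):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Read stats.arenas.<i>.pdirty via the mib fast path. */
static size_t
arena_pdirty(unsigned i)
{
	size_t mib[5];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t v, sz = sizeof(v);

	/* Translate the name to a mib once... */
	if (mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen) != 0)
		return (0);
	/* ...then patch the arena index (mib component 2) per query. */
	mib[2] = (size_t)i;
	if (mallctlbymib(mib, miblen, &v, &sz, NULL, 0) != 0)
		return (0);
	return (v);
}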
@@ -48,8 +39,10 @@ static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
    void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i, bool bins, bool large);
    void *cbopaque, unsigned i, bool bins, bool large, bool huge);

/******************************************************************************/

@@ -58,100 +51,109 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	size_t page;
	bool config_tcache;
	unsigned nbins, j, gap_start;
	bool config_tcache, in_gap;
	unsigned nbins, j;

	CTL_GET("arenas.page", &page, size_t);

	CTL_GET("config.tcache", &config_tcache, bool);
	if (config_tcache) {
		malloc_cprintf(write_cb, cbopaque,
		    "bins: bin size regs pgs allocated nmalloc"
		    " ndalloc nrequests nfills nflushes"
		    " newruns reruns curruns\n");
		    "bins: size ind allocated nmalloc"
		    " ndalloc nrequests curregs curruns regs"
		    " pgs util nfills nflushes newruns"
		    " reruns\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "bins: bin size regs pgs allocated nmalloc"
		    " ndalloc newruns reruns curruns\n");
		    "bins: size ind allocated nmalloc"
		    " ndalloc nrequests curregs curruns regs"
		    " pgs util newruns reruns\n");
	}
	CTL_GET("arenas.nbins", &nbins, unsigned);
	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
	for (j = 0, in_gap = false; j < nbins; j++) {
		uint64_t nruns;

		CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
		if (nruns == 0) {
			if (gap_start == UINT_MAX)
				gap_start = j;
		} else {
			size_t reg_size, run_size, allocated;
		CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
		    uint64_t);
		if (nruns == 0)
			in_gap = true;
		else {
			size_t reg_size, run_size, curregs, availregs, milli;
			size_t curruns;
			uint32_t nregs;
			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
			uint64_t reruns;
			size_t curruns;
			char util[6]; /* "x.yyy". */

			if (gap_start != UINT_MAX) {
				if (j > gap_start + 1) {
					/* Gap of more than one size class. */
					malloc_cprintf(write_cb, cbopaque,
					    "[%u..%u]\n", gap_start,
					    j - 1);
				} else {
					/* Gap of one size class. */
					malloc_cprintf(write_cb, cbopaque,
					    "[%u]\n", gap_start);
				}
				gap_start = UINT_MAX;
			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    " ---\n");
				in_gap = false;
			}
			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
			    &allocated, size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
			    &nmalloc, uint64_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
			    &ndalloc, uint64_t);
			if (config_tcache) {
				CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
				    &nrequests, uint64_t);
				CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
				    &nfills, uint64_t);
				CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
				    &nflushes, uint64_t);
			}
			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
			    uint64_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
			CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
			CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
			CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
			    size_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
			    &nmalloc, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
			    &ndalloc, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
			    &curregs, size_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
			    &nrequests, uint64_t);
			if (config_tcache) {
				CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
				    j, &nfills, uint64_t);
				CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
				    i, j, &nflushes, uint64_t);
			}
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
			    &reruns, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
			    &curruns, size_t);

			availregs = nregs * curruns;
			milli = (availregs != 0) ? (1000 * curregs) / availregs
			    : 1000;
			assert(milli <= 1000);
			if (milli < 10) {
				malloc_snprintf(util, sizeof(util),
				    "0.00%zu", milli);
			} else if (milli < 100) {
				malloc_snprintf(util, sizeof(util), "0.0%zu",
				    milli);
			} else if (milli < 1000) {
				malloc_snprintf(util, sizeof(util), "0.%zu",
				    milli);
			} else
				malloc_snprintf(util, sizeof(util), "1");

			if (config_tcache) {
				malloc_cprintf(write_cb, cbopaque,
				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12zu\n",
				    j, reg_size, nregs, run_size / page,
				    allocated, nmalloc, ndalloc, nrequests,
				    nfills, nflushes, nruns, reruns, curruns);
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curruns, nregs,
				    run_size / page, util, nfills, nflushes,
				    nruns, reruns);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12zu\n",
				    j, reg_size, nregs, run_size / page,
				    allocated, nmalloc, ndalloc, nruns, reruns,
				    curruns);
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curruns, nregs,
				    run_size / page, util, nruns, reruns);
			}
		}
	}
	if (gap_start != UINT_MAX) {
		if (j > gap_start + 1) {
			/* Gap of more than one size class. */
			malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
			    gap_start, j - 1);
		} else {
			/* Gap of one size class. */
			malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
		}
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    " ---\n");
	}
}

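The utilization column above is computed in integer permille (milli = 1000 * curregs / availregs) and rendered as "x.yyy" without floating point. A compact equivalent for illustration; the three magnitude branches collapse into one %03zu format, and format_util() is a hypothetical helper, not jemalloc code:

#include <stdio.h>
#include <stddef.h>

/* Format curregs/availregs as "x.yyy"; a 6-byte buffer suffices. */
static void
format_util(char *buf, size_t bufsz, size_t curregs, size_t availregs)
{
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	if (milli < 1000)
		snprintf(buf, bufsz, "0.%03zu", milli);	/* e.g. 7 -> "0.007" */
	else
		snprintf(buf, bufsz, "1");
}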
@@ -159,110 +161,199 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	size_t page, nlruns, j;
	ssize_t gap_start;

	CTL_GET("arenas.page", &page, size_t);
	unsigned nbins, nlruns, j;
	bool in_gap;

	malloc_cprintf(write_cb, cbopaque,
	    "large: size pages nmalloc ndalloc nrequests"
	    " curruns\n");
	CTL_GET("arenas.nlruns", &nlruns, size_t);
	for (j = 0, gap_start = -1; j < nlruns; j++) {
	    "large: size ind allocated nmalloc ndalloc"
	    " nrequests curruns\n");
	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlruns", &nlruns, unsigned);
	for (j = 0, in_gap = false; j < nlruns; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t run_size, curruns;

		CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
		    uint64_t);
		CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
		CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
		    uint64_t);
		CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
		    uint64_t);
		if (nrequests == 0) {
			if (gap_start == -1)
				gap_start = j;
		} else {
			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
			    size_t);
			if (gap_start != -1) {
				malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
				    j - gap_start);
				gap_start = -1;
		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
		    &nrequests, uint64_t);
		if (nrequests == 0)
			in_gap = true;
		else {
			CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
			CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
			    &curruns, size_t);
			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    " ---\n");
				in_gap = false;
			}
			malloc_cprintf(write_cb, cbopaque,
			    "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
			    " %12zu\n",
			    run_size, run_size / page, nmalloc, ndalloc,
			    nrequests, curruns);
			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
			    " %12"FMTu64" %12zu\n",
			    run_size, nbins + j, curruns * run_size, nmalloc,
			    ndalloc, nrequests, curruns);
		}
	}
	if (gap_start != -1)
		malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    " ---\n");
	}
}

static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i)
{
	unsigned nbins, nlruns, nhchunks, j;
	bool in_gap;

	malloc_cprintf(write_cb, cbopaque,
	    "huge: size ind allocated nmalloc ndalloc"
	    " nrequests curhchunks\n");
	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlruns", &nlruns, unsigned);
	CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
	for (j = 0, in_gap = false; j < nhchunks; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t hchunk_size, curhchunks;

		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
		    &nmalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
		    &ndalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
		    &nrequests, uint64_t);
		if (nrequests == 0)
			in_gap = true;
		else {
			CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
			    size_t);
			CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
			    j, &curhchunks, size_t);
			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    " ---\n");
				in_gap = false;
			}
			malloc_cprintf(write_cb, cbopaque,
			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
			    " %12"FMTu64" %12zu\n",
			    hchunk_size, nbins + nlruns + j,
			    curhchunks * hchunk_size, nmalloc, ndalloc,
			    nrequests, curhchunks);
		}
	}
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    " ---\n");
	}
}

static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i, bool bins, bool large)
    unsigned i, bool bins, bool large, bool huge)
{
	unsigned nthreads;
	const char *dss;
	ssize_t lg_dirty_mult;
	size_t page, pactive, pdirty, mapped;
	size_t metadata_mapped, metadata_allocated;
	uint64_t npurge, nmadvise, purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t huge_allocated;
	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;

	CTL_GET("arenas.page", &page, size_t);

	CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	malloc_cprintf(write_cb, cbopaque,
	    "assigned threads: %u\n", nthreads);
	CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
	    dss);
	CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
	CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
	CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
	CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
	CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
	CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
	if (lg_dirty_mult >= 0) {
		malloc_cprintf(write_cb, cbopaque,
		    "min active:dirty page ratio: %u:1\n",
		    (1U << lg_dirty_mult));
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "min active:dirty page ratio: N/A\n");
	}
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
	CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
	    " %"PRIu64" madvise%s, %"PRIu64" purged\n",
	    pactive, pdirty, npurge, npurge == 1 ? "" : "s",
	    nmadvise, nmadvise == 1 ? "" : "s", purged);
	    "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
	    " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
	    1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);

	malloc_cprintf(write_cb, cbopaque,
	    " allocated nmalloc ndalloc nrequests\n");
	CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
	CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
	    " allocated nmalloc ndalloc"
	    " nrequests\n");
	CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    "small: %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
	CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
	CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    "large: %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
	CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
	CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    small_allocated + large_allocated,
	    small_nmalloc + large_nmalloc,
	    small_ndalloc + large_ndalloc,
	    small_nrequests + large_nrequests);
	malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
	malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
	    "huge: %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
	malloc_cprintf(write_cb, cbopaque,
	    "total: %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    small_allocated + large_allocated + huge_allocated,
	    small_nmalloc + large_nmalloc + huge_nmalloc,
	    small_ndalloc + large_ndalloc + huge_ndalloc,
	    small_nrequests + large_nrequests + huge_nrequests);
	malloc_cprintf(write_cb, cbopaque,
	    "active: %12zu\n", pactive * page);
	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
	malloc_cprintf(write_cb, cbopaque,
	    "mapped: %12zu\n", mapped);
	CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
	    size_t);
	CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
	    size_t);
	malloc_cprintf(write_cb, cbopaque,
	    "metadata: mapped: %zu, allocated: %zu\n",
	    metadata_mapped, metadata_allocated);

	if (bins)
		stats_arena_bins_print(write_cb, cbopaque, i);
	if (large)
		stats_arena_lruns_print(write_cb, cbopaque, i);
	if (huge)
		stats_arena_hchunks_print(write_cb, cbopaque, i);
}

void
@@ -277,6 +368,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
	bool unmerged = true;
	bool bins = true;
	bool large = true;
	bool huge = true;

	/*
	 * Refresh stats, in case mallctl() was called by the application.
@@ -319,6 +411,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
			case 'l':
				large = false;
				break;
			case 'h':
				huge = false;
				break;
			default:;
			}
		}
@@ -327,7 +422,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
	malloc_cprintf(write_cb, cbopaque,
	    "___ Begin jemalloc statistics ___\n");
	if (general) {
		int err;
		const char *cpv;
		bool bv;
		unsigned uv;
@@ -346,26 +440,40 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
		    bv ? "enabled" : "disabled");

#define OPT_WRITE_BOOL(n) \
		if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
		    == 0) { \
		if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": %s\n", bv ? "true" : "false"); \
		}
#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
		bool bv2; \
		if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
		    je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": %s ("#m": %s)\n", bv ? "true" \
			    : "false", bv2 ? "true" : "false"); \
		} \
}
#define OPT_WRITE_SIZE_T(n) \
		if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
		    == 0) { \
		if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": %zu\n", sv); \
		}
#define OPT_WRITE_SSIZE_T(n) \
		if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
		    == 0) { \
		if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": %zd\n", ssv); \
		}
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
		ssize_t ssv2; \
		if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
		    je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": %zd ("#m": %zd)\n", \
			    ssv, ssv2); \
		} \
}
#define OPT_WRITE_CHAR_P(n) \
		if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
		    == 0) { \
		if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
			malloc_cprintf(write_cb, cbopaque, \
			    " opt."#n": \"%s\"\n", cpv); \
		}
@@ -376,9 +484,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
		OPT_WRITE_SIZE_T(lg_chunk)
		OPT_WRITE_CHAR_P(dss)
		OPT_WRITE_SIZE_T(narenas)
		OPT_WRITE_SSIZE_T(lg_dirty_mult)
		OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
		OPT_WRITE_BOOL(stats_print)
		OPT_WRITE_BOOL(junk)
		OPT_WRITE_CHAR_P(junk)
		OPT_WRITE_SIZE_T(quarantine)
		OPT_WRITE_BOOL(redzone)
		OPT_WRITE_BOOL(zero)
@@ -389,7 +497,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
		OPT_WRITE_SSIZE_T(lg_tcache_max)
		OPT_WRITE_BOOL(prof)
		OPT_WRITE_CHAR_P(prof_prefix)
		OPT_WRITE_BOOL(prof_active)
		OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
		OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
		    prof.thread_active_init)
		OPT_WRITE_SSIZE_T(lg_prof_sample)
		OPT_WRITE_BOOL(prof_accum)
		OPT_WRITE_SSIZE_T(lg_prof_interval)
@@ -398,6 +508,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
		OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
@@ -411,12 +522,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
		    sizeof(void *));

		CTL_GET("arenas.quantum", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
		    sv);

		CTL_GET("arenas.page", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
		CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
		if (ssv >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: %u:1\n",
@@ -425,22 +537,20 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: N/A\n");
		}
		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
		    == 0) {
		if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Maximum thread-cached size class: %zu\n", sv);
		}
		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
		    bv) {
			CTL_GET("opt.lg_prof_sample", &sv, size_t);
		if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
			CTL_GET("prof.lg_sample", &sv, size_t);
			malloc_cprintf(write_cb, cbopaque,
			    "Average profile sample interval: %"PRIu64
			    "Average profile sample interval: %"FMTu64
			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);

			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: %"PRIu64
				    "Average profile dump interval: %"FMTu64
				    " (2^%zd)\n",
				    (((uint64_t)1U) << ssv), ssv);
			} else {
@@ -449,47 +559,27 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
			}
		}
		CTL_GET("opt.lg_chunk", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
		    (ZU(1) << sv), sv);
		malloc_cprintf(write_cb, cbopaque,
		    "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
	}

	if (config_stats) {
		size_t *cactive;
		size_t allocated, active, mapped;
		size_t chunks_current, chunks_high;
		uint64_t chunks_total;
		size_t huge_allocated;
		uint64_t huge_nmalloc, huge_ndalloc;
		size_t allocated, active, metadata, resident, mapped;

		CTL_GET("stats.cactive", &cactive, size_t *);
		CTL_GET("stats.allocated", &allocated, size_t);
		CTL_GET("stats.active", &active, size_t);
		CTL_GET("stats.metadata", &metadata, size_t);
		CTL_GET("stats.resident", &resident, size_t);
		CTL_GET("stats.mapped", &mapped, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, mapped: %zu\n",
		    allocated, active, mapped);
		    "Allocated: %zu, active: %zu, metadata: %zu,"
		    " resident: %zu, mapped: %zu\n",
		    allocated, active, metadata, resident, mapped);
		malloc_cprintf(write_cb, cbopaque,
		    "Current active ceiling: %zu\n", atomic_read_z(cactive));

		/* Print chunk stats. */
		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
		CTL_GET("stats.chunks.high", &chunks_high, size_t);
		CTL_GET("stats.chunks.current", &chunks_current, size_t);
		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
		    "highchunks curchunks\n");
		malloc_cprintf(write_cb, cbopaque,
		    " %13"PRIu64" %12zu %12zu\n",
		    chunks_total, chunks_high, chunks_current);

		/* Print huge stats. */
		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "huge: nmalloc ndalloc allocated\n");
		malloc_cprintf(write_cb, cbopaque,
		    " %12"PRIu64" %12"PRIu64" %12zu\n",
		    huge_nmalloc, huge_ndalloc, huge_allocated);
		    "Current active ceiling: %zu\n",
		    atomic_read_z(cactive));

		if (merged) {
			unsigned narenas;
@@ -508,12 +598,12 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
					ninitialized++;
			}

			if (ninitialized > 1 || unmerged == false) {
			if (ninitialized > 1 || !unmerged) {
				/* Print merged arena stats. */
				malloc_cprintf(write_cb, cbopaque,
				    "\nMerged arenas stats:\n");
				stats_arena_print(write_cb, cbopaque,
				    narenas, bins, large);
				    narenas, bins, large, huge);
			}
		}
	}
@@ -539,7 +629,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
					    cbopaque,
					    "\narenas[%u]:\n", i);
					stats_arena_print(write_cb,
					    cbopaque, i, bins, large);
					    cbopaque, i, bins, large,
					    huge);
				}
			}
		}
	}

@ -4,9 +4,6 @@
|
|||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
malloc_tsd_data(, tcache, tcache_t *, NULL)
|
||||
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
|
||||
|
||||
bool opt_tcache = true;
|
||||
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
||||
|
||||
|
@ -16,6 +13,14 @@ static unsigned stack_nelms; /* Total stack elms per tcache. */
|
|||
size_t nhbins;
|
||||
size_t tcache_maxclass;
|
||||
|
||||
tcaches_t *tcaches;
|
||||
|
||||
/* Index of first element within tcaches that has never been used. */
|
||||
static unsigned tcaches_past;
|
||||
|
||||
/* Head of singly linked list tracking available tcaches elements. */
|
||||
static tcaches_t *tcaches_avail;
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
size_t tcache_salloc(const void *ptr)
|
||||
|
@ -25,9 +30,9 @@ size_t tcache_salloc(const void *ptr)
|
|||
}
|
||||
|
||||
void
|
||||
tcache_event_hard(tcache_t *tcache)
|
||||
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
|
||||
{
|
||||
size_t binind = tcache->next_gc_bin;
|
||||
index_t binind = tcache->next_gc_bin;
|
||||
tcache_bin_t *tbin = &tcache->tbins[binind];
|
||||
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
|
||||
|
||||
|
@ -36,11 +41,12 @@ tcache_event_hard(tcache_t *tcache)
|
|||
* Flush (ceiling) 3/4 of the objects below the low water mark.
|
||||
*/
|
||||
if (binind < NBINS) {
|
||||
tcache_bin_flush_small(tbin, binind, tbin->ncached -
|
||||
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
tcache_bin_flush_small(tsd, tcache, tbin, binind,
|
||||
tbin->ncached - tbin->low_water + (tbin->low_water
|
||||
>> 2));
|
||||
} else {
|
||||
tcache_bin_flush_large(tbin, binind, tbin->ncached -
|
||||
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
|
||||
- tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||
}
|
||||
/*
|
||||
* Reduce fill count by 2X. Limit lg_fill_div such that the
|
||||
|
@ -65,12 +71,13 @@ tcache_event_hard(tcache_t *tcache)
|
|||
}
|
||||
|
||||
void *
|
||||
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
|
||||
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
|
||||
tcache_bin_t *tbin, index_t binind)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
arena_tcache_fill_small(tcache->arena, tbin, binind,
|
||||
config_prof ? tcache->prof_accumbytes : 0);
|
||||
arena_tcache_fill_small(arena, tbin, binind, config_prof ?
|
||||
tcache->prof_accumbytes : 0);
|
||||
if (config_prof)
|
||||
tcache->prof_accumbytes = 0;
|
||||
ret = tcache_alloc_easy(tbin);
|
||||
|
@ -79,9 +86,10 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
|
|||
}
|
||||
|
||||
void
|
||||
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache)
|
||||
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
|
||||
index_t binind, unsigned rem)
|
||||
{
|
||||
arena_t *arena;
|
||||
void *ptr;
|
||||
unsigned i, nflush, ndeferred;
|
||||
bool merged_stats = false;
|
||||
|
@ -89,22 +97,24 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
assert(binind < NBINS);
|
||||
assert(rem <= tbin->ncached);
|
||||
|
||||
arena = arena_choose(tsd, NULL);
|
||||
assert(arena != NULL);
|
||||
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
|
||||
/* Lock the arena bin associated with the first object. */
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||
tbin->avail[0]);
|
||||
arena_t *arena = chunk->arena;
|
||||
arena_bin_t *bin = &arena->bins[binind];
|
||||
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
|
||||
arena_bin_t *bin = &bin_arena->bins[binind];
|
||||
|
||||
if (config_prof && arena == tcache->arena) {
|
||||
if (config_prof && bin_arena == arena) {
|
||||
if (arena_prof_accum(arena, tcache->prof_accumbytes))
|
||||
prof_idump();
|
||||
tcache->prof_accumbytes = 0;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
if (config_stats && arena == tcache->arena) {
|
||||
assert(merged_stats == false);
|
||||
if (config_stats && bin_arena == arena) {
|
||||
assert(!merged_stats);
|
||||
merged_stats = true;
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
|
@ -115,17 +125,13 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
ptr = tbin->avail[i];
|
||||
assert(ptr != NULL);
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk->arena == arena) {
|
||||
if (extent_node_arena_get(&chunk->node) == bin_arena) {
|
||||
size_t pageind = ((uintptr_t)ptr -
|
||||
(uintptr_t)chunk) >> LG_PAGE;
|
||||
arena_chunk_map_t *mapelm =
|
||||
arena_mapp_get(chunk, pageind);
|
||||
if (config_fill && opt_junk) {
|
||||
arena_alloc_junk_small(ptr,
|
||||
&arena_bin_info[binind], true);
|
||||
}
|
||||
arena_dalloc_bin_locked(arena, chunk, ptr,
|
||||
mapelm);
|
||||
arena_chunk_map_bits_t *bitselm =
|
||||
arena_bitselm_get(chunk, pageind);
|
||||
arena_dalloc_bin_junked_locked(bin_arena, chunk,
|
||||
ptr, bitselm);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
|
@ -139,12 +145,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
}
|
||||
malloc_mutex_unlock(&bin->lock);
|
||||
}
|
||||
if (config_stats && merged_stats == false) {
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
arena_bin_t *bin = &tcache->arena->bins[binind];
|
||||
arena_bin_t *bin = &arena->bins[binind];
|
||||
malloc_mutex_lock(&bin->lock);
|
||||
bin->stats.nflushes++;
|
||||
bin->stats.nrequests += tbin->tstats.nrequests;
|
||||
|
@ -160,9 +166,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
}
|
||||
|
||||
void
|
||||
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
||||
tcache_t *tcache)
|
||||
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
|
||||
unsigned rem, tcache_t *tcache)
|
||||
{
|
||||
arena_t *arena;
|
||||
void *ptr;
|
||||
unsigned i, nflush, ndeferred;
|
||||
bool merged_stats = false;
|
||||
|
@ -170,17 +177,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
assert(binind < nhbins);
|
||||
assert(rem <= tbin->ncached);
|
||||
|
||||
arena = arena_choose(tsd, NULL);
|
||||
assert(arena != NULL);
|
||||
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
|
||||
/* Lock the arena associated with the first object. */
|
||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
|
||||
tbin->avail[0]);
|
||||
arena_t *arena = chunk->arena;
|
||||
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
|
||||
UNUSED bool idump;
|
||||
|
||||
if (config_prof)
|
||||
idump = false;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
if ((config_prof || config_stats) && arena == tcache->arena) {
|
||||
malloc_mutex_lock(&locked_arena->lock);
|
||||
if ((config_prof || config_stats) && locked_arena == arena) {
|
||||
if (config_prof) {
|
||||
idump = arena_prof_accum_locked(arena,
|
||||
tcache->prof_accumbytes);
|
||||
|
@ -200,9 +209,11 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
ptr = tbin->avail[i];
|
||||
assert(ptr != NULL);
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
if (chunk->arena == arena)
|
||||
arena_dalloc_large_locked(arena, chunk, ptr);
|
||||
else {
|
||||
if (extent_node_arena_get(&chunk->node) ==
|
||||
locked_arena) {
|
||||
arena_dalloc_large_junked_locked(locked_arena,
|
||||
chunk, ptr);
|
||||
} else {
|
||||
/*
|
||||
* This object was allocated via a different
|
||||
* arena than the one that is currently locked.
|
||||
|
@ -213,16 +224,15 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
|
|||
ndeferred++;
|
||||
}
|
||||
}
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
malloc_mutex_unlock(&locked_arena->lock);
|
||||
if (config_prof && idump)
|
||||
prof_idump();
|
||||
}
|
||||
if (config_stats && merged_stats == false) {
|
||||
if (config_stats && !merged_stats) {
|
||||
/*
|
||||
* The flush loop didn't happen to flush to this thread's
|
||||
* arena, so the stats didn't get merged. Manually do so now.
|
||||
*/
|
||||
arena_t *arena = tcache->arena;
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena->stats.nrequests_large += tbin->tstats.nrequests;
|
||||
arena->stats.lstats[binind - NBINS].nrequests +=
|
||||
|
@ -249,24 +259,58 @@ tcache_arena_associate(tcache_t *tcache, arena_t *arena)
|
|||
ql_tail_insert(&arena->tcache_ql, tcache, link);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
tcache->arena = arena;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_dissociate(tcache_t *tcache)
|
||||
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
|
||||
{
|
||||
|
||||
tcache_arena_dissociate(tcache, oldarena);
|
||||
tcache_arena_associate(tcache, newarena);
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
|
||||
{
|
||||
|
||||
if (config_stats) {
|
||||
/* Unlink from list of extant tcaches. */
|
||||
malloc_mutex_lock(&tcache->arena->lock);
|
||||
ql_remove(&tcache->arena->tcache_ql, tcache, link);
|
||||
tcache_stats_merge(tcache, tcache->arena);
|
||||
malloc_mutex_unlock(&tcache->arena->lock);
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
if (config_debug) {
|
||||
bool in_ql = false;
|
||||
tcache_t *iter;
|
||||
ql_foreach(iter, &arena->tcache_ql, link) {
|
||||
if (iter == tcache) {
|
||||
in_ql = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(in_ql);
|
||||
}
|
||||
ql_remove(&arena->tcache_ql, tcache, link);
|
||||
tcache_stats_merge(tcache, arena);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
}
|
||||
|
||||
tcache_t *
|
||||
tcache_create(arena_t *arena)
|
||||
tcache_get_hard(tsd_t *tsd)
|
||||
{
|
||||
arena_t *arena;
|
||||
|
||||
if (!tcache_enabled_get()) {
|
||||
if (tsd_nominal(tsd))
|
||||
tcache_enabled_set(false); /* Memoize. */
|
||||
return (NULL);
|
||||
}
|
||||
arena = arena_choose(tsd, NULL);
|
||||
if (unlikely(arena == NULL))
|
||||
return (NULL);
|
||||
return (tcache_create(tsd, arena));
|
||||
}
|
||||
|
||||
tcache_t *
|
||||
tcache_create(tsd_t *tsd, arena_t *arena)
|
||||
{
|
||||
tcache_t *tcache;
|
||||
size_t size, stack_offset;
|
||||
|
@ -277,23 +321,10 @@ tcache_create(arena_t *arena)
|
|||
size = PTR_CEILING(size);
|
||||
stack_offset = size;
|
||||
size += stack_nelms * sizeof(void *);
|
||||
/*
|
||||
* Round up to the nearest multiple of the cacheline size, in order to
|
||||
* avoid the possibility of false cacheline sharing.
|
||||
*
|
||||
* That this works relies on the same logic as in ipalloc(), but we
|
||||
* cannot directly call ipalloc() here due to tcache bootstrapping
|
||||
* issues.
|
||||
*/
|
||||
size = (size + CACHELINE_MASK) & (-CACHELINE);
|
||||
|
||||
if (size <= SMALL_MAXCLASS)
|
||||
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
|
||||
else if (size <= tcache_maxclass)
|
||||
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
|
||||
else
|
||||
tcache = (tcache_t *)icalloct(size, false, arena);
|
||||
 	/* Avoid false cacheline sharing. */
 	size = sa2u(size, CACHELINE);
 
+	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
 	if (tcache == NULL)
 		return (NULL);
 
@@ -307,25 +338,23 @@ tcache_create(arena_t *arena)
 		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
 	}
 
-	tcache_tsd_set(&tcache);
-
 	return (tcache);
 }
 
-void
-tcache_destroy(tcache_t *tcache)
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 {
+	arena_t *arena;
 	unsigned i;
-	size_t tcache_size;
 
-	tcache_arena_dissociate(tcache);
+	arena = arena_choose(tsd, NULL);
+	tcache_arena_dissociate(tcache, arena);
 
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tbin, i, 0, tcache);
+		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
-			arena_t *arena = tcache->arena;
 			arena_bin_t *bin = &arena->bins[i];
 			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
@@ -335,10 +364,9 @@ tcache_destroy(tcache_t *tcache)
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tbin, i, 0, tcache);
+		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
-			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
 			arena->stats.lstats[i - NBINS].nrequests +=
@@ -348,57 +376,33 @@ tcache_destroy(tcache_t *tcache)
 	}
 
 	if (config_prof && tcache->prof_accumbytes > 0 &&
-	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+	    arena_prof_accum(arena, tcache->prof_accumbytes))
 		prof_idump();
 
-	tcache_size = arena_salloc(tcache, false);
-	if (tcache_size <= SMALL_MAXCLASS) {
-		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
-		arena_t *arena = chunk->arena;
-		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
-		    LG_PAGE;
-		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
-		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
-	} else if (tcache_size <= tcache_maxclass) {
-		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
-		arena_t *arena = chunk->arena;
-
-		arena_dalloc_large(arena, chunk, tcache);
-	} else
-		idalloct(tcache, false);
+	idalloctm(tsd, tcache, false, true);
 }
 
 void
-tcache_thread_cleanup(void *arg)
+tcache_cleanup(tsd_t *tsd)
 {
-	tcache_t *tcache = *(tcache_t **)arg;
+	tcache_t *tcache;
 
-	if (tcache == TCACHE_STATE_DISABLED) {
-		/* Do nothing. */
-	} else if (tcache == TCACHE_STATE_REINCARNATED) {
-		/*
-		 * Another destructor called an allocator function after this
-		 * destructor was called.  Reset tcache to
-		 * TCACHE_STATE_PURGATORY in order to receive another callback.
-		 */
-		tcache = TCACHE_STATE_PURGATORY;
-		tcache_tsd_set(&tcache);
-	} else if (tcache == TCACHE_STATE_PURGATORY) {
-		/*
-		 * The previous time this destructor was called, we set the key
-		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
-		 * cause re-creation of the tcache.  This time, do nothing, so
-		 * that the destructor will not be called again.
-		 */
-	} else if (tcache != NULL) {
-		assert(tcache != TCACHE_STATE_PURGATORY);
-		tcache_destroy(tcache);
-		tcache = TCACHE_STATE_PURGATORY;
-		tcache_tsd_set(&tcache);
+	if (!config_tcache)
+		return;
+
+	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
+		tcache_destroy(tsd, tcache);
+		tsd_tcache_set(tsd, NULL);
 	}
 }
 
+void
+tcache_enabled_cleanup(tsd_t *tsd)
+{
+
+	/* Do nothing. */
+}
+
 /* Caller must own arena->lock. */
 void
 tcache_stats_merge(tcache_t *tcache, arena_t *arena)
@@ -427,7 +431,67 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 }
 
 bool
-tcache_boot0(void)
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
 {
+	tcache_t *tcache;
+	tcaches_t *elm;
+
+	if (tcaches == NULL) {
+		tcaches = base_alloc(sizeof(tcache_t *) *
+		    (MALLOCX_TCACHE_MAX+1));
+		if (tcaches == NULL)
+			return (true);
+	}
+
+	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+		return (true);
+	tcache = tcache_create(tsd, a0get());
+	if (tcache == NULL)
+		return (true);
+
+	if (tcaches_avail != NULL) {
+		elm = tcaches_avail;
+		tcaches_avail = tcaches_avail->next;
+		elm->tcache = tcache;
+		*r_ind = elm - tcaches;
+	} else {
+		elm = &tcaches[tcaches_past];
+		elm->tcache = tcache;
+		*r_ind = tcaches_past;
+		tcaches_past++;
+	}
+
+	return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+	if (elm->tcache == NULL)
+		return;
+	tcache_destroy(tsd, elm->tcache);
+	elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+	tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+	tcaches_t *elm = &tcaches[ind];
+	tcaches_elm_flush(tsd, elm);
+	elm->next = tcaches_avail;
+	tcaches_avail = elm;
+}
+
+bool
+tcache_boot(void)
+{
 	unsigned i;
 
@@ -442,7 +506,7 @@ tcache_boot0(void)
 	else
 		tcache_maxclass = (1U << opt_lg_tcache_max);
 
-	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+	nhbins = size2index(tcache_maxclass) + 1;
 
 	/* Initialize tcache_bin_info. */
 	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -451,7 +515,11 @@ tcache_boot0(void)
 		return (true);
 	stack_nelms = 0;
 	for (i = 0; i < NBINS; i++) {
-		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+			tcache_bin_info[i].ncached_max =
+			    TCACHE_NSLOTS_SMALL_MIN;
+		} else if ((arena_bin_info[i].nregs << 1) <=
+		    TCACHE_NSLOTS_SMALL_MAX) {
 			tcache_bin_info[i].ncached_max =
 			    (arena_bin_info[i].nregs << 1);
 		} else {
@@ -467,13 +535,3 @@ tcache_boot0(void)
 
 	return (false);
 }
-
-bool
-tcache_boot1(void)
-{
-
-	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
-		return (true);
-
-	return (false);
-}
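The tcaches_create()/tcaches_flush()/tcaches_destroy() plumbing above backs the new explicit-tcache mallctls. As a caller-side sketch (not part of the diff; this follows the documented jemalloc 4.0.0 API, with error handling kept minimal):

#include <malloc_np.h>

int
main(void)
{
	unsigned tci;
	size_t sz = sizeof(tci);
	void *p;

	/* "tcache.create" hands back an explicit tcache index. */
	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
		return (1);

	/* Route an allocation/deallocation pair through that tcache. */
	p = mallocx(64, MALLOCX_TCACHE(tci));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tci));

	/* These reach tcaches_flush() and tcaches_destroy() above. */
	mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
	return (0);
}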
contrib/jemalloc/src/tsd.c
@@ -7,21 +7,22 @@
 static unsigned ncleanups;
 static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
 
+malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+
 /******************************************************************************/
 
 void *
 malloc_tsd_malloc(size_t size)
 {
 
-	/* Avoid choose_arena() in order to dodge bootstrapping issues. */
-	return (arena_malloc(arenas[0], size, false, false));
+	return (a0malloc(CACHELINE_CEILING(size)));
 }
 
 void
 malloc_tsd_dalloc(void *wrapper)
 {
 
-	idalloct(wrapper, false);
+	a0dalloc(wrapper);
 }
 
 void
@@ -67,10 +68,58 @@ malloc_tsd_cleanup_register(bool (*f)(void))
 }
 
 void
-malloc_tsd_boot(void)
+tsd_cleanup(void *arg)
 {
+	tsd_t *tsd = (tsd_t *)arg;
+
+	switch (tsd->state) {
+	case tsd_state_nominal:
+#define O(n, t)							\
+		n##_cleanup(tsd);
+MALLOC_TSD
+#undef O
+		tsd->state = tsd_state_purgatory;
+		tsd_set(tsd);
+		break;
+	case tsd_state_purgatory:
+		/*
+		 * The previous time this destructor was called, we set the
+		 * state to tsd_state_purgatory so that other destructors
+		 * wouldn't cause re-creation of the tsd.  This time, do
+		 * nothing, and do not request another callback.
+		 */
+		break;
+	case tsd_state_reincarnated:
+		/*
+		 * Another destructor deallocated memory after this destructor
+		 * was called.  Reset state to tsd_state_purgatory and request
+		 * another callback.
+		 */
+		tsd->state = tsd_state_purgatory;
+		tsd_set(tsd);
+		break;
+	default:
+		not_reached();
+	}
+}
+
+bool
+malloc_tsd_boot0(void)
+{
 
 	ncleanups = 0;
+	if (tsd_boot0())
+		return (true);
+	*tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
+	return (false);
+}
+
+void
+malloc_tsd_boot1(void)
+{
+
+	tsd_boot1();
+	*tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
 }
 
 #ifdef _WIN32
@@ -102,7 +151,7 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
 # pragma section(".CRT$XLY",long,read)
 #endif
 JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
     DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
 #endif
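The nominal/purgatory/reincarnated switch in tsd_cleanup() generalizes the purgatory dance the old tcache-specific destructor performed inline. A hypothetical stand-alone illustration of the underlying POSIX mechanism (not jemalloc code): re-setting the key to a sentinel inside the destructor makes pthreads schedule one more destructor pass, which is what lets the slot sit in purgatory until no later-running destructor touches it again.

#include <pthread.h>

static pthread_key_t key;
static char purgatory;	/* sentinel playing the role of tsd_state_purgatory */

static void
cleanup(void *arg)
{

	if (arg == (void *)&purgatory)
		return;	/* second pass: already torn down, stay quiet */

	/*
	 * First pass: release per-thread resources here, then park the
	 * slot on the sentinel.  Because the key is non-NULL again,
	 * pthreads will invoke this destructor once more, catching any
	 * allocation made by destructors that ran after this one.
	 */
	pthread_setspecific(key, &purgatory);
}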
contrib/jemalloc/src/util.c
@@ -97,10 +97,10 @@ buferror(int err, char *buf, size_t buflen)
 {
 
 #ifdef _WIN32
-	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
 	    (LPSTR)buf, buflen, NULL);
 	return (0);
-#elif defined(_GNU_SOURCE)
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
 	char *b = strerror_r(err, buf, buflen);
 	if (b != buf) {
 		strncpy(buf, b, buflen);
@@ -116,7 +116,7 @@ uintmax_t
 malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
 {
 	uintmax_t ret, digit;
-	int b;
+	unsigned b;
 	bool neg;
 	const char *p, *ns;
 
@@ -282,7 +282,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
 		sign = '-';
 	switch (sign) {
 	case '-':
-		if (neg == false)
+		if (!neg)
 			break;
 		/* Fall through. */
 	case ' ':
@@ -345,7 +345,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		/* Left padding. */					\
 		size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
 		    (size_t)width - slen : 0);				\
-		if (left_justify == false && pad_len != 0) {		\
+		if (!left_justify && pad_len != 0) {			\
 			size_t j;					\
 			for (j = 0; j < pad_len; j++)			\
 				APPEND_C(' ');				\
@@ -397,7 +397,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		case 'p': /* Synthetic; used for %p. */			\
 			val = va_arg(ap, uintptr_t);			\
 			break;						\
-		default: not_reached();					\
+		default:						\
+			not_reached();					\
+			val = 0;					\
 		}							\
 	} while (0)
 
@@ -420,19 +422,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 		while (true) {
 			switch (*f) {
 			case '#':
-				assert(alt_form == false);
+				assert(!alt_form);
 				alt_form = true;
 				break;
 			case '-':
-				assert(left_justify == false);
+				assert(!left_justify);
 				left_justify = true;
 				break;
 			case ' ':
-				assert(plus_space == false);
+				assert(!plus_space);
 				plus_space = true;
 				break;
 			case '+':
-				assert(plus_plus == false);
+				assert(!plus_plus);
 				plus_plus = true;
 				break;
 			default: goto label_width;
@@ -564,7 +566,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 				assert(len == '?' || len == 'l');
 				assert_not_implemented(len != 'l');
 				s = va_arg(ap, char *);
-				slen = (prec < 0) ? strlen(s) : prec;
+				slen = (prec < 0) ? strlen(s) : (size_t)prec;
 				APPEND_PADDED_S(s, slen, width, left_justify);
 				f++;
 				break;
@@ -600,7 +602,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
 	return (ret);
 }
 
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
 int
 malloc_snprintf(char *str, size_t size, const char *format, ...)
 {
@@ -639,7 +641,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
  * Print to a callback function in such a way as to (hopefully) avoid memory
  * allocation.
  */
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
 void
 malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, ...)
@@ -652,7 +654,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 
 /* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
 void
 malloc_printf(const char *format, ...)
 {
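Most of the util.c churn is mechanical (`x == false` becomes `!x`, signedness fixes, and the JEMALLOC_FORMAT_PRINTF wrapper replacing the raw attribute), but the buferror() guard is a real fix: the char *-returning strerror_r() is only guaranteed by glibc, so testing _GNU_SOURCE alone could select the wrong branch elsewhere. A self-contained sketch of the distinction (describe_errno is a hypothetical name, not jemalloc code):

#include <string.h>

static int
describe_errno(int err, char *buf, size_t buflen)
{
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
	/* GNU variant: returns char * and may leave buf untouched. */
	char *b = strerror_r(err, buf, buflen);

	if (b != buf)
		strncpy(buf, b, buflen);
	return (0);
#else
	/* POSIX variant: returns int and always fills buf. */
	return (strerror_r(err, buf, buflen));
#endif
}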
include/malloc_np.h
@@ -36,6 +36,23 @@
 #include <strings.h>
 
 __BEGIN_DECLS
+typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+typedef struct {
+	chunk_alloc_t		*alloc;
+	chunk_dalloc_t		*dalloc;
+	chunk_commit_t		*commit;
+	chunk_decommit_t	*decommit;
+	chunk_purge_t		*purge;
+	chunk_split_t		*split;
+	chunk_merge_t		*merge;
+} chunk_hooks_t;
+
 size_t	malloc_usable_size(const void *ptr);
 
 void	malloc_stats_print(void (*write_cb)(void *, const char *),
@@ -49,48 +66,32 @@ int	mallctlbymib(const size_t *mib, size_t miblen, void *oldp,
 #define	MALLOCX_LG_ALIGN(la)	(la)
 #define	MALLOCX_ALIGN(a)	(ffsl(a)-1)
 #define	MALLOCX_ZERO	((int)0x40)
-#define	MALLOCX_ARENA(a)	((int)(((a)+1) << 8))
+#define	MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
+#define	MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
+#define	MALLOCX_ARENA(a)	((int)(((a)+1) << 20))
 
 void	*mallocx(size_t size, int flags);
 void	*rallocx(void *ptr, size_t size, int flags);
 size_t	xallocx(void *ptr, size_t size, size_t extra, int flags);
 size_t	sallocx(const void *ptr, int flags);
 void	dallocx(void *ptr, int flags);
+void	sdallocx(void *ptr, size_t size, int flags);
 size_t	nallocx(size_t size, int flags);
 
-#define	ALLOCM_LG_ALIGN(la)	(la)
-#define	ALLOCM_ALIGN(a)	(ffsl(a)-1)
-#define	ALLOCM_ZERO	((int)0x40)
-#define	ALLOCM_NO_MOVE	((int)0x80)
-
-#define	ALLOCM_SUCCESS	0
-#define	ALLOCM_ERR_OOM	1
-#define	ALLOCM_ERR_NOT_MOVED	2
-
-int	allocm(void **ptr, size_t *rsize, size_t size, int flags) __nonnull(1);
-int	rallocm(void **ptr, size_t *rsize, size_t size, size_t extra,
-    int flags) __nonnull(1);
-int	sallocm(const void *ptr, size_t *rsize, int flags) __nonnull(1);
-int	dallocm(void *ptr, int flags) __nonnull(1);
-int	nallocm(size_t *rsize, size_t size, int flags);
-
-void *	__calloc(size_t, size_t) __malloc_like;
-void *	__malloc(size_t) __malloc_like;
-void *	__realloc(void *, size_t);
-void	__free(void *);
-int	__posix_memalign(void **, size_t, size_t);
-size_t	__malloc_usable_size(const void *);
+void *	__calloc(size_t number, size_t size) __malloc_like;
+void *	__malloc(size_t size) __malloc_like;
+void *	__realloc(void *ptr, size_t size);
+void	__free(void *ptr);
+int	__posix_memalign(void **ptr, size_t alignment, size_t size);
+void	*__aligned_alloc(size_t alignment, size_t size);
+size_t	__malloc_usable_size(const void *ptr);
 void	*__mallocx(size_t size, int flags);
 void	*__rallocx(void *ptr, size_t size, int flags);
 size_t	__xallocx(void *ptr, size_t size, size_t extra, int flags);
 size_t	__sallocx(const void *ptr, int flags);
 void	__dallocx(void *ptr, int flags);
+void	__sdallocx(void *ptr, size_t size, int flags);
 size_t	__nallocx(size_t size, int flags);
-int	__allocm(void **, size_t *, size_t, int) __nonnull(1);
-int	__rallocm(void **, size_t *, size_t, size_t, int) __nonnull(1);
-int	__sallocm(const void *, size_t *, int) __nonnull(1);
-int	__dallocm(void *, int) __nonnull(1);
-int	__nallocm(size_t *, size_t, int);
 __END_DECLS
 
 #endif /* _MALLOC_NP_H_ */
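chunk_hooks_t is the public face of the per-arena chunk-hook support added in this update. A hedged sketch of installing a custom allocation hook through the "arena.<i>.chunk_hooks" mallctl (names per the jemalloc 4.0.0 manual; a production hook must also honor the new_addr and alignment contract that this toy mmap() call ignores):

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>
#include <malloc_np.h>

static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;

	(void)new_addr; (void)alignment; (void)arena_ind; /* simplified */
	ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;	/* fresh anonymous pages are zero-filled */
	*commit = true;
	return (ret);
}

static int
install_hooks(void)
{
	chunk_hooks_t hooks;
	size_t sz = sizeof(hooks);

	/* Read the arena's current hooks, override alloc, write back. */
	if (mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0) != 0)
		return (1);
	hooks.alloc = my_chunk_alloc;
	return (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
	    sizeof(hooks)));
}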
lib/libc/gen/tls.c
@@ -40,9 +40,9 @@
 #include "libc_private.h"
 
 /* Provided by jemalloc to avoid bootstrapping issues. */
-void	*__je_a0malloc(size_t size);
-void	*__je_a0calloc(size_t num, size_t size);
-void	__je_a0free(void *ptr);
+void	*__je_bootstrap_malloc(size_t size);
+void	*__je_bootstrap_calloc(size_t num, size_t size);
+void	__je_bootstrap_free(void *ptr);
 
 __weak_reference(__libc_allocate_tls, _rtld_allocate_tls);
 __weak_reference(__libc_free_tls, _rtld_free_tls);
@@ -125,8 +125,8 @@ __libc_free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
 
 	tls = (Elf_Addr **)((Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE);
 	dtv = tls[0];
-	__je_a0free(dtv);
-	__je_a0free(tcb);
+	__je_bootstrap_free(dtv);
+	__je_bootstrap_free(tcb);
 }
 
 /*
@@ -142,18 +142,18 @@ __libc_allocate_tls(void *oldtcb, size_t tcbsize, size_t tcbalign __unused)
 	if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
 		return (oldtcb);
 
-	tcb = __je_a0calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE);
+	tcb = __je_bootstrap_calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE);
 	tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
 
 	if (oldtcb != NULL) {
 		memcpy(tls, oldtcb, tls_static_space);
-		__je_a0free(oldtcb);
+		__je_bootstrap_free(oldtcb);
 
 		/* Adjust the DTV. */
 		dtv = tls[0];
 		dtv[2] = (Elf_Addr)tls + TLS_TCB_SIZE;
 	} else {
-		dtv = __je_a0malloc(3 * sizeof(Elf_Addr));
+		dtv = __je_bootstrap_malloc(3 * sizeof(Elf_Addr));
 		tls[0] = dtv;
 		dtv[0] = 1;
 		dtv[1] = 1;
@@ -194,8 +194,8 @@ __libc_free_tls(void *tcb, size_t tcbsize __unused, size_t tcbalign)
 	dtv = ((Elf_Addr**)tcb)[1];
 	tlsend = (Elf_Addr) tcb;
 	tlsstart = tlsend - size;
-	__je_a0free((void*) tlsstart);
-	__je_a0free(dtv);
+	__je_bootstrap_free((void*) tlsstart);
+	__je_bootstrap_free(dtv);
 }
 
 /*
@@ -213,8 +213,8 @@ __libc_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
 
 	if (tcbsize < 2 * sizeof(Elf_Addr))
 		tcbsize = 2 * sizeof(Elf_Addr);
-	tls = __je_a0calloc(1, size + tcbsize);
-	dtv = __je_a0malloc(3 * sizeof(Elf_Addr));
+	tls = __je_bootstrap_calloc(1, size + tcbsize);
+	dtv = __je_bootstrap_malloc(3 * sizeof(Elf_Addr));
 
 	segbase = (Elf_Addr)(tls + size);
 	((Elf_Addr*)segbase)[0] = segbase;
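The __je_a0* to __je_bootstrap_* rename tracks jemalloc's split between the a0 arena and a dedicated bootstrap allocator that must work before any allocator TSD exists. A hypothetical bump-allocator sketch of the general technique (illustrative only, not the actual implementation):

#include <stddef.h>

/*
 * Bump allocation over static storage: usable arbitrarily early, and
 * frees are deliberately no-ops, which is tolerable for the handful of
 * early TLS allocations that may never be released.
 */
static char	boot_heap[16 * 1024];
static size_t	boot_off;

static void *
bootstrap_malloc(size_t size)
{
	void *p;

	size = (size + 15) & ~(size_t)15;	/* preserve 16-byte alignment */
	if (size > sizeof(boot_heap) - boot_off)
		return (NULL);
	p = boot_heap + boot_off;
	boot_off += size;
	return (p);
}

static void
bootstrap_free(void *ptr)
{

	(void)ptr;	/* intentionally a no-op */
}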
lib/libc/stdlib/jemalloc/Makefile.inc
@@ -4,7 +4,8 @@
 
 JEMALLOCSRCS:= jemalloc.c arena.c atomic.c base.c bitmap.c chunk.c \
 	chunk_dss.c chunk_mmap.c ckh.c ctl.c extent.c hash.c huge.c mb.c \
-	mutex.c prof.c quarantine.c rtree.c stats.c tcache.c tsd.c util.c
+	mutex.c pages.c prof.c quarantine.c rtree.c stats.c tcache.c tsd.c \
+	util.c
 
 SYM_MAPS+=${LIBC_SRCTOP}/stdlib/jemalloc/Symbol.map