Merge from HEAD

This commit is contained in:
Baptiste Daroussin 2015-08-25 20:14:50 +00:00
commit 23a32822d2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/collation/; revision=287142
1077 changed files with 81055 additions and 31134 deletions


@ -219,11 +219,16 @@ INSTALLTMP!= /usr/bin/mktemp -d -u -t install
# 2. build-tools stage [TMAKE]
# This stage is responsible for creating the object
# tree and building any tools that are needed during
# the build process.
# the build process. Some programs are listed during
# this phase because they build binaries to generate
# files needed to build these programs. This stage also
# builds the 'build-tools' target rather than 'all'.
# 3. cross-tools stage [XMAKE]
# This stage is responsible for creating any tools that
# are needed for building the system. A cross-compiler is one
# of them.
# of them. This differs from build tools in two ways:
# 1. the 'all' target is built rather than 'build-tools'
# 2. these tools are installed into TMPPATH for stage 4.
# 4. world stage [WMAKE]
# This stage actually builds the world.
# 5. install stage (optional) [IMAKE]
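
Taken together, these are the stages a normal "make buildworld" walks in
order; a minimal sh sketch of driving them (the -j value is illustrative,
the targets are the stock ones):

cd /usr/src
make -j8 buildworld     # runs the tools stages, then the world stage
make installworld       # stage 5, the optional install stage [IMAKE]
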
@ -444,7 +449,6 @@ LIB32WMAKEENV+= MAKEOBJDIRPREFIX=${LIB32_OBJTREE} \
PATH=${TMPPATH} \
LIBDIR=/usr/lib32 \
SHLIBDIR=/usr/lib32 \
LIBPRIVATEDIR=/usr/lib32/private \
DTRACE="${DTRACE} -32"
LIB32WMAKEFLAGS+= CC="${XCC} ${LIB32FLAGS}" \
CXX="${XCXX} ${LIB32FLAGS}" \
@ -807,7 +811,7 @@ __installcheck_UGID:
_zoneinfo= zic tzsetup
.endif
ITOOLS= [ awk cap_mkdb cat chflags chmod chown \
ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \
date echo egrep find grep id install ${_install-info} \
ln lockf make mkdir mtree mv pwd_mkdb \
rm sed services_mkdb sh strip sysctl test true uname wc ${_zoneinfo} \
@ -1225,16 +1229,23 @@ update:
#
#
# legacy: Build compatibility shims for the next three targets. This is a minimal
# set of tools and shims necessary to compensate for older systems which don't have
# the APIs that the targets built in bootstrap-tools, build-tools or cross-tools.
# legacy: Build compatibility shims for the next three targets. This is a
# minimal set of tools and shims necessary to compensate for older systems
# which don't have the APIs required by the targets built in bootstrap-tools,
# build-tools or cross-tools.
#
# ELF Tool Chain libraries are needed for ELF tools and dtrace tools.
.if ${BOOTSTRAPPING} < 1100006
_elftoolchain_libs= lib/libelf lib/libdwarf
.endif
legacy:
.if ${BOOTSTRAPPING} < 800107 && ${BOOTSTRAPPING} != 0
@echo "ERROR: Source upgrades from versions prior to 8.0 not supported."; \
false
.endif
.for _tool in tools/build
.for _tool in tools/build ${_elftoolchain_libs}
${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,depend,all,install)"; \
cd ${.CURDIR}/${_tool} && \
${MAKE} DIRPRFX=${_tool}/ obj && \
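
The BOOTSTRAPPING gates above compare against the host's __FreeBSD_version;
a hedged sketch of checking what value a FreeBSD host would supply:

sysctl -n kern.osreldate
awk '/^#define[[:space:]]+__FreeBSD_version/ { print $3 }' \
    /usr/include/osreldate.h
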
@ -1297,7 +1308,8 @@ _lex= usr.bin/lex
# r277259 crunchide: Correct 64-bit section header offset
# r281674 crunchide: always include both 32- and 64-bit ELF support
.if ${BOOTSTRAPPING} < 1100071
# r285986 crunchen: use STRIPBIN rather than STRIP
.if ${BOOTSTRAPPING} < 1100078
_crunch= usr.sbin/crunch
.endif
@ -1332,13 +1344,12 @@ ${_bt}-usr.bin/clang/tblgen: ${_bt}-lib/clang/libllvmtablegen ${_bt}-lib/clang/l
# pre libdwarf
.if ${BOOTSTRAPPING} < 1100006 || (${MACHINE} != ${TARGET} || \
${MACHINE_ARCH} != ${TARGET_ARCH})
_elftoolchain_libs= lib/libelf lib/libdwarf
.if ${MK_CDDL} != "no"
_dtrace_tools= cddl/usr.bin/sgsmsg cddl/lib/libctf cddl/usr.bin/ctfconvert \
cddl/usr.bin/ctfmerge
${_bt}-cddl/usr.bin/ctfconvert: ${_bt}-lib/libelf ${_bt}-lib/libdwarf ${_bt}-cddl/lib/libctf
${_bt}-cddl/usr.bin/ctfmerge: ${_bt}-lib/libelf ${_bt}-lib/libdwarf ${_bt}-cddl/lib/libctf
${_bt}-cddl/usr.bin/ctfconvert: ${_bt}-cddl/lib/libctf
${_bt}-cddl/usr.bin/ctfmerge: ${_bt}-cddl/lib/libctf
.endif
.endif
@ -1381,7 +1392,6 @@ bootstrap-tools: .PHONY
.for _tool in \
${_clang_tblgen} \
${_kerberos5_bootstrap_tools} \
${_elftoolchain_libs} \
${_dtrace_tools} \
${_strfile} \
${_gperf} \
@ -1428,17 +1438,18 @@ _gcc_tools= gnu/usr.bin/cc/cc_tools
.endif
.if ${MK_RESCUE} != "no"
_rescue= rescue/rescue
# rescue includes programs that have build-tools targets
_rescue=rescue/rescue
.endif
build-tools: .MAKE
.for _tool in \
bin/csh \
bin/sh \
${_rescue} \
${LOCAL_TOOL_DIRS} \
lib/ncurses/ncurses \
lib/ncurses/ncursesw \
${_rescue} \
${_share} \
usr.bin/awk \
lib/libmagic \
@ -1483,7 +1494,7 @@ _btxld= usr.sbin/btxld
.if ${MK_BINUTILS_BOOTSTRAP} != "no"
_binutils= gnu/usr.bin/binutils
.endif
.if ${MK_ELFTOOLCHAIN_TOOLS} != "no"
.if ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
_elftctools= lib/libelftc \
usr.bin/elfcopy \
usr.bin/nm \
@ -1493,7 +1504,7 @@ _elftctools= lib/libelftc \
# cross-build on a FreeBSD 10 host:
_elftctools+= usr.bin/addr2line
.endif
.elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_TOOLS} != "no"
.elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
# If cross-building with an external binutils we still need to build strip for
# the target (for at least crunchide).
_elftctools= lib/libelftc \
@ -1767,9 +1778,7 @@ cddl/lib/libctf__L: lib/libz__L
.endif
# cddl/lib/libdtrace requires lib/libproc and lib/librtld_db; it's only built
# on select architectures though (see cddl/lib/Makefile)
.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_ARCH} == "amd64" || \
${MACHINE_CPUARCH} == "arm" || ${MACHINE_ARCH} == "i386" || \
${MACHINE_CPUARCH} == "mips" || ${MACHINE_CPUARCH} == "powerpc"
.if ${MACHINE_CPUARCH} != "sparc64"
_prebuild_libs+= lib/libproc lib/librtld_db
.endif


@ -39,10 +39,70 @@
# done
# 20150809: String collation improvements
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE
OLD_FILES+=usr/bin/colldef
OLD_FILES+=usr/share/man/man1/colldef.1.gz
OLD_FILES+=usr/bin/mklocale
OLD_FILES+=usr/share/man/man1/mklocale.1.gz
# 20150818: *allocm() are gone in jemalloc 4.0.0
OLD_FILES+=usr/share/man/man3/allocm.3.gz
OLD_FILES+=usr/share/man/man3/dallocm.3.gz
OLD_FILES+=usr/share/man/man3/nallocm.3.gz
OLD_FILES+=usr/share/man/man3/rallocm.3.gz
OLD_FILES+=usr/share/man/man3/sallocm.3.gz
# 20150802: Remove netbsd's test on pw(8)
OLD_FILES+=usr/tests/usr.sbin/pw/pw_test
# 20150719: Remove libarchive.pc
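
These OLD_FILES entries feed the delete-old machinery; a hedged sketch of
pruning the newly obsolete collation files after installing world:

cd /usr/src
make check-old      # list obsolete files, libraries and directories
make delete-old     # remove obsolete files (interactive by default)
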


@ -31,6 +31,48 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20150817:
Kernel-loadable modules for the random(4) device are back. To use
them, the kernel must have
device random
options RANDOM_LOADABLE
kldload(8) can then be used to load random_fortuna.ko
or random_yarrow.ko. Please note that due to the indirect
function calls that the loadable modules need to provide,
the built-in variants will be slightly more efficient.
The random(4) kernel option RANDOM_DUMMY has been retired due to
unpopularity. It was not all that useful anyway.
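
A hedged sketch of switching implementations at runtime, assuming a kernel
built with the two config lines above (module names as given in this entry):

kldload random_fortuna      # or: kldload random_yarrow
kldstat | grep random       # confirm the module is resident
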
20150813:
The WITHOUT_ELFTOOLCHAIN_TOOLS src.conf(5) knob has been retired.
Control over building the ELF Tool Chain tools is now provided by
the WITHOUT_TOOLCHAIN knob.
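
For example, a hedged sketch of updating an /etc/src.conf that used the
retired knob:

# previously: WITHOUT_ELFTOOLCHAIN_TOOLS=yes
echo 'WITHOUT_TOOLCHAIN=yes' >> /etc/src.conf
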
20150810:
The polarity of Pulse Per Second (PPS) capture events with the
uart(4) driver has been corrected. Prior to this change the PPS
"assert" event corresponded to the trailing edge of a positive PPS
pulse and the "clear" event was the leading edge of the next pulse.
As the width of a PPS pulse in a typical GPS receiver is on the
order of 1 millisecond, most users will not notice any significant
difference with this change.
Anyone who has compensated for the historical polarity reversal by
configuring a negative offset equal to the pulse width will need to
remove that workaround.
20150809:
The default group assigned to /dev/dri entries has been changed
from 'wheel' to 'video' with the id of '44'. If you want to have
access to the dri devices please add yourself to the video group
with:
# pw groupmod video -m $USER
20150806:
The menu.rc and loader.rc files will now be replaced during
upgrades. Please migrate local changes to menu.rc.local and


@ -5,7 +5,7 @@
PROG= ls
SRCS= cmp.c ls.c print.c util.c
LIBADD= util xo
LIBADD= xo util
.if !defined(RELEASE_CRUNCH) && \
${MK_LS_COLORS} != no


@ -5,7 +5,7 @@
PROG= pkill
LIBADD= kvm
LIBADD= kvm jail
LINKS= ${BINDIR}/pkill ${BINDIR}/pgrep
MLINKS= pkill.1 pgrep.1


@ -9,6 +9,7 @@ DIRDEPS = \
lib/${CSU_DIR} \
lib/libc \
lib/libcompiler_rt \
lib/libjail \
lib/libkvm \


@ -29,7 +29,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd August 9, 2013
.Dd August 21, 2015
.Dt PKILL 1
.Os
.Sh NAME
@ -47,7 +47,7 @@
.Op Fl c Ar class
.Op Fl d Ar delim
.Op Fl g Ar pgrp
.Op Fl j Ar jid
.Op Fl j Ar jail
.Op Fl s Ar sid
.Op Fl t Ar tty
.Op Fl u Ar euid
@ -63,7 +63,7 @@
.Op Fl U Ar uid
.Op Fl c Ar class
.Op Fl g Ar pgrp
.Op Fl j Ar jid
.Op Fl j Ar jail
.Op Fl s Ar sid
.Op Fl t Ar tty
.Op Fl u Ar euid
@ -149,16 +149,16 @@ or
command.
.It Fl i
Ignore case distinctions in both the process table and the supplied pattern.
.It Fl j Ar jid
Restrict matches to processes inside jails with a jail ID in the comma-separated
list
.Ar jid .
The value
.It Fl j Ar jail
Restrict matches to processes inside the specified jails.
The argument
.Ar jail
may be
.Dq Li any
matches processes in any jail.
The value
to match processes in any jail,
.Dq Li none
matches processes not in jail.
to match processes not in jail,
or a comma-separated list of jail IDs or names.
.It Fl l
Long output.
For
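
With this rewrite the flag accepts jail names as well as IDs; a hedged
usage sketch (the jail names are illustrative):

pgrep -j www,db sshd    # sshd processes in jails named "www" or "db"
pgrep -j any getty      # matching processes in any jail
pgrep -j none getty     # only processes outside all jails
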


@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <grp.h>
#include <errno.h>
#include <locale.h>
#include <jail.h>
#define STATUS_MATCH 0
#define STATUS_NOMATCH 1
@ -78,7 +79,7 @@ enum listtype {
LT_GROUP,
LT_TTY,
LT_PGRP,
LT_JID,
LT_JAIL,
LT_SID,
LT_CLASS
};
@ -245,7 +246,7 @@ main(int argc, char **argv)
cflags |= REG_ICASE;
break;
case 'j':
makelist(&jidlist, LT_JID, optarg);
makelist(&jidlist, LT_JAIL, optarg);
criteria = 1;
break;
case 'l':
@ -585,7 +586,7 @@ usage(void)
fprintf(stderr,
"usage: %s %s [-F pidfile] [-G gid] [-M core] [-N system]\n"
" [-P ppid] [-U uid] [-c class] [-g pgrp] [-j jid]\n"
" [-P ppid] [-U uid] [-c class] [-g pgrp] [-j jail]\n"
" [-s sid] [-t tty] [-u euid] pattern ...\n",
getprogname(), ustr);
@ -700,7 +701,7 @@ makelist(struct listhead *head, enum listtype type, char *src)
if (li->li_number == 0)
li->li_number = getsid(mypid);
break;
case LT_JID:
case LT_JAIL:
if (li->li_number < 0)
errx(STATUS_BADUSAGE,
"Negative jail ID `%s'", sp);
@ -766,15 +767,20 @@ foundtty: if ((st.st_mode & S_IFCHR) == 0)
li->li_number = st.st_rdev;
break;
case LT_JID:
case LT_JAIL: {
int jid;
if (strcmp(sp, "none") == 0)
li->li_number = 0;
else if (strcmp(sp, "any") == 0)
li->li_number = -1;
else if ((jid = jail_getid(sp)) != -1)
li->li_number = jid;
else if (*ep != '\0')
errx(STATUS_BADUSAGE,
"Invalid jail ID `%s'", sp);
"Invalid jail ID or name `%s'", sp);
break;
}
case LT_CLASS:
li->li_number = -1;
li->li_name = strdup(sp);


@ -14,7 +14,7 @@ if [ `id -u` -ne 0 ]; then
exit 0
fi
echo "1..3"
echo "1..4"
sleep=$(pwd)/sleep.txt
ln -sf /bin/sleep $sleep
@ -87,5 +87,30 @@ else
fi
[ -f ${PWD}/${base}_3_1.pid ] && kill $(cat $PWD/${base}_3_1.pid)
[ -f ${PWD}/${base}_3_2.pid ] && kill $(cat $PWD/${base}_3_2.pid)
wait
# test 4 is like test 1 except with jname instead of jid.
name="pgrep -j <jname>"
sleep_amount=8
jail -c path=/ name=${base}_4_1 ip4.addr=127.0.0.1 \
command=daemon -p ${PWD}/${base}_4_1.pid $sleep $sleep_amount &
jail -c path=/ name=${base}_4_2 ip4.addr=127.0.0.1 \
command=daemon -p ${PWD}/${base}_4_2.pid $sleep $sleep_amount &
sleep 0.5
jname="${base}_4_1,${base}_4_2"
pid1="$(pgrep -f -x -j "$jname" "$sleep $sleep_amount" | sort)"
pid2=$(printf "%s\n%s" "$(cat ${PWD}/${base}_4_1.pid)" \
$(cat ${PWD}/${base}_4_2.pid) | sort)
if [ "$pid1" = "$pid2" ]; then
echo "ok 4 - $name"
else
echo "not ok 4 - $name # pgrep output: '$(echo $pid1)', pidfile output: '$(echo $pid2)'"
fi
[ -f ${PWD}/${base}_4_1.pid ] && kill $(cat ${PWD}/${base}_4_1.pid)
[ -f ${PWD}/${base}_4_2.pid ] && kill $(cat ${PWD}/${base}_4_2.pid)
wait
rm -f $sleep


@ -14,7 +14,7 @@ if [ `id -u` -ne 0 ]; then
exit 0
fi
echo "1..3"
echo "1..4"
sleep=$(pwd)/sleep.txt
ln -sf /bin/sleep $sleep
@ -90,5 +90,31 @@ else
fi 2>/dev/null
[ -f ${PWD}/${base}_3_1.pid ] && kill $(cat ${base}_3_1.pid)
[ -f ${PWD}/${base}_3_2.pid ] && kill $(cat ${base}_3_2.pid)
wait
# test 4 is like test 1 except with jname instead of jid.
name="pkill -j <jname>"
sleep_amount=8
jail -c path=/ name=${base}_4_1 ip4.addr=127.0.0.1 \
command=daemon -p ${PWD}/${base}_4_1.pid $sleep $sleep_amount &
jail -c path=/ name=${base}_4_2 ip4.addr=127.0.0.1 \
command=daemon -p ${PWD}/${base}_4_2.pid $sleep $sleep_amount &
$sleep $sleep_amount &
sleep 0.5
jname="${base}_4_1,${base}_4_2"
if pkill -f -j "$jname" $sleep && sleep 0.5 &&
[ ! -f ${PWD}/${base}_4_1.pid ] &&
[ ! -f ${PWD}/${base}_4_2.pid ] ; then
echo "ok 4 - $name"
else
echo "not ok 4 - $name"
fi 2>/dev/null
[ -f ${PWD}/${base}_4_1.pid ] && kill $(cat ${PWD}/${base}_4_1.pid)
[ -f ${PWD}/${base}_4_2.pid ] && kill $(cat ${PWD}/${base}_4_2.pid)
wait
rm -f $sleep


@ -11,6 +11,6 @@ SRCS= fmt.c keyword.c nlist.c print.c ps.c
# on large systems.
#
CFLAGS+=-DLAZY_PS
LIBADD= m kvm jail xo
LIBADD= m kvm jail xo util
.include <bsd.prog.mk>


@ -191,9 +191,10 @@ readcmd(int argc __unused, char **argv __unused)
CHECKSTRSPACE(1, p);
if (backslash) {
backslash = 0;
startword = 0;
if (c != '\n')
if (c != '\n') {
startword = 0;
USTPUTC(c, p);
}
continue;
}
if (!rflag && c == '\\') {
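
The reordering above makes a backslash-newline in read(1)'s input a pure
line continuation: it no longer marks the start of a word. A hedged sketch
of the resulting behavior (compare the read8.0 test added below):

read a b <<\EOF
first\
word second
EOF
echo "$a"   # firstword - the continuation joins the token
echo "$b"   # second
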


@ -73,6 +73,7 @@ char *minusc; /* argument to -c option */
static void options(int);
static void minus_o(char *, int);
static void setoption(int, int);
static void setoptionbyindex(int, int);
static int getopts(char *, char *, char **, char ***, char **);
@ -269,7 +270,7 @@ minus_o(char *name, int val)
} else {
for (i = 0; i < NOPTS; i++)
if (equal(name, optlist[i].name)) {
setoption(optlist[i].letter, val);
setoptionbyindex(i, val);
return;
}
error("Illegal option -o %s", name);
@ -278,26 +279,32 @@ minus_o(char *name, int val)
static void
setoption(int flag, int val)
setoptionbyindex(int idx, int val)
{
int i;
if (flag == 'p' && !val && privileged) {
if (optlist[idx].letter == 'p' && !val && privileged) {
if (setgid(getgid()) == -1)
error("setgid");
if (setuid(getuid()) == -1)
error("setuid");
}
optlist[idx].val = val;
if (val) {
/* #%$ hack for ksh semantics */
if (optlist[idx].letter == 'V')
Eflag = 0;
else if (optlist[idx].letter == 'E')
Vflag = 0;
}
}
static void
setoption(int flag, int val)
{
int i;
for (i = 0; i < NOPTS; i++)
if (optlist[i].letter == flag) {
optlist[i].val = val;
if (val) {
/* #%$ hack for ksh semantics */
if (flag == 'V')
Eflag = 0;
else if (flag == 'E')
Vflag = 0;
}
setoptionbyindex(i, val);
return;
}
error("Illegal option -%c", flag);


@ -1195,7 +1195,8 @@ parsebackq(char *out, struct nodelist **pbqlist,
static char *
readcstyleesc(char *out)
{
int c, v, i, n;
int c, vc, i, n;
unsigned int v;
c = pgetc();
switch (c) {
@ -1310,12 +1311,12 @@ readcstyleesc(char *out)
default:
synerror("Bad escape sequence");
}
v = (char)v;
vc = (char)v;
/*
* We can't handle NUL bytes.
* POSIX says we should skip till the closing quote.
*/
if (v == '\0') {
if (vc == '\0') {
while ((c = pgetc()) != '\'') {
if (c == '\\')
c = pgetc();
@ -1332,9 +1333,9 @@ readcstyleesc(char *out)
pungetc();
return out;
}
if (SQSYNTAX[v] == CCTL)
if (SQSYNTAX[vc] == CCTL)
USTPUTC(CTLESC, out);
USTPUTC(v, out);
USTPUTC(vc, out);
return out;
}
@ -1661,7 +1662,7 @@ parsesub: {
pungetc();
else if (c == '\n' || c == PEOF)
synerror("Unexpected end of line in substitution");
else
else if (BASESYNTAX[c] != CCTL)
USTPUTC(c, out);
}
if (subtype == 0) {
@ -1677,7 +1678,8 @@ parsesub: {
synerror("Unexpected end of line in substitution");
if (flags == VSNUL)
STPUTC(':', out);
STPUTC(c, out);
if (BASESYNTAX[c] != CCTL)
STPUTC(c, out);
subtype = VSERROR;
} else
subtype = p - types + VSNORMAL;
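
The vc/v split keeps the full, possibly out-of-range escape value in an
unsigned variable while the emitted byte is filtered through SQSYNTAX, and
the parsesub changes stop copying CTL bytes into variable names. A hedged
sketch of the user-visible $'...' behavior (see the dollar-quote tests
added below):

printf '%s\n' $'\u0041'     # prints A
(eval ": \$'\u'")           # \u with no digits: a parse error, non-zero exit
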


@ -121,6 +121,7 @@ FILES+= read4.0 read4.0.stdout
FILES+= read5.0
FILES+= read6.0
FILES+= read7.0
FILES+= read8.0
FILES+= return1.0
FILES+= return2.1
FILES+= return3.1


@ -0,0 +1,17 @@
# $FreeBSD$
read a b c <<\EOF
\
A\
\
\
\
B\
\
\
C\
\
\
\
EOF
[ "$a.$b.$c" = "A.B.C" ]


@ -19,6 +19,8 @@ FILES+= bad-parm-exp3.2 bad-parm-exp3.2.stderr
FILES+= bad-parm-exp4.2 bad-parm-exp4.2.stderr
FILES+= bad-parm-exp5.2 bad-parm-exp5.2.stderr
FILES+= bad-parm-exp6.2 bad-parm-exp6.2.stderr
FILES+= bad-parm-exp7.0
FILES+= bad-parm-exp8.0
FILES+= option-error.0
FILES+= redirection-error.0
FILES+= redirection-error2.2


@ -0,0 +1,4 @@
# $FreeBSD$
v=1
eval ": $(printf '${v-${\372}}')"


@ -0,0 +1,4 @@
# $FreeBSD$
v=1
eval ": $(printf '${v-${w\372}}')"


@ -36,6 +36,8 @@ FILES+= dollar-quote8.0
FILES+= dollar-quote9.0
FILES+= dollar-quote10.0
FILES+= dollar-quote11.0
FILES+= dollar-quote12.0
FILES+= dollar-quote13.0
FILES+= empty-braces1.0
FILES+= empty-cmd1.0
FILES+= for1.0


@ -0,0 +1,7 @@
# $FreeBSD$
# \u without any digits at all remains invalid.
# Our choice is a parse error.
v=$( (eval ": \$'\u'") 2>&1 >/dev/null)
[ $? -ne 0 ] && [ -n "$v" ]


@ -0,0 +1,8 @@
# $FreeBSD$
# This Unicode escape sequence that has never been in range should either
# fail to expand or expand to a fallback.
c=$(eval printf %s \$\'\\Uffffff41\' 2>/dev/null)
r=$(($? != 0))
[ "$r.$c" = '1.' ] || [ "$r.$c" = '0.?' ] || [ "$r.$c" = $'0.\u2222' ]


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
*/
#include <stdio.h>
@ -95,6 +95,8 @@ static int zopt_objects = 0;
static libzfs_handle_t *g_zfs;
static uint64_t max_inflight = 1000;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
@ -418,6 +420,79 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
char bytes[32], comp[32], uncomp[32];
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes);
zdb_nicenum(bpop->bpo_comp, comp);
zdb_nicenum(bpop->bpo_uncomp, uncomp);
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (dump_opt['d'] < 5)
return;
for (uint64_t i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp);
(void) printf("\t%s\n", blkbuf);
}
}
/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (uint64_t i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (int64_t i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
@ -1397,7 +1472,7 @@ dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
}
static void
dump_bpobj(bpobj_t *bpo, char *name, int indent)
dump_full_bpobj(bpobj_t *bpo, char *name, int indent)
{
char bytes[32];
char comp[32];
@ -1411,11 +1486,12 @@ dump_bpobj(bpobj_t *bpo, char *name, int indent)
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
(void) printf(" %*s: object %llu, %llu local blkptrs, "
"%llu subobjs, %s (%s/%s comp)\n",
"%llu subobjs in object %llu, %s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
@ -1432,7 +1508,7 @@ dump_bpobj(bpobj_t *bpo, char *name, int indent)
error, (u_longlong_t)subobj);
continue;
}
dump_bpobj(&subbpo, "subobj", indent + 1);
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
@ -1466,7 +1542,7 @@ dump_deadlist(dsl_deadlist_t *dl)
return;
if (dl->dl_oldfmt) {
dump_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
@ -1491,7 +1567,7 @@ dump_deadlist(dsl_deadlist_t *dl)
(void) snprintf(buf, sizeof (buf), "mintxg %llu -> "
"obj %llu", (longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_bpobj(&dle->dle_bpobj, buf, 0);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
@ -1682,8 +1758,8 @@ static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bplist */
dump_none, /* bplist header */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
@ -1730,7 +1806,7 @@ static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_none, /* bpobj subobjs */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
@ -2145,7 +2221,7 @@ dump_label(const char *dev)
(void) close(fd);
}
static uint64_t num_large_blocks;
static uint64_t dataset_feature_count[SPA_FEATURES];
/*ARGSUSED*/
static int
@ -2159,8 +2235,15 @@ dump_one_dir(const char *dsname, void *arg)
(void) printf("Could not open %s, error %d\n", dsname, error);
return (0);
}
if (dmu_objset_ds(os)->ds_large_blocks)
num_large_blocks++;
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (!dmu_objset_ds(os)->ds_feature_inuse[f])
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
dump_dir(os);
dmu_objset_disown(os, FTAG);
fuid_table_destroy();
@ -2352,6 +2435,9 @@ zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type;
boolean_t is_metadata;
if (bp == NULL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
@ -2841,7 +2927,7 @@ zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
@ -2956,13 +3042,13 @@ dump_zpool(spa_t *spa)
dump_metaslab_groups(spa);
if (dump_opt['d'] || dump_opt['i']) {
uint64_t refcount;
dump_dir(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dump_bpobj(&spa->spa_deferred_bpobj,
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj,
dump_full_bpobj(
&spa->spa_dsl_pool->dp_free_bpobj,
"Pool snapshot frees", 0);
}
@ -2977,17 +3063,29 @@ dump_zpool(spa_t *spa)
(void) dmu_objset_find(spa_name(spa), dump_one_dir,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_LARGE_BLOCKS], &refcount);
if (num_large_blocks != refcount) {
(void) printf("large_blocks feature refcount mismatch: "
"expected %lld != actual %lld\n",
(longlong_t)num_large_blocks,
(longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified large_blocks feature refcount "
"is correct (%llu)\n", (longlong_t)refcount);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
(void) feature_get_refcount(spa,
&spa_feature_table[f], &refcount);
if (dataset_feature_count[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld datasets != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)dataset_feature_count[f],
(longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))


@ -191,11 +191,13 @@
.Nm
.Cm receive Ns | Ns Cm recv
.Op Fl vnFu
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm
.Cm receive Ns | Ns Cm recv
.Op Fl vnFu
.Op Fl d | e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Ar filesystem
.Nm
.Cm allow
@ -939,7 +941,23 @@ Disabling checksums is
.Em NOT
a recommended practice.
.It Sy compression Ns = Ns Cm on | off | lzjb | gzip | gzip- Ns Ar N | Cm zle | Cm lz4
Controls the compression algorithm used for this dataset. The
Controls the compression algorithm used for this dataset.
Setting compression to
.Cm on
indicates that the current default compression algorithm should be used.
The default balances compression and decompression speed with compression
ratio, and is expected to work well on a wide variety of workloads.
Unlike all other settings for this property, on does not select a fixed
compression type.
As new compression algorithms are added to ZFS and enabled on a pool, the
default compression algorithm may change.
The current default compression algorithm is either
.Cm lzjb
or, if the
.Sy lz4_compress
feature is enabled,
.Cm lz4 .
The
.Cm lzjb
compression algorithm is optimized for performance while providing decent data
compression. Setting compression to
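
A hedged illustration of the "on" semantics described above (pool and
dataset names are placeholders):

zfs set compression=on tank/data
zfs get -H -o value compression tank/data   # reports "on"; the concrete
                                            # algorithm may change as new
                                            # defaults are enabled
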
@ -2689,6 +2707,7 @@ feature.
.Nm
.Cm receive Ns | Ns Cm recv
.Op Fl vnFu
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
.It Xo
@ -2696,6 +2715,7 @@ feature.
.Cm receive Ns | Ns Cm recv
.Op Fl vnFu
.Op Fl d | e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Ar filesystem
.Xc
.Pp
@ -2780,6 +2800,10 @@ receive operation.
Do not actually receive the stream. This can be useful in conjunction with the
.Fl v
option to verify the name the receive operation would use.
.It Fl o Sy origin Ns = Ns Ar snapshot
Forces the stream to be received as a clone of the given snapshot.
This is only valid if the stream is an incremental stream whose source
is the same as the provided origin.
.It Fl F
Force a rollback of the file system to the most recent snapshot before
performing the receive operation. If receiving an incremental replication
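
A hedged sketch of the new -o origin= flag (dataset names are placeholders;
the stream must be an incremental whose source matches the given origin):

zfs send -i tank/src@a tank/src@b | zfs receive -o origin=tank/base@a tank/clone
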


@ -264,8 +264,9 @@ get_usage(zfs_help_t idx)
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive|recv [-vnFu] <filesystem|volume|"
"snapshot>\n"
"\treceive|recv [-vnFu] [-d | -e] <filesystem>\n"));
"snapshot>\n"
"\treceive|recv [-vnFu] [-o origin=<snapshot>] [-d | -e] "
"<filesystem>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
@ -791,7 +792,7 @@ zfs_do_create(int argc, char **argv)
nomem();
break;
case 'o':
if (parseprop(props, optarg))
if (parseprop(props, optarg) != 0)
goto error;
break;
case 's':
@ -3659,7 +3660,7 @@ zfs_do_snapshot(int argc, char **argv)
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (parseprop(props, optarg))
if (parseprop(props, optarg) != 0)
return (1);
break;
case 'r':
@ -3918,10 +3919,19 @@ zfs_do_receive(int argc, char **argv)
{
int c, err;
recvflags_t flags = { 0 };
nvlist_t *props;
nvpair_t *nvp = NULL;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":denuvF")) != -1) {
while ((c = getopt(argc, argv, ":o:denuvF")) != -1) {
switch (c) {
case 'o':
if (parseprop(props, optarg) != 0)
return (1);
break;
case 'd':
flags.isprefix = B_TRUE;
break;
@ -3966,6 +3976,13 @@ zfs_do_receive(int argc, char **argv)
usage(B_FALSE);
}
while ((nvp = nvlist_next_nvpair(props, nvp))) {
if (strcmp(nvpair_name(nvp), "origin") != 0) {
(void) fprintf(stderr, gettext("invalid option"));
usage(B_FALSE);
}
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
@ -3974,7 +3991,7 @@ zfs_do_receive(int argc, char **argv)
return (1);
}
err = zfs_receive(g_zfs, argv[0], &flags, STDIN_FILENO, NULL);
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
return (err != 0);
}


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
*/
@ -294,8 +294,8 @@ zhack_feature_enable_sync(void *arg, dmu_tx_t *tx)
feature_enable_sync(spa, feature, tx);
spa_history_log_internal(spa, "zhack enable feature", tx,
"name=%s can_readonly=%u",
feature->fi_guid, feature->fi_can_readonly);
"guid=%s flags=%x",
feature->fi_guid, feature->fi_flags);
}
static void
@ -314,9 +314,7 @@ zhack_do_feature_enable(int argc, char **argv)
*/
desc = NULL;
feature.fi_uname = "zhack";
feature.fi_mos = B_FALSE;
feature.fi_can_readonly = B_FALSE;
feature.fi_activate_on_enable = B_FALSE;
feature.fi_flags = 0;
feature.fi_depends = nodeps;
feature.fi_feature = SPA_FEATURE_NONE;
@ -324,7 +322,7 @@ zhack_do_feature_enable(int argc, char **argv)
while ((c = getopt(argc, argv, "rmd:")) != -1) {
switch (c) {
case 'r':
feature.fi_can_readonly = B_TRUE;
feature.fi_flags |= ZFEATURE_FLAG_READONLY_COMPAT;
break;
case 'd':
desc = strdup(optarg);
@ -413,7 +411,7 @@ zhack_do_feature_ref(int argc, char **argv)
* disk later.
*/
feature.fi_uname = "zhack";
feature.fi_mos = B_FALSE;
feature.fi_flags = 0;
feature.fi_desc = NULL;
feature.fi_depends = nodeps;
feature.fi_feature = SPA_FEATURE_NONE;
@ -422,7 +420,7 @@ zhack_do_feature_ref(int argc, char **argv)
while ((c = getopt(argc, argv, "md")) != -1) {
switch (c) {
case 'm':
feature.fi_mos = B_TRUE;
feature.fi_flags |= ZFEATURE_FLAG_MOS;
break;
case 'd':
decr = B_TRUE;
@ -455,10 +453,10 @@ zhack_do_feature_ref(int argc, char **argv)
if (0 == zap_contains(mos, spa->spa_feat_for_read_obj,
feature.fi_guid)) {
feature.fi_can_readonly = B_FALSE;
feature.fi_flags &= ~ZFEATURE_FLAG_READONLY_COMPAT;
} else if (0 == zap_contains(mos, spa->spa_feat_for_write_obj,
feature.fi_guid)) {
feature.fi_can_readonly = B_TRUE;
feature.fi_flags |= ZFEATURE_FLAG_READONLY_COMPAT;
} else {
fatal(spa, FTAG, "feature is not enabled: %s", feature.fi_guid);
}


@ -22,7 +22,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
@ -4986,7 +4986,8 @@ zpool_do_upgrade(int argc, char **argv)
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
const char *ro = fi->fi_can_readonly ?
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);


@ -25,7 +25,7 @@
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013, 2014 by Delphix. All rights reserved.
*/
#include <ctype.h>
@ -34,6 +34,7 @@
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/dmu.h>
#include <sys/zfs_ioctl.h>
@ -83,7 +84,6 @@ safe_malloc(size_t size)
*
* Read while computing incremental checksum
*/
static size_t
ssread(void *buf, size_t len, zio_cksum_t *cksum)
{
@ -92,7 +92,7 @@ ssread(void *buf, size_t len, zio_cksum_t *cksum)
if ((outlen = fread(buf, len, 1, send_stream)) == 0)
return (0);
if (do_cksum && cksum) {
if (do_cksum) {
if (do_byteswap)
fletcher_4_incremental_byteswap(buf, len, cksum);
else
@ -102,6 +102,34 @@ ssread(void *buf, size_t len, zio_cksum_t *cksum)
return (outlen);
}
static size_t
read_hdr(dmu_replay_record_t *drr, zio_cksum_t *cksum)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
size_t r = ssread(drr, sizeof (*drr) - sizeof (zio_cksum_t), cksum);
if (r == 0)
return (0);
zio_cksum_t saved_cksum = *cksum;
r = ssread(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), cksum);
if (r == 0)
return (0);
if (!ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.drr_checksum.drr_checksum) &&
!ZIO_CHECKSUM_EQUAL(saved_cksum,
drr->drr_u.drr_checksum.drr_checksum)) {
fprintf(stderr, "invalid checksum\n");
(void) printf("Incorrect checksum in record header.\n");
(void) printf("Expected checksum = %llx/%llx/%llx/%llx\n",
saved_cksum.zc_word[0],
saved_cksum.zc_word[1],
saved_cksum.zc_word[2],
saved_cksum.zc_word[3]);
exit(1);
}
return (sizeof (*drr));
}
/*
* Print part of a block in ASCII characters
*/
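
read_hdr() above verifies the per-record checksum that the send side now
embeds in each record header; a hedged sketch of exercising it from the
command line (a second -v enables the per-record checksum lines printed
further below):

zfs send tank/fs@snap | zstreamdump -vv
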
@ -183,8 +211,10 @@ main(int argc, char *argv[])
struct drr_free *drrf = &thedrr.drr_u.drr_free;
struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
struct drr_write_embedded *drrwe = &thedrr.drr_u.drr_write_embedded;
struct drr_checksum *drrc = &thedrr.drr_u.drr_checksum;
char c;
boolean_t verbose = B_FALSE;
boolean_t very_verbose = B_FALSE;
boolean_t first = B_TRUE;
/*
* dump flag controls whether the contents of any modified data blocks
@ -202,11 +232,14 @@ main(int argc, char *argv[])
do_cksum = B_FALSE;
break;
case 'v':
if (verbose)
very_verbose = B_TRUE;
verbose = B_TRUE;
break;
case 'd':
dump = B_TRUE;
verbose = B_TRUE;
very_verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr,
@ -230,7 +263,7 @@ main(int argc, char *argv[])
send_stream = stdin;
pcksum = zc;
while (ssread(drr, sizeof (dmu_replay_record_t), &zc)) {
while (read_hdr(drr, &zc)) {
/*
* If this is the first DMU record being processed, check for
@ -432,7 +465,7 @@ main(int argc, char *argv[])
if (verbose) {
(void) printf("WRITE object = %llu type = %u "
"checksum type = %u\n"
"offset = %llu length = %llu "
" offset = %llu length = %llu "
"props = %llx\n",
(u_longlong_t)drrw->drr_object,
drrw->drr_type,
@ -476,9 +509,9 @@ main(int argc, char *argv[])
if (verbose) {
(void) printf("WRITE_BYREF object = %llu "
"checksum type = %u props = %llx\n"
"offset = %llu length = %llu\n"
" offset = %llu length = %llu\n"
"toguid = %llx refguid = %llx\n"
"refobject = %llu refoffset = %llu\n",
" refobject = %llu refoffset = %llu\n",
(u_longlong_t)drrwbr->drr_object,
drrwbr->drr_checksumtype,
(u_longlong_t)drrwbr->drr_key.ddk_prop,
@ -538,7 +571,7 @@ main(int argc, char *argv[])
if (verbose) {
(void) printf("WRITE_EMBEDDED object = %llu "
"offset = %llu length = %llu\n"
"toguid = %llx comp = %u etype = %u "
" toguid = %llx comp = %u etype = %u "
"lsize = %u psize = %u\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
@ -553,6 +586,13 @@ main(int argc, char *argv[])
P2ROUNDUP(drrwe->drr_psize, 8), &zc);
break;
}
if (drr->drr_type != DRR_BEGIN && very_verbose) {
(void) printf(" checksum = %llx/%llx/%llx/%llx\n",
(longlong_t)drrc->drr_checksum.zc_word[0],
(longlong_t)drrc->drr_checksum.zc_word[1],
(longlong_t)drrc->drr_checksum.zc_word[2],
(longlong_t)drrc->drr_checksum.zc_word[3]);
}
pcksum = zc;
}
free(buf);


@ -3586,7 +3586,8 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(2 * width - 1);
dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize,
ZIO_PRIORITY_SYNC_READ);
/*
* Pick a random index and compute the offsets into packobj and bigobj.
@ -5705,8 +5706,10 @@ ztest_run(ztest_shared_t *zs)
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
for (uint64_t object = 1; object < 50; object++)
dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
for (uint64_t object = 1; object < 50; object++) {
dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
ZIO_PRIORITY_SYNC_READ);
}
spa_close(spa, FTAG);
@ -5905,6 +5908,7 @@ ztest_init(ztest_shared_t *zs)
}
VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
nvlist_free(nvroot);
nvlist_free(props);
VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =


@ -668,8 +668,8 @@ typedef struct recvflags {
boolean_t nomount;
} recvflags_t;
extern int zfs_receive(libzfs_handle_t *, const char *, recvflags_t *,
int, avl_tree_t *);
extern int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 0x1,


@ -3535,7 +3535,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
}
static int
zbookmark_compare(const void *a, const void *b)
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
@ -3598,7 +3598,7 @@ zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
zc.zc_nvlist_dst_size;
count -= zc.zc_nvlist_dst_size;
qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare);
qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);


@ -64,8 +64,9 @@ extern void zfs_setprop_error(libzfs_handle_t *, zfs_prop_t, int, char *);
/* We need to use something for ENODATA. */
#define ENODATA EIDRM
static int zfs_receive_impl(libzfs_handle_t *, const char *, recvflags_t *,
int, const char *, nvlist_t *, avl_tree_t *, char **, int, uint64_t *);
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **, int,
uint64_t *);
static const zio_cksum_t zero_cksum = { 0 };
@ -188,10 +189,28 @@ ddt_update(libzfs_handle_t *hdl, dedup_table_t *ddt, zio_cksum_t *cs,
}
static int
cksum_and_write(const void *buf, uint64_t len, zio_cksum_t *zc, int outfd)
dump_record(dmu_replay_record_t *drr, void *payload, int payload_len,
zio_cksum_t *zc, int outfd)
{
fletcher_4_incremental_native(buf, len, zc);
return (write(outfd, buf, len));
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
@ -218,26 +237,18 @@ cksummer(void *arg)
char *buf = zfs_alloc(dda->dedup_hdl, SPA_MAXBLOCKSIZE);
dmu_replay_record_t thedrr;
dmu_replay_record_t *drr = &thedrr;
struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
struct drr_end *drre = &thedrr.drr_u.drr_end;
struct drr_object *drro = &thedrr.drr_u.drr_object;
struct drr_write *drrw = &thedrr.drr_u.drr_write;
struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
struct drr_write_embedded *drrwe = &thedrr.drr_u.drr_write_embedded;
FILE *ofp;
int outfd;
dmu_replay_record_t wbr_drr = {0};
struct drr_write_byref *wbr_drrr = &wbr_drr.drr_u.drr_write_byref;
dedup_table_t ddt;
zio_cksum_t stream_cksum;
uint64_t physmem = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
uint64_t numbuckets;
ddt.max_ddt_size =
MAX((physmem * MAX_DDT_PHYSMEM_PERCENT)/100,
SMALLEST_POSSIBLE_MAX_DDT_MB<<20);
MAX((physmem * MAX_DDT_PHYSMEM_PERCENT) / 100,
SMALLEST_POSSIBLE_MAX_DDT_MB << 20);
numbuckets = ddt.max_ddt_size/(sizeof (dedup_entry_t));
numbuckets = ddt.max_ddt_size / (sizeof (dedup_entry_t));
/*
* numbuckets must be a power of 2. Increase number to
@ -253,32 +264,29 @@ cksummer(void *arg)
ddt.numhashbits = high_order_bit(numbuckets) - 1;
ddt.ddt_full = B_FALSE;
/* Initialize the write-by-reference block. */
wbr_drr.drr_type = DRR_WRITE_BYREF;
wbr_drr.drr_payloadlen = 0;
outfd = dda->outputfd;
ofp = fdopen(dda->inputfd, "r");
while (ssread(drr, sizeof (dmu_replay_record_t), ofp) != 0) {
while (ssread(drr, sizeof (*drr), ofp) != 0) {
switch (drr->drr_type) {
case DRR_BEGIN:
{
int fflags;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int fflags;
int sz = 0;
ZIO_SET_CHECKSUM(&stream_cksum, 0, 0, 0, 0);
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
/* set the DEDUP feature flag for this stream */
fflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
fflags |= (DMU_BACKUP_FEATURE_DEDUP |
DMU_BACKUP_FEATURE_DEDUPPROPS);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, fflags);
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
goto out;
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM && drr->drr_payloadlen != 0) {
int sz = drr->drr_payloadlen;
sz = drr->drr_payloadlen;
if (sz > SPA_MAXBLOCKSIZE) {
buf = zfs_realloc(dda->dedup_hdl, buf,
@ -287,64 +295,60 @@ cksummer(void *arg)
(void) ssread(buf, sz, ofp);
if (ferror(stdin))
perror("fread");
if (cksum_and_write(buf, sz, &stream_cksum,
outfd) == -1)
goto out;
}
if (dump_record(drr, buf, sz, &stream_cksum,
outfd) != 0)
goto out;
break;
}
case DRR_END:
{
struct drr_end *drre = &drr->drr_u.drr_end;
/* use the recalculated checksum */
ZIO_SET_CHECKSUM(&drre->drr_checksum,
stream_cksum.zc_word[0], stream_cksum.zc_word[1],
stream_cksum.zc_word[2], stream_cksum.zc_word[3]);
if ((write(outfd, drr,
sizeof (dmu_replay_record_t))) == -1)
drre->drr_checksum = stream_cksum;
if (dump_record(drr, NULL, 0, &stream_cksum,
outfd) != 0)
goto out;
break;
}
case DRR_OBJECT:
{
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
goto out;
struct drr_object *drro = &drr->drr_u.drr_object;
if (drro->drr_bonuslen > 0) {
(void) ssread(buf,
P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
ofp);
if (cksum_and_write(buf,
P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
&stream_cksum, outfd) == -1)
goto out;
}
if (dump_record(drr, buf,
P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
&stream_cksum, outfd) != 0)
goto out;
break;
}
case DRR_SPILL:
{
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
goto out;
struct drr_spill *drrs = &drr->drr_u.drr_spill;
(void) ssread(buf, drrs->drr_length, ofp);
if (cksum_and_write(buf, drrs->drr_length,
&stream_cksum, outfd) == -1)
if (dump_record(drr, buf, drrs->drr_length,
&stream_cksum, outfd) != 0)
goto out;
break;
}
case DRR_FREEOBJECTS:
{
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
if (dump_record(drr, NULL, 0, &stream_cksum,
outfd) != 0)
goto out;
break;
}
case DRR_WRITE:
{
struct drr_write *drrw = &drr->drr_u.drr_write;
dataref_t dataref;
(void) ssread(buf, drrw->drr_length, ofp);
@ -382,7 +386,13 @@ cksummer(void *arg)
if (ddt_update(dda->dedup_hdl, &ddt,
&drrw->drr_key.ddk_cksum, drrw->drr_key.ddk_prop,
&dataref)) {
dmu_replay_record_t wbr_drr = {0};
struct drr_write_byref *wbr_drrr =
&wbr_drr.drr_u.drr_write_byref;
/* block already present in stream */
wbr_drr.drr_type = DRR_WRITE_BYREF;
wbr_drrr->drr_object = drrw->drr_object;
wbr_drrr->drr_offset = drrw->drr_offset;
wbr_drrr->drr_length = drrw->drr_length;
@ -402,19 +412,13 @@ cksummer(void *arg)
wbr_drrr->drr_key.ddk_prop =
drrw->drr_key.ddk_prop;
if (cksum_and_write(&wbr_drr,
sizeof (dmu_replay_record_t), &stream_cksum,
outfd) == -1)
if (dump_record(&wbr_drr, NULL, 0,
&stream_cksum, outfd) != 0)
goto out;
} else {
/* block not previously seen */
if (cksum_and_write(drr,
sizeof (dmu_replay_record_t), &stream_cksum,
outfd) == -1)
goto out;
if (cksum_and_write(buf,
drrw->drr_length,
&stream_cksum, outfd) == -1)
if (dump_record(drr, buf, drrw->drr_length,
&stream_cksum, outfd) != 0)
goto out;
}
break;
@ -422,28 +426,27 @@ cksummer(void *arg)
case DRR_WRITE_EMBEDDED:
{
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
goto out;
struct drr_write_embedded *drrwe =
&drr->drr_u.drr_write_embedded;
(void) ssread(buf,
P2ROUNDUP((uint64_t)drrwe->drr_psize, 8), ofp);
if (cksum_and_write(buf,
if (dump_record(drr, buf,
P2ROUNDUP((uint64_t)drrwe->drr_psize, 8),
&stream_cksum, outfd) == -1)
&stream_cksum, outfd) != 0)
goto out;
break;
}
case DRR_FREE:
{
if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
&stream_cksum, outfd) == -1)
if (dump_record(drr, NULL, 0, &stream_cksum,
outfd) != 0)
goto out;
break;
}
default:
(void) printf("INVALID record type 0x%x\n",
(void) fprintf(stderr, "INVALID record type 0x%x\n",
drr->drr_type);
/* should never happen, so assert */
assert(B_FALSE);
@ -1470,18 +1473,11 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sizeof (drr.drr_u.drr_begin.drr_toname),
"%s@%s", zhp->zfs_name, tosnap);
drr.drr_payloadlen = buflen;
err = cksum_and_write(&drr, sizeof (drr), &zc, outfd);
/* write header nvlist */
if (err != -1 && packbuf != NULL) {
err = cksum_and_write(packbuf, buflen, &zc,
outfd);
}
err = dump_record(&drr, packbuf, buflen, &zc, outfd);
free(packbuf);
if (err == -1) {
err = errno;
if (err != 0)
goto stderr_out;
}
/* write end record */
bzero(&drr, sizeof (drr));
@ -1714,6 +1710,8 @@ recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
int rv;
int len = ilen;
assert(ilen <= SPA_MAXBLOCKSIZE);
do {
rv = read(fd, cp, len);
cp += rv;
@ -2501,7 +2499,7 @@ zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
* zfs_receive_one() will take care of it (ie,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, flags, fd,
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd,
action_handlep);
if (error == ENODATA) {
@ -2634,9 +2632,9 @@ recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd,
uint64_t *action_handlep)
{
zfs_cmd_t zc = { 0 };
@ -2801,10 +2799,15 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
}
if (flags->verbose)
(void) printf("found clone origin %s\n", zc.zc_string);
} else if (originsnap) {
(void) strncpy(zc.zc_string, originsnap, ZFS_MAXNAMELEN);
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
zc.zc_string);
}
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE));
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap);
if (stream_wantsnewfs) {
/*
@ -3182,9 +3185,10 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
}
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
int infd, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl,
char **top_zfs, int cleanup_fd, uint64_t *action_handlep)
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd,
uint64_t *action_handlep)
{
int err;
dmu_replay_record_t drr, drr_noswap;
@ -3203,6 +3207,12 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
@ -3275,14 +3285,14 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
*cp = '\0';
sendfs = nonpackage_sendfs;
}
return (zfs_receive_one(hdl, infd, tosnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl,
top_zfs, cleanup_fd, action_handlep));
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
cleanup_fd, action_handlep));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags,
&drr, &zcksum, top_zfs, cleanup_fd, action_handlep));
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cleanup_fd, action_handlep));
}
}
@ -3293,18 +3303,24 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
* (-1 will override -2).
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
int infd, avl_tree_t *stream_avl)
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
int cleanup_fd;
uint64_t action_handle = 0;
char *originsnap = NULL;
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL);
VERIFY(cleanup_fd >= 0);
err = zfs_receive_impl(hdl, tosnap, flags, infd, NULL, NULL,
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, cleanup_fd, &action_handle);
VERIFY(0 == close(cleanup_fd));


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
*/
@ -532,18 +532,30 @@ lzc_send(const char *snapname, const char *from, int fd,
}
/*
* If fromsnap is NULL, a full (non-incremental) stream will be estimated.
* "from" can be NULL, a snapshot, or a bookmark.
*
* If from is NULL, a full (non-incremental) stream will be estimated. This
* is calculated very efficiently.
*
* If from is a snapshot, lzc_send_space uses the deadlists attached to
* each snapshot to efficiently estimate the stream size.
*
* If from is a bookmark, the indirect blocks in the destination snapshot
* are traversed, looking for blocks with a birth time since the creation TXG of
* the snapshot this bookmark was created from. This will result in
* significantly more I/O and be less efficient than a send space estimation on
* an equivalent snapshot.
*/
int
lzc_send_space(const char *snapname, const char *fromsnap, uint64_t *spacep)
lzc_send_space(const char *snapname, const char *from, uint64_t *spacep)
{
nvlist_t *args;
nvlist_t *result;
int err;
args = fnvlist_alloc();
if (fromsnap != NULL)
fnvlist_add_string(args, "fromsnap", fromsnap);
if (from != NULL)
fnvlist_add_string(args, "from", from);
err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
nvlist_free(args);
if (err == 0)
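
The three "from" cases in the comment above have command-line analogues; a
hedged sketch (names are placeholders; whether the CLI routes through
lzc_send_space is an implementation detail):

zfs send -nv tank/fs@b                    # full stream: cheap estimate
zfs send -nv -i tank/fs@a tank/fs@b       # from a snapshot: deadlist-based
zfs send -nv -i tank/fs#mark tank/fs@b    # from a bookmark: walks indirect
                                          # blocks, noticeably more I/O
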


@ -135,8 +135,18 @@ extern int aok;
/*
* DTrace SDT probes have different signatures in userland than they do in
* kernel. If they're being used in kernel code, re-define them out of
* the kernel. If they're being used in kernel code, re-define them out of
* existence for their counterparts in libzpool.
*
* Here's an example of how to use the set-error probes in userland:
* zfs$target:::set-error /arg0 == EBUSY/ {stack();}
*
* Here's an example of how to use DTRACE_PROBE probes in userland:
* If there is a probe declared as follows:
* DTRACE_PROBE2(zfs__probe_name, uint64_t, blkid, dnode_t *, dn);
* Then you can use it as follows:
* zfs$target:::probe2 /copyinstr(arg0) == "zfs__probe_name"/
* {printf("%u %p\n", arg1, arg2);}
*/
#ifdef DTRACE_PROBE


@ -26,9 +26,7 @@ _libzpool= libzpool
.endif
.endif
.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_ARCH} == "amd64" || \
${MACHINE_CPUARCH} == "arm" || ${MACHINE_ARCH} == "i386" || \
${MACHINE_CPUARCH} == "mips" || ${MACHINE_CPUARCH} == "powerpc"
.if ${MACHINE_CPUARCH} != "sparc64"
_drti= drti
_libdtrace= libdtrace
.endif


@ -1,4 +1,63 @@
-*- coding: utf-8 -*-
Changes for APR 1.5.2
*) SECURITY: CVE-2015-1829 (cve.mitre.org)
APR applications using APR named pipe support on Windows can be
vulnerable to a pipe squatting attack from a local process; the extent
of the vulnerability, when present, depends on the application.
Initial analysis and report was provided by John Hernandez of Casaba
Security via HP SSRT Security Alert. [Yann Ylavic]
*) apr_atomic: Fix errors when building on Visual Studio 2013 while
maintaining the ability to build on Visual Studio 6 with Windows
Server 2003 R2 SDK. PR 57191. [Gregg Smith]
*) Switch to generic atomics for early/unpatched Solaris 10 not exporting
some atomic functions. PR 55418. [Yann Ylavic]
*) apr_file_mktemp() on HP-UX: Remove limitation of 26 temporary files
per process. PR 57677. [Jeff Trawick]
*) apr_escape: Correctly calculate the size of the returned string in
apr_escape_path and set the correct return value in case we actually
escape the string. [<aduryagin gmail.com>] PR 57230.
*) pollcb on Windows: Handle calls with no file/socket descriptors.
Follow up to PR 49882. [Jeff Trawick, Yann Ylavic]
*) apr_poll(cb): fix error paths returned values and leaks. [Yann Ylavic]
*) apr_thread_cond_*wait() on BeOS: Fix broken logic. PR 45800.
[Jochen Voss (no e-mail)]
*) apr_skiplist: Optimize the number of allocations by reusing pooled or
malloc()ed nodes for the lifetime of the skiplist. [Yann Ylavic]
*) apr_skiplist: Fix possible multiple-free() on the same value in
apr_skiplist_remove_all(). [Yann Ylavic]
*) apr_pollset: On z/OS, threadsafe apr_pollset_poll() may return
"EDC8102I Operation would block" under load.
[Pat Odonnell <patod us.ibm.com>]
*) On z/OS, apr_sockaddr_info_get() with family == APR_UNSPEC was not
returning IPv4 addresses if any IPv6 addresses were returned.
[Eric Covener]
*) Windows cmake build: Fix an incompatibility with cmake 2.8.12 and
later. [Jeff Trawick]
*) apr_global_mutex/apr_proc_mutex: Resolve failures with the
POSIX sem implementation in environments which receive signals.
[Jeff Trawick]
*) apr_skiplist: Fix potential corruption of skiplists leading to
wrong results or crashes. [Takashi Sato <takashi tks st>, Eric Covener]
PR 56654.
*) Improve platform detection by updating config.guess and config.sub.
[Rainer Jung]
Changes for APR 1.5.1
*) apr_os_proc_mutex_get() on Unix: Avoid segfault for cross-
@ -37,8 +96,8 @@ Changes for APR 1.5.1
*) Correct a regression in 1.5.0 which affected out-of-tree
builds on Unix. [Rainer Jung]
*) Improve platform detection for bundled expat by updating
config.guess and config.sub. [Rainer Jung]
*) Improve platform detection by updating config.guess and config.sub.
[Rainer Jung]
Changes for APR 1.5.0

View file

@ -234,6 +234,7 @@ SET(APR_TEST_SOURCES
test/testprocmutex.c
test/testrand.c
test/testshm.c
test/testskiplist.c
test/testsleep.c
test/testsock.c
test/testsockets.c
@ -252,7 +253,6 @@ SET(APR_TEST_SOURCES
SET(install_targets)
SET(install_bin_pdb)
SET(install_lib_pdb)
# libapr-1 is shared, apr-1 is static
ADD_LIBRARY(libapr-1 SHARED ${APR_SOURCES} ${APR_PUBLIC_HEADERS_GENERATED} libapr.rc)
@ -264,7 +264,6 @@ ADD_DEPENDENCIES(libapr-1 test_char_header)
ADD_LIBRARY(apr-1 STATIC ${APR_SOURCES} ${APR_PUBLIC_HEADERS_GENERATED})
SET(install_targets ${install_targets} apr-1)
SET(install_lib_pdb ${install_lib_pdb} ${PROJECT_BINARY_DIR}/apr-1.pdb)
TARGET_LINK_LIBRARIES(apr-1 ${APR_SYSTEM_LIBS})
SET_TARGET_PROPERTIES(apr-1 PROPERTIES COMPILE_DEFINITIONS "APR_DECLARE_STATIC;WINNT")
ADD_DEPENDENCIES(apr-1 test_char_header)
@ -272,12 +271,10 @@ ADD_DEPENDENCIES(apr-1 test_char_header)
# libaprapp-1 and aprapp-1 are static
ADD_LIBRARY(libaprapp-1 STATIC misc/win32/apr_app.c misc/win32/internal.c ${APR_PUBLIC_HEADERS_GENERATED})
SET(install_targets ${install_targets} libaprapp-1)
SET(install_lib_pdb ${install_lib_pdb} ${PROJECT_BINARY_DIR}/libaprapp-1.pdb)
SET_TARGET_PROPERTIES(libaprapp-1 PROPERTIES COMPILE_DEFINITIONS "APR_APP;WINNT")
ADD_LIBRARY(aprapp-1 STATIC misc/win32/apr_app.c misc/win32/internal.c ${APR_PUBLIC_HEADERS_GENERATED})
SET(install_targets ${install_targets} aprapp-1)
SET(install_lib_pdb ${install_lib_pdb} ${PROJECT_BINARY_DIR}/aprapp-1.pdb)
SET_TARGET_PROPERTIES(aprapp-1 PROPERTIES COMPILE_DEFINITIONS "APR_DECLARE_STATIC;APR_APP;WINNT")
IF(APR_BUILD_TESTAPR)
@ -394,10 +391,6 @@ IF(INSTALL_PDB)
INSTALL(FILES ${install_bin_pdb}
DESTINATION bin
CONFIGURATIONS RelWithDebInfo Debug)
INSTALL(FILES ${install_lib_pdb}
DESTINATION lib
CONFIGURATIONS RelWithDebInfo Debug)
ENDIF()
INSTALL(FILES ${APR_PUBLIC_HEADERS_STATIC} ${APR_PUBLIC_HEADERS_GENERATED} DESTINATION include)

View file

@ -129,11 +129,11 @@ check: $(TARGET_LIB)
etags:
etags `find . -name '*.[ch]'`
make_tools_dir:
$(APR_MKDIR) tools
OBJECTS_gen_test_char = tools/gen_test_char.lo $(LOCAL_LIBS)
tools/gen_test_char.lo: make_tools_dir
tools/gen_test_char.lo: tools/gen_test_char.c
$(APR_MKDIR) tools
$(LT_COMPILE)
tools/gen_test_char@EXEEXT@: $(OBJECTS_gen_test_char)
$(LINK_PROG) $(OBJECTS_gen_test_char) $(ALL_LIBS)

View file

@ -1,5 +1,5 @@
Apache Portable Runtime
Copyright (c) 2000-2014 The Apache Software Foundation.
Copyright (c) 2000-2015 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View file

@ -50,6 +50,7 @@ include $(APR_WORK)/build/NWGNUhead.inc
#
XINCDIRS += \
$(APR)/include \
$(APR)/include/private \
$(APR)/include/arch/NetWare \
$(APR)/include/arch/unix \
$(APR)/memory/unix \
@ -293,11 +294,13 @@ FILES_nlm_exports = \
FILES_lib_objs = \
$(OBJDIR)/apr_atomic.o \
$(OBJDIR)/apr_cpystrn.o \
$(OBJDIR)/apr_escape.o \
$(OBJDIR)/apr_fnmatch.o \
$(OBJDIR)/apr_getpass.o \
$(OBJDIR)/apr_hash.o \
$(OBJDIR)/apr_pools.o \
$(OBJDIR)/apr_random.o \
$(OBJDIR)/apr_skiplist.o \
$(OBJDIR)/apr_snprintf.o \
$(OBJDIR)/apr_strings.o \
$(OBJDIR)/apr_strnatcmp.o \
@ -407,7 +410,7 @@ endif
vpath %.c atomic/netware:strings:tables:passwd:lib:time/unix
vpath %.c file_io/unix:locks/netware:misc/netware:misc/unix:threadproc/netware
vpath %.c poll/unix:shmem/unix:support/unix:random/unix
vpath %.c dso/netware:memory/unix:mmap/unix:user/netware
vpath %.c dso/netware:memory/unix:mmap/unix:user/netware:encoding
# Use the win32 network_io if Winsock is being used
ifndef USE_STDSOCKETS

View file

@ -907,69 +907,6 @@ SOURCE=.\include\apr_version.h
# Begin Source File
SOURCE=.\include\apr_want.h
!IF "$(CFG)" == "apr - Win32 Release"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\LibR\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "apr - Win32 Debug"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\LibD\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "apr - Win32 Release9x"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\9x\LibR\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "apr - Win32 Debug9x"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\9x\LibD\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "apr - x64 Release"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\x64\LibR\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "apr - x64 Debug"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\x64\LibD\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ENDIF
# End Source File
# End Group
# End Target

View file

@ -3,7 +3,7 @@
Summary: Apache Portable Runtime library
Name: apr
Version: 1.5.1
Version: 1.5.2
Release: 1
License: Apache Software License
Group: System Environment/Libraries
@ -76,7 +76,7 @@ rm -rf $RPM_BUILD_ROOT
%defattr(-,root,root,-)
%doc docs/APRDesign.html docs/canonical_filenames.html
%doc docs/incomplete_types docs/non_apr_programs
%doc --parents html
%doc html
%{_bindir}/apr*config
%{_libdir}/libapr-%{aprver}.*a
%{_libdir}/libapr-%{aprver}.so

View file

@ -251,7 +251,7 @@ file_io/win32/filestat.lo: file_io/win32/filestat.c .make.dirs include/apr_alloc
file_io/win32/filesys.lo: file_io/win32/filesys.c .make.dirs include/apr_allocator.h include/apr_errno.h include/apr_general.h include/apr_pools.h include/apr_strings.h include/apr_thread_mutex.h include/apr_want.h
file_io/win32/flock.lo: file_io/win32/flock.c .make.dirs
file_io/win32/open.lo: file_io/win32/open.c .make.dirs include/apr_allocator.h include/apr_dso.h include/apr_errno.h include/apr_file_info.h include/apr_file_io.h include/apr_general.h include/apr_global_mutex.h include/apr_inherit.h include/apr_network_io.h include/apr_pools.h include/apr_portable.h include/apr_proc_mutex.h include/apr_shm.h include/apr_strings.h include/apr_tables.h include/apr_thread_mutex.h include/apr_thread_proc.h include/apr_time.h include/apr_user.h include/apr_want.h
file_io/win32/pipe.lo: file_io/win32/pipe.c .make.dirs include/apr_allocator.h include/apr_errno.h include/apr_file_info.h include/apr_file_io.h include/apr_general.h include/apr_inherit.h include/apr_pools.h include/apr_strings.h include/apr_tables.h include/apr_thread_mutex.h include/apr_time.h include/apr_user.h include/apr_want.h
file_io/win32/pipe.lo: file_io/win32/pipe.c .make.dirs include/apr_allocator.h include/apr_errno.h include/apr_escape.h include/apr_file_info.h include/apr_file_io.h include/apr_general.h include/apr_inherit.h include/apr_pools.h include/apr_strings.h include/apr_tables.h include/apr_thread_mutex.h include/apr_time.h include/apr_user.h include/apr_want.h
file_io/win32/readwrite.lo: file_io/win32/readwrite.c .make.dirs include/apr_allocator.h include/apr_errno.h include/apr_file_info.h include/apr_file_io.h include/apr_general.h include/apr_inherit.h include/apr_lib.h include/apr_pools.h include/apr_strings.h include/apr_tables.h include/apr_thread_mutex.h include/apr_time.h include/apr_user.h include/apr_want.h
file_io/win32/seek.lo: file_io/win32/seek.c .make.dirs include/apr_allocator.h include/apr_errno.h include/apr_file_info.h include/apr_file_io.h include/apr_general.h include/apr_inherit.h include/apr_pools.h include/apr_tables.h include/apr_thread_mutex.h include/apr_time.h include/apr_user.h include/apr_want.h

contrib/apr/configure
View file

@ -6802,10 +6802,10 @@ if test "x$apr_preload_done" != "xyes" ; then
*-apple-darwin*)
if test "x$CPPFLAGS" = "x"; then
test "x$silent" != "xyes" && echo " setting CPPFLAGS to \"-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK -no-cpp-precomp\""
CPPFLAGS="-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK -no-cpp-precomp"
test "x$silent" != "xyes" && echo " setting CPPFLAGS to \"-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK\""
CPPFLAGS="-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK"
else
apr_addto_bugger="-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK -no-cpp-precomp"
apr_addto_bugger="-DDARWIN -DSIGPROCMASK_SETS_THREAD_MASK"
for i in $apr_addto_bugger; do
apr_addto_duplicate="0"
for j in $CPPFLAGS; do
@ -18794,7 +18794,34 @@ if test "${enable_nonportable_atomics+set}" = set; then :
else
case $host_cpu in
i[456]86) force_generic_atomics=yes ;;
*) force_generic_atomics=no ;;
*) force_generic_atomics=no
case $host in
*solaris2.10*)
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <atomic.h>
int
main ()
{
void *ptr = NULL; atomic_cas_ptr(&ptr, NULL, NULL);
;
return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
else
force_generic_atomics=yes
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
if test $force_generic_atomics = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: nonportable atomic support disabled, system needs Patch-ID 118884 or 118885" >&5
$as_echo "$as_me: nonportable atomic support disabled, system needs Patch-ID 118884 or 118885" >&6;}
fi
;;
esac
;;
esac
fi
@ -22292,7 +22319,7 @@ else
fi
done
for ac_func in getpass getpassphrase gmtime_r localtime_r mkstemp
for ac_func in getpass getpassphrase gmtime_r localtime_r
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@ -22304,6 +22331,23 @@ _ACEOF
fi
done
case $host in
*-hp-hpux*)
;;
*)
for ac_func in mkstemp
do :
ac_fn_c_check_func "$LINENO" "mkstemp" "ac_cv_func_mkstemp"
if test "x$ac_cv_func_mkstemp" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_MKSTEMP 1
_ACEOF
fi
done
;;
esac
@ -23902,7 +23946,7 @@ _ACEOF
if test "${ac_cv_sizeof_off_t}${apr_cv_use_lfs64}" = "4yes"; then
# Enable LFS
aprlfs=1
for ac_func in mmap64 sendfile64 sendfilev64 mkstemp64 readdir64_r
for ac_func in mmap64 sendfile64 sendfilev64 readdir64_r
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@ -23914,6 +23958,23 @@ _ACEOF
fi
done
case $host in
*-hp-hpux*)
;;
*)
for ac_func in mkstemp64
do :
ac_fn_c_check_func "$LINENO" "mkstemp64" "ac_cv_func_mkstemp64"
if test "x$ac_cv_func_mkstemp64" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_MKSTEMP64 1
_ACEOF
fi
done
;;
esac
elif test "${ac_cv_sizeof_off_t}" != "${ac_cv_sizeof_size_t}"; then
# unsure if using -gt above is as portable; can't foresee where
# off_t can legitimately be smaller than size_t

View file

@ -640,7 +640,20 @@ AC_ARG_ENABLE(nonportable-atomics,
],
[case $host_cpu in
i[[456]]86) force_generic_atomics=yes ;;
*) force_generic_atomics=no ;;
*) force_generic_atomics=no
case $host in
*solaris2.10*)
AC_TRY_COMPILE(
[#include <atomic.h>],
[void *ptr = NULL; atomic_cas_ptr(&ptr, NULL, NULL);],,
[force_generic_atomics=yes]
)
if test $force_generic_atomics = yes; then
AC_MSG_NOTICE([nonportable atomic support disabled, system needs Patch-ID 118884 or 118885])
fi
;;
esac
;;
esac
])
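The AC_TRY_COMPILE above boils down to the following test program; if it
fails to compile, the system's atomic.h lacks atomic_cas_ptr() and generic
atomics are forced (a reconstruction of the generated conftest, not new
logic):

    #include <atomic.h>

    int main(void)
    {
        void *ptr = NULL;
        atomic_cas_ptr(&ptr, NULL, NULL);
        return 0;
    }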
@ -1400,7 +1413,15 @@ if test "$native_mmap_emul" = "1"; then
mmap="1"
fi
AC_CHECK_FUNCS(memmove, [ have_memmove="1" ], [have_memmove="0" ])
AC_CHECK_FUNCS([getpass getpassphrase gmtime_r localtime_r mkstemp])
AC_CHECK_FUNCS([getpass getpassphrase gmtime_r localtime_r])
case $host in
*-hp-hpux*)
dnl mkstemp is limited to 26 temporary files (a-z); use APR replacement
;;
*)
AC_CHECK_FUNCS(mkstemp)
;;
esac
AC_SUBST(fork)
AC_SUBST(have_inet_addr)
@ -1801,7 +1822,15 @@ APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
if test "${ac_cv_sizeof_off_t}${apr_cv_use_lfs64}" = "4yes"; then
# Enable LFS
aprlfs=1
AC_CHECK_FUNCS([mmap64 sendfile64 sendfilev64 mkstemp64 readdir64_r])
AC_CHECK_FUNCS([mmap64 sendfile64 sendfilev64 readdir64_r])
case $host in
*-hp-hpux*)
dnl mkstemp64 is limited to 26 temporary files (a-z); use APR replacement
;;
*)
AC_CHECK_FUNCS(mkstemp64)
;;
esac
elif test "${ac_cv_sizeof_off_t}" != "${ac_cv_sizeof_size_t}"; then
# unsure if using -gt above is as portable; can't foresee where
# off_t can legitimately be smaller than size_t

View file

@ -436,6 +436,8 @@ APR_DECLARE(apr_status_t) apr_escape_path(char *escaped, const char *path,
while ((c = *s) && slen) {
if (TEST_CHAR(c, T_OS_ESCAPE_PATH)) {
d = c2x(c, '%', d);
size += 2;
found = 1;
}
else {
*d++ = c;
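For instance, escaping a space turns the single input byte ' ' into the
three output bytes "%20"; the loop already counts one byte per input
character, so each escaped character contributes the two extra bytes that
the added size += 2 accounts for.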

View file

@ -40,7 +40,9 @@ extern "C" {
/**
* apr_skiplist_compare is the function type that must be implemented
* per object type that is used in a skip list for comparisons to maintain
* order
* order. A value <0 indicates placement after this node; a value of 0
* indicates collision with this exact node; a value >0 indicates placement
* before this node.
* */
typedef int (*apr_skiplist_compare) (void *, void *);
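For illustration, a compare callback over C strings could be a standard
three-way comparison (a sketch, not part of this change):

    #include <string.h>

    static int str_comp(void *a, void *b)
    {
        /* strcmp() already returns <0, 0 or >0 as required */
        return strcmp((const char *)a, (const char *)b);
    }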
@ -171,7 +173,8 @@ APR_DECLARE(void *) apr_skiplist_next(apr_skiplist *sl, apr_skiplistnode **iter)
APR_DECLARE(void *) apr_skiplist_previous(apr_skiplist *sl, apr_skiplistnode **iter);
/**
* Insert an element into the skip list using the specified comparison function.
* Insert an element into the skip list using the specified comparison function
* if it does not already exist.
* @param sl The skip list
* @param data The element to insert
* @param comp The comparison function to use for placement into the skip list
@ -180,7 +183,8 @@ APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert_compare(apr_skiplist *sl,
void *data, apr_skiplist_compare comp);
/**
* Insert an element into the skip list using the existing comparison function.
* Insert an element into the skip list using the existing comparison function
* if it does not already exist (as determined by the comparison function)
* @param sl The skip list
* @param data The element to insert
* @remark If no comparison function has been set for the skip list, the element
@ -190,7 +194,7 @@ APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert(apr_skiplist* sl, void *data
/**
* Remove an element from the skip list using the specified comparison function for
* locating the element.
* locating the element. In the case of duplicates, the 1st entry will be removed.
* @param sl The skip list
* @param data The element to remove
* @param myfree A function to be called for each removed element
@ -203,7 +207,7 @@ APR_DECLARE(int) apr_skiplist_remove_compare(apr_skiplist *sl, void *data,
/**
* Remove an element from the skip list using the existing comparison function for
* locating the element.
* locating the element. In the case of duplicates, the 1st entry will be removed.
* @param sl The skip list
* @param data The element to remove
* @param myfree A function to be called for each removed element
@ -229,7 +233,7 @@ APR_DECLARE(void) apr_skiplist_remove_all(apr_skiplist *sl, apr_skiplist_freefun
APR_DECLARE(void) apr_skiplist_destroy(apr_skiplist *sl, apr_skiplist_freefunc myfree);
/**
* Return the first element in the skip list, leaving the element in the skip list.
* Return the first element in the skip list, removing the element from the skip list.
* @param sl The skip list
* @param myfree A function to be called for the removed element
* @remark NULL will be returned if there are no elements

View file

@ -38,7 +38,7 @@
*/
#define APR_COPYRIGHT "Copyright (c) 2000-2014 The Apache Software " \
#define APR_COPYRIGHT "Copyright (c) 2000-2015 The Apache Software " \
"Foundation or its licensors, as applicable."
/* The numeric compile-time version constants. These constants are the
@ -62,7 +62,7 @@
* The Patch Level never includes API changes, simply bug fixes.
* Reset to 0 when upgrading APR_MINOR_VERSION
*/
#define APR_PATCH_VERSION 1
#define APR_PATCH_VERSION 2
/**
* The symbol APR_IS_DEV_VERSION is only defined for internal,

View file

@ -765,7 +765,7 @@ SOURCE=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\Release\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\Release\gen_test_char /Fe.\Release\gen_test_char.exe .\tools\gen_test_char.c
.\Release\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -776,7 +776,7 @@ InputPath=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\Debug\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /EHsc /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\Debug\gen_test_char /Fe.\Debug\gen_test_char.exe .\tools\gen_test_char.c
.\Debug\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -787,7 +787,7 @@ InputPath=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\9x\Release\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\9x\Release\gen_test_char /Fe.\9x\Release\gen_test_char.exe .\tools\gen_test_char.c
.\9x\Release\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -798,7 +798,7 @@ InputPath=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\9x\Debug\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /EHsc /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\9x\Debug\gen_test_char /Fe.\9x\Debug\gen_test_char.exe .\tools\gen_test_char.c
.\9x\Debug\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -809,7 +809,7 @@ InputPath=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\x64\Release\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\x64\Release\gen_test_char /Fe.\x64\Release\gen_test_char.exe .\tools\gen_test_char.c
.\x64\Release\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -820,7 +820,7 @@ InputPath=.\include\apr_escape.h
# Begin Custom Build - Creating gen_test_char.exe and apr_escape_test_char.h
InputPath=.\include\apr_escape.h
".\x64\Debug\gen_test_char.exe" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
cl.exe /nologo /W3 /EHsc /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /I ".\include" /Fo.\x64\Debug\gen_test_char /Fe.\x64\Debug\gen_test_char.exe .\tools\gen_test_char.c
.\x64\Debug\gen_test_char.exe > .\include\apr_escape_test_char.h
@ -952,69 +952,6 @@ SOURCE=.\include\apr_version.h
# Begin Source File
SOURCE=.\include\apr_want.h
!IF "$(CFG)" == "libapr - Win32 Release"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "libapr - Win32 Debug"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "libapr - Win32 Release9x"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "libapr - Win32 Debug9x"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "libapr - x64 Release"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ELSEIF "$(CFG)" == "libapr - x64 Debug"
# Begin Custom Build
InputPath=.\include\apr_want.h
".\include\apr_escape_test_char.h" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
type .\include\apr.hw > .\include\apr.h
# End Custom Build
!ENDIF
# End Source File
# End Group
# Begin Source File

View file

@ -114,7 +114,9 @@ static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
usec = apr_time_usec(now);
apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
}
psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
do {
psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
} while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
if (psem == (sem_t *)SEM_FAILED) {
if (errno == ENAMETOOLONG) {
/* Oh well, good try */
@ -122,7 +124,9 @@ static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
} else {
return errno;
}
psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
do {
psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
} while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
}
if (psem == (sem_t *)SEM_FAILED) {
@ -140,7 +144,12 @@ static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex,
static apr_status_t proc_mutex_posix_acquire(apr_proc_mutex_t *mutex)
{
if (sem_wait(mutex->psem_interproc) < 0) {
int rc;
do {
rc = sem_wait(mutex->psem_interproc);
} while (rc < 0 && errno == EINTR);
if (rc < 0) {
return errno;
}
mutex->curr_locked = 1;
@ -149,7 +158,12 @@ static apr_status_t proc_mutex_posix_acquire(apr_proc_mutex_t *mutex)
static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex)
{
if (sem_trywait(mutex->psem_interproc) < 0) {
int rc;
do {
rc = sem_trywait(mutex->psem_interproc);
} while (rc < 0 && errno == EINTR);
if (rc < 0) {
if (errno == EAGAIN) {
return APR_EBUSY;
}
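The same retry idiom can be factored into one helper rather than repeated
at each call site; a minimal sketch (hypothetical wrapper, not part of
APR):

    #include <errno.h>
    #include <semaphore.h>

    /* Retry sem_wait() while it is interrupted by a signal (EINTR);
     * returns 0 on success or -1 with errno set on real failure. */
    static int sem_wait_intr(sem_t *sem)
    {
        int rc;
        do {
            rc = sem_wait(sem);
        } while (rc < 0 && errno == EINTR);
        return rc;
    }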

View file

@ -1135,21 +1135,12 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
* room to hold the NUL terminator.
*/
if (ps.node->first_avail == ps.node->endp) {
if (psprintf_flush(&ps.vbuff) == -1) {
if (pool->abort_fn) {
pool->abort_fn(APR_ENOMEM);
}
return NULL;
}
if (psprintf_flush(&ps.vbuff) == -1)
goto error;
}
if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
if (pool->abort_fn)
pool->abort_fn(APR_ENOMEM);
return NULL;
}
if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1)
goto error;
strp = ps.vbuff.curpos;
*strp++ = '\0';
@ -1195,6 +1186,15 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
list_insert(active, node);
return strp;
error:
if (pool->abort_fn)
pool->abort_fn(APR_ENOMEM);
if (ps.got_a_new_node) {
ps.node->next = ps.free;
allocator_free(pool->allocator, ps.node);
}
return NULL;
}
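The rewrite above is the standard centralized-cleanup idiom: every failure
jumps to a single label that releases whatever was acquired so far. A
generic sketch (hypothetical function, not APR code):

    #include <stdio.h>
    #include <stdlib.h>

    static char *load_line(const char *path)
    {
        char *buf = NULL;
        FILE *f = fopen(path, "r");
        if (f == NULL)
            goto error;
        if ((buf = malloc(128)) == NULL)
            goto error;
        if (fgets(buf, 128, f) == NULL)
            goto error;
        fclose(f);
        return buf;
    error:
        if (f != NULL)
            fclose(f);
        free(buf);              /* free(NULL) is a no-op */
        return NULL;
    }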

View file

@ -39,6 +39,8 @@ static char *stuffbuffer(char *buf, apr_size_t bufsize, const char *s)
static char *apr_error_string(apr_status_t statcode)
{
switch (statcode) {
case APR_ENOSTAT:
return "Could not perform a stat on the file.";
case APR_ENOPOOL:
return "A new pool could not be created.";
case APR_EBADDATE:
@ -73,7 +75,10 @@ static char *apr_error_string(apr_status_t statcode)
return "The specified IP address is invalid.";
case APR_EBADMASK:
return "The specified network mask is invalid.";
case APR_ESYMNOTFOUND:
return "Could not find the requested symbol.";
case APR_ENOTENOUGHENTROPY:
return "Not enough entropy to continue.";
case APR_INCHILD:
return
"Your code just forked, and you are currently executing in the "
@ -128,10 +133,12 @@ static char *apr_error_string(apr_status_t statcode)
return "The given path is misformatted or contained invalid characters";
case APR_EPATHWILD:
return "The given path contained wildcard characters";
case APR_EBUSY:
return "The given lock was busy.";
case APR_EPROC_UNKNOWN:
return "The process is not recognized.";
case APR_EGENERAL:
return "Internal error";
return "Internal error (specific information not available)";
default:
return "Error string not specified yet";
}

View file

@ -325,6 +325,16 @@ static apr_status_t call_resolver(apr_sockaddr_t **sa,
hints.ai_flags = AI_ADDRCONFIG;
}
#endif
#ifdef __MVS__
/* z/OS will not return IPv4 address under AF_UNSPEC if any IPv6 results
* are returned, w/o AI_ALL.
*/
if (family == APR_UNSPEC) {
hints.ai_flags |= AI_ALL;
}
#endif
if(hostname == NULL) {
#ifdef AI_PASSIVE
/* If hostname is NULL, assume we are trying to bind to all

View file

@ -145,13 +145,22 @@ apr_status_t apr_socket_create(apr_socket_t **new, int ofamily, int type,
#ifndef HAVE_SOCK_CLOEXEC
{
int flags;
apr_status_t rv;
if ((flags = fcntl((*new)->socketdes, F_GETFD)) == -1)
return errno;
if ((flags = fcntl((*new)->socketdes, F_GETFD)) == -1) {
rv = errno;
close((*new)->socketdes);
(*new)->socketdes = -1;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl((*new)->socketdes, F_SETFD, flags) == -1)
return errno;
if (fcntl((*new)->socketdes, F_SETFD, flags) == -1) {
rv = errno;
close((*new)->socketdes);
(*new)->socketdes = -1;
return rv;
}
}
#endif
@ -306,13 +315,22 @@ apr_status_t apr_socket_accept(apr_socket_t **new, apr_socket_t *sock,
#ifndef HAVE_ACCEPT4
{
int flags;
apr_status_t rv;
if ((flags = fcntl((*new)->socketdes, F_GETFD)) == -1)
return errno;
if ((flags = fcntl((*new)->socketdes, F_GETFD)) == -1) {
rv = errno;
close((*new)->socketdes);
(*new)->socketdes = -1;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl((*new)->socketdes, F_SETFD, flags) == -1)
return errno;
if (fcntl((*new)->socketdes, F_SETFD, flags) == -1) {
rv = errno;
close((*new)->socketdes);
(*new)->socketdes = -1;
return rv;
}
}
#endif
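The repeated pattern here (and in the epoll, kqueue and event-port
backends below) is easiest to read as one helper; a sketch of the intent
(hypothetical function, not part of the change):

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Mark fd close-on-exec; on any failure close it and return errno
     * so the caller cannot leak the descriptor. */
    static int set_cloexec_or_close(int fd)
    {
        int flags = fcntl(fd, F_GETFD);
        if (flags == -1 || fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) {
            int rv = errno;
            close(fd);
            return rv;
        }
        return 0;
    }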

View file

@ -104,14 +104,22 @@ static apr_status_t impl_pollset_create(apr_pollset_t *pollset,
#ifndef HAVE_EPOLL_CREATE1
{
int flags;
int fd_flags;
if ((flags = fcntl(fd, F_GETFD)) == -1)
return errno;
if ((fd_flags = fcntl(fd, F_GETFD)) == -1) {
rv = errno;
close(fd);
pollset->p = NULL;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(fd, F_SETFD, flags) == -1)
return errno;
fd_flags |= FD_CLOEXEC;
if (fcntl(fd, F_SETFD, fd_flags) == -1) {
rv = errno;
close(fd);
pollset->p = NULL;
return rv;
}
}
#endif
@ -122,11 +130,13 @@ static apr_status_t impl_pollset_create(apr_pollset_t *pollset,
((rv = apr_thread_mutex_create(&pollset->p->ring_lock,
APR_THREAD_MUTEX_DEFAULT,
p)) != APR_SUCCESS)) {
close(fd);
pollset->p = NULL;
return rv;
}
#else
if (flags & APR_POLLSET_THREADSAFE) {
close(fd);
pollset->p = NULL;
return APR_ENOTIMPL;
}
@ -345,14 +355,23 @@ static apr_status_t impl_pollcb_create(apr_pollcb_t *pollcb,
#ifndef HAVE_EPOLL_CREATE1
{
int flags;
int fd_flags;
apr_status_t rv;
if ((flags = fcntl(fd, F_GETFD)) == -1)
return errno;
if ((fd_flags = fcntl(fd, F_GETFD)) == -1) {
rv = errno;
close(fd);
pollcb->fd = -1;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(fd, F_SETFD, flags) == -1)
return errno;
fd_flags |= FD_CLOEXEC;
if (fcntl(fd, F_SETFD, fd_flags) == -1) {
rv = errno;
close(fd);
pollcb->fd = -1;
return rv;
}
}
#endif

View file

@ -115,12 +115,20 @@ static apr_status_t impl_pollset_create(apr_pollset_t *pollset,
{
int flags;
if ((flags = fcntl(pollset->p->kqueue_fd, F_GETFD)) == -1)
return errno;
if ((flags = fcntl(pollset->p->kqueue_fd, F_GETFD)) == -1) {
rv = errno;
close(pollset->p->kqueue_fd);
pollset->p = NULL;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(pollset->p->kqueue_fd, F_SETFD, flags) == -1)
return errno;
if (fcntl(pollset->p->kqueue_fd, F_SETFD, flags) == -1) {
rv = errno;
close(pollset->p->kqueue_fd);
pollset->p = NULL;
return rv;
}
}
pollset->p->result_set = apr_palloc(p, pollset->p->setsize * sizeof(apr_pollfd_t));
@ -338,13 +346,22 @@ static apr_status_t impl_pollcb_create(apr_pollcb_t *pollcb,
{
int flags;
apr_status_t rv;
if ((flags = fcntl(fd, F_GETFD)) == -1)
return errno;
if ((flags = fcntl(fd, F_GETFD)) == -1) {
rv = errno;
close(fd);
pollcb->fd = -1;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(fd, F_SETFD, flags) == -1)
return errno;
if (fcntl(fd, F_SETFD, flags) == -1) {
rv = errno;
close(fd);
pollcb->fd = -1;
return rv;
}
}
pollcb->fd = fd;

View file

@ -240,26 +240,25 @@ static apr_status_t impl_pollset_poll(apr_pollset_t *pollset,
{
int ret;
apr_status_t rv = APR_SUCCESS;
#ifdef WIN32
apr_interval_time_t orig_timeout = timeout;
#endif
if (timeout > 0) {
timeout /= 1000;
}
#ifdef WIN32
/* WSAPoll() requires at least one socket. */
if (pollset->nelts == 0) {
*num = 0;
if (orig_timeout > 0) {
apr_sleep(orig_timeout);
if (timeout > 0) {
apr_sleep(timeout);
return APR_TIMEUP;
}
return APR_SUCCESS;
}
if (timeout > 0) {
timeout /= 1000;
}
ret = WSAPoll(pollset->p->pollset, pollset->nelts, (int)timeout);
#else
if (timeout > 0) {
timeout /= 1000;
}
ret = poll(pollset->p->pollset, pollset->nelts, timeout);
#endif
(*num) = ret;
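The reordering matters because of units: APR interval times are expressed
in microseconds, poll()/WSAPoll() take milliseconds, and apr_sleep()
expects microseconds, so dividing before the empty-set check made the
substitute sleep 1000 times too short. Illustrative values:

    apr_interval_time_t timeout = apr_time_from_sec(2); /* 2000000 us */
    int poll_ms = (int)(timeout / 1000);                /* 2000 ms for poll() */
    apr_sleep(timeout);                                 /* sleeps 2 s, not 2 ms */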
@ -398,12 +397,23 @@ static apr_status_t impl_pollcb_poll(apr_pollcb_t *pollcb,
apr_status_t rv = APR_SUCCESS;
apr_uint32_t i;
#ifdef WIN32
/* WSAPoll() requires at least one socket. */
if (pollcb->nelts == 0) {
if (timeout > 0) {
apr_sleep(timeout);
return APR_TIMEUP;
}
return APR_SUCCESS;
}
if (timeout > 0) {
timeout /= 1000;
}
#ifdef WIN32
ret = WSAPoll(pollcb->pollset.ps, pollcb->nelts, (int)timeout);
#else
if (timeout > 0) {
timeout /= 1000;
}
ret = poll(pollcb->pollset.ps, pollcb->nelts, timeout);
#endif
if (ret < 0) {

View file

@ -136,6 +136,9 @@ APR_DECLARE(apr_status_t) apr_pollcb_create_ex(apr_pollcb_t **ret_pollcb,
}
pollcb->provider = provider;
}
else if (rv != APR_SUCCESS) {
return rv;
}
*ret_pollcb = pollcb;
return APR_SUCCESS;

View file

@ -188,12 +188,20 @@ static apr_status_t impl_pollset_create(apr_pollset_t *pollset,
{
int flags;
if ((flags = fcntl(pollset->p->port_fd, F_GETFD)) == -1)
return errno;
if ((flags = fcntl(pollset->p->port_fd, F_GETFD)) == -1) {
rv = errno;
close(pollset->p->port_fd);
pollset->p = NULL;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(pollset->p->port_fd, F_SETFD, flags) == -1)
return errno;
if (fcntl(pollset->p->port_fd, F_SETFD, flags) == -1) {
rv = errno;
close(pollset->p->port_fd);
pollset->p = NULL;
return rv;
}
}
pollset->p->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
@ -478,13 +486,22 @@ static apr_status_t impl_pollcb_create(apr_pollcb_t *pollcb,
{
int flags;
apr_status_t rv;
if ((flags = fcntl(pollcb->fd, F_GETFD)) == -1)
return errno;
if ((flags = fcntl(pollcb->fd, F_GETFD)) == -1) {
rv = errno;
close(pollcb->fd);
pollcb->fd = -1;
return rv;
}
flags |= FD_CLOEXEC;
if (fcntl(pollcb->fd, F_SETFD, flags) == -1)
return errno;
if (fcntl(pollcb->fd, F_SETFD, flags) == -1) {
rv = errno;
close(pollcb->fd);
pollcb->fd = -1;
return rv;
}
}
pollcb->pollset.port = apr_palloc(p, size * sizeof(port_event_t));

View file

@ -272,7 +272,7 @@ static apr_status_t asio_pollset_create(apr_pollset_t *pollset,
APR_THREAD_MUTEX_DEFAULT,
p) != APR_SUCCESS) {
DBG1(1, "apr_thread_mutex_create returned %d\n", rv);
pollset = NULL;
pollset->p = NULL;
return rv;
}
rv = msgget(IPC_PRIVATE, S_IWUSR+S_IRUSR); /* user r/w perms */
@ -280,7 +280,7 @@ static apr_status_t asio_pollset_create(apr_pollset_t *pollset,
#if DEBUG
perror(__FUNCTION__ " msgget returned < 0 ");
#endif
pollset = NULL;
pollset->p = NULL;
return rv;
}
@ -292,7 +292,7 @@ static apr_status_t asio_pollset_create(apr_pollset_t *pollset,
APR_RING_INIT(&priv->prior_ready_ring, asio_elem_t, link);
#else /* APR doesn't have threads but caller wants a threadsafe pollset */
pollset = NULL;
pollset->p = NULL;
return APR_ENOTIMPL;
#endif
@ -304,6 +304,7 @@ static apr_status_t asio_pollset_create(apr_pollset_t *pollset,
priv->query_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
if ((!priv->pollset) || (!priv->query_set)) {
pollset->p = NULL;
return APR_ENOMEM;
}
}
@ -314,6 +315,10 @@ static apr_status_t asio_pollset_create(apr_pollset_t *pollset,
priv->size = size;
priv->result_set = apr_palloc(p, size * sizeof(apr_pollfd_t));
if (!priv->result_set) {
if (flags & APR_POLLSET_THREADSAFE) {
msgctl(priv->msg_q, IPC_RMID, NULL);
}
pollset->p = NULL;
return APR_ENOMEM;
}
@ -379,6 +384,7 @@ static apr_status_t asio_pollset_add(apr_pollset_t *pollset,
APR_RING_REMOVE(elem, link);
DBG1(3, "used recycled memory at %08p\n", elem);
elem->state = ASIO_INIT;
elem->a.aio_cflags = 0;
}
else {
elem = (asio_elem_t *) apr_pcalloc(pollset->pool, sizeof(asio_elem_t));
@ -659,6 +665,7 @@ static apr_status_t asio_pollset_poll(apr_pollset_t *pollset,
if (ret == 1) {
DBG(4, "asyncio() completed inline\n");
/* it's ready now */
elem->state = ASIO_COMPLETE;
APR_RING_INSERT_TAIL(&(priv->ready_ring), elem, asio_elem_t,
link);
}

View file

@ -25,12 +25,18 @@
#include "apr_skiplist.h"
typedef struct {
apr_skiplistnode **data;
size_t size, pos;
apr_pool_t *p;
} apr_skiplist_q;
struct apr_skiplist {
apr_skiplist_compare compare;
apr_skiplist_compare comparek;
int height;
int preheight;
int size;
size_t size;
apr_skiplistnode *top;
apr_skiplistnode *bottom;
/* These two are needed for appending */
@ -38,6 +44,8 @@ struct apr_skiplist {
apr_skiplistnode *bottomend;
apr_skiplist *index;
apr_array_header_t *memlist;
apr_skiplist_q nodes_q,
stack_q;
apr_pool_t *pool;
};
@ -52,20 +60,15 @@ struct apr_skiplistnode {
apr_skiplist *sl;
};
#ifndef MIN
#define MIN(a,b) ((a<b)?(a):(b))
#endif
static int get_b_rand(void)
{
static int ph = 32; /* More bits than we will ever use */
static apr_uint32_t randseq;
static int randseq;
if (ph > 31) { /* Num bits in return of rand() */
ph = 0;
randseq = (apr_uint32_t) rand();
randseq = rand();
}
ph++;
return ((randseq & (1 << (ph - 1))) >> (ph - 1));
return randseq & (1 << ph++);
}
typedef struct {
@ -103,7 +106,7 @@ APR_DECLARE(void *) apr_skiplist_alloc(apr_skiplist *sl, size_t size)
memlist++;
}
/* no free chunks */
ptr = apr_pcalloc(sl->pool, size);
ptr = apr_palloc(sl->pool, size);
if (!ptr) {
return ptr;
}
@ -122,7 +125,7 @@ APR_DECLARE(void *) apr_skiplist_alloc(apr_skiplist *sl, size_t size)
return ptr;
}
else {
return calloc(1, size);
return malloc(size);
}
}
@ -149,27 +152,73 @@ APR_DECLARE(void) apr_skiplist_free(apr_skiplist *sl, void *mem)
}
}
static apr_status_t skiplist_qpush(apr_skiplist_q *q, apr_skiplistnode *m)
{
if (q->pos >= q->size) {
apr_skiplistnode **data;
size_t size = (q->pos) ? q->pos * 2 : 32;
if (q->p) {
data = apr_palloc(q->p, size * sizeof(*data));
if (data) {
memcpy(data, q->data, q->pos * sizeof(*data));
}
}
else {
data = realloc(q->data, size * sizeof(*data));
}
if (!data) {
return APR_ENOMEM;
}
q->data = data;
q->size = size;
}
q->data[q->pos++] = m;
return APR_SUCCESS;
}
static APR_INLINE apr_skiplistnode *skiplist_qpop(apr_skiplist_q *q)
{
return (q->pos > 0) ? q->data[--q->pos] : NULL;
}
static APR_INLINE void skiplist_qclear(apr_skiplist_q *q)
{
q->pos = 0;
}
static apr_skiplistnode *skiplist_new_node(apr_skiplist *sl)
{
apr_skiplistnode *m = skiplist_qpop(&sl->nodes_q);
if (!m) {
if (sl->pool) {
m = apr_palloc(sl->pool, sizeof *m);
}
else {
m = malloc(sizeof *m);
}
}
return m;
}
static apr_status_t skiplist_free_node(apr_skiplist *sl, apr_skiplistnode *m)
{
return skiplist_qpush(&sl->nodes_q, m);
}
static apr_status_t skiplisti_init(apr_skiplist **s, apr_pool_t *p)
{
apr_skiplist *sl;
if (p) {
sl = apr_pcalloc(p, sizeof(apr_skiplist));
sl->memlist = apr_array_make(p, 20, sizeof(memlist_t));
sl->pool = sl->nodes_q.p = sl->stack_q.p = p;
}
else {
sl = calloc(1, sizeof(apr_skiplist));
if (!sl) {
return APR_ENOMEM;
}
}
#if 0
sl->compare = (apr_skiplist_compare) NULL;
sl->comparek = (apr_skiplist_compare) NULL;
sl->height = 0;
sl->preheight = 0;
sl->size = 0;
sl->top = NULL;
sl->bottom = NULL;
sl->index = NULL;
#endif
sl->pool = p;
*s = sl;
return APR_SUCCESS;
}
@ -248,56 +297,32 @@ APR_DECLARE(void) apr_skiplist_add_index(apr_skiplist *sl,
}
}
APR_DECLARE(apr_skiplistnode *) apr_skiplist_getlist(apr_skiplist *sl)
{
if (!sl->bottom) {
return NULL;
}
return sl->bottom->next;
}
APR_DECLARE(void *) apr_skiplist_find(apr_skiplist *sl, void *data, apr_skiplistnode **iter)
{
void *ret;
apr_skiplistnode *aiter;
if (!sl->compare) {
return 0;
}
if (iter) {
ret = apr_skiplist_find_compare(sl, data, iter, sl->compare);
}
else {
ret = apr_skiplist_find_compare(sl, data, &aiter, sl->compare);
}
return ret;
}
static int skiplisti_find_compare(apr_skiplist *sl, void *data,
apr_skiplistnode **ret,
apr_skiplist_compare comp)
{
apr_skiplistnode *m = NULL;
int count = 0;
apr_skiplistnode *m;
m = sl->top;
while (m) {
int compared;
compared = (m->next) ? comp(data, m->next->data) : -1;
if (compared == 0) {
m = m->next;
while (m->down) {
m = m->down;
if (m->next) {
int compared = comp(data, m->next->data);
if (compared == 0) {
m = m->next;
while (m->down) {
m = m->down;
}
*ret = m;
return count;
}
if (compared > 0) {
m = m->next;
count++;
continue;
}
*ret = m;
return count;
}
if ((m->next == NULL) || (compared < 0)) {
m = m->down;
count++;
}
else {
m = m->next;
count++;
}
m = m->down;
count++;
}
*ret = NULL;
return count;
@ -307,19 +332,47 @@ APR_DECLARE(void *) apr_skiplist_find_compare(apr_skiplist *sli, void *data,
apr_skiplistnode **iter,
apr_skiplist_compare comp)
{
apr_skiplistnode *m = NULL;
apr_skiplistnode *m;
apr_skiplist *sl;
if (!comp) {
if (iter) {
*iter = NULL;
}
return NULL;
}
if (comp == sli->compare || !sli->index) {
sl = sli;
}
else {
apr_skiplist_find(sli->index, (void *)comp, &m);
if (!m) {
if (iter) {
*iter = NULL;
}
return NULL;
}
sl = (apr_skiplist *) m->data;
}
skiplisti_find_compare(sl, data, iter, sl->comparek);
return (iter && *iter) ? ((*iter)->data) : NULL;
skiplisti_find_compare(sl, data, &m, sl->comparek);
if (iter) {
*iter = m;
}
return (m) ? m->data : NULL;
}
APR_DECLARE(void *) apr_skiplist_find(apr_skiplist *sl, void *data, apr_skiplistnode **iter)
{
return apr_skiplist_find_compare(sl, data, iter, sl->compare);
}
APR_DECLARE(apr_skiplistnode *) apr_skiplist_getlist(apr_skiplist *sl)
{
if (!sl->bottom) {
return NULL;
}
return sl->bottom->next;
}
APR_DECLARE(void *) apr_skiplist_next(apr_skiplist *sl, apr_skiplistnode **iter)
{
@ -339,98 +392,74 @@ APR_DECLARE(void *) apr_skiplist_previous(apr_skiplist *sl, apr_skiplistnode **i
return (*iter) ? ((*iter)->data) : NULL;
}
APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert(apr_skiplist *sl, void *data)
static APR_INLINE int skiplist_height(const apr_skiplist *sl)
{
if (!sl->compare) {
return 0;
}
return apr_skiplist_insert_compare(sl, data, sl->compare);
/* Skiplists (even empty) always have a top node, although this
* implementation defers its creation until the first insert, or
* deletes it with the last remove. We want the real height here.
*/
return sl->height ? sl->height : 1;
}
APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert_compare(apr_skiplist *sl, void *data,
apr_skiplist_compare comp)
{
apr_skiplistnode *m, *p, *tmp, *ret = NULL, **stack;
int nh = 1, ch, stacki;
if (!sl->top) {
sl->height = 1;
sl->topend = sl->bottomend = sl->top = sl->bottom =
(apr_skiplistnode *)apr_skiplist_alloc(sl, sizeof(apr_skiplistnode));
#if 0
sl->top->next = (apr_skiplistnode *)NULL;
sl->top->data = (apr_skiplistnode *)NULL;
sl->top->prev = (apr_skiplistnode *)NULL;
sl->top->up = (apr_skiplistnode *)NULL;
sl->top->down = (apr_skiplistnode *)NULL;
sl->top->nextindex = (apr_skiplistnode *)NULL;
sl->top->previndex = (apr_skiplistnode *)NULL;
#endif
sl->top->sl = sl;
apr_skiplistnode *m, *p, *tmp, *ret = NULL;
int ch, nh = 1;
if (!comp) {
return NULL;
}
ch = skiplist_height(sl);
if (sl->preheight) {
while (nh < sl->preheight && get_b_rand()) {
nh++;
}
}
else {
while (nh <= sl->height && get_b_rand()) {
while (nh <= ch && get_b_rand()) {
nh++;
}
}
/* Now we have the new height at which we wish to insert our new node */
/*
* Let us make sure that our tree is a least that tall (grow if
* necessary)
/* Now we have in nh the height at which we wish to insert our new node,
* and in ch the current height: don't create skip paths to the inserted
* element until the walk down through the tree (which decrements ch)
* reaches nh. From there, any walk down pushes the current node on a
* stack (the node(s) after which we would insert) to pop back through
* for insertion later.
*/
for (; sl->height < nh; sl->height++) {
sl->top->up =
(apr_skiplistnode *)apr_skiplist_alloc(sl, sizeof(apr_skiplistnode));
sl->top->up->down = sl->top;
sl->top = sl->topend = sl->top->up;
#if 0
sl->top->prev = sl->top->next = sl->top->nextindex =
sl->top->previndex = sl->top->up = NULL;
sl->top->data = NULL;
#endif
sl->top->sl = sl;
}
ch = sl->height;
/* Find the node (or node after which we would insert) */
/* Keep a stack to pop back through for insertion */
/* malloc() is OK since we free the temp stack */
m = sl->top;
stack = (apr_skiplistnode **)malloc(sizeof(apr_skiplistnode *) * (nh));
stacki = 0;
while (m) {
int compared = -1;
if (m->next) {
compared = comp(data, m->next->data);
}
if (compared == 0) {
free(stack); /* OK. was malloc'ed */
return 0;
}
if ((m->next == NULL) || (compared < 0)) {
if (ch <= nh) {
/* push on stack */
stack[stacki++] = m;
int compared = comp(data, m->next->data);
if (compared == 0) {
/* Keep the existing element(s) */
skiplist_qclear(&sl->stack_q);
return NULL;
}
if (compared > 0) {
m = m->next;
continue;
}
m = m->down;
ch--;
}
else {
m = m->next;
if (ch <= nh) {
/* push on stack */
skiplist_qpush(&sl->stack_q, m);
}
m = m->down;
ch--;
}
/* Pop the stack and insert nodes */
p = NULL;
for (; stacki > 0; stacki--) {
m = stack[stacki - 1];
tmp = (apr_skiplistnode *)apr_skiplist_alloc(sl, sizeof(apr_skiplistnode));
while ((m = skiplist_qpop(&sl->stack_q))) {
tmp = skiplist_new_node(sl);
tmp->next = m->next;
if (m->next) {
m->next->prev = tmp;
}
m->next = tmp;
tmp->prev = m;
tmp->up = NULL;
tmp->nextindex = tmp->previndex = NULL;
@ -438,17 +467,44 @@ APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert_compare(apr_skiplist *sl, vo
if (p) {
p->up = tmp;
}
else {
/* This sets ret to the bottom-most node we are inserting */
ret = tmp;
}
tmp->data = data;
tmp->sl = sl;
p = tmp;
}
/* Now we are sure the node is inserted, grow our tree to 'nh' tall */
for (; sl->height < nh; sl->height++) {
m = skiplist_new_node(sl);
tmp = skiplist_new_node(sl);
m->up = m->prev = m->nextindex = m->previndex = NULL;
m->next = tmp;
/* This sets ret to the bottom-most node we are inserting */
if (!p) {
m->down = sl->top;
m->data = NULL;
m->sl = sl;
if (sl->top) {
sl->top->up = m;
}
else {
sl->bottom = sl->bottomend = m;
}
sl->top = sl->topend = tmp->prev = m;
tmp->up = tmp->next = tmp->nextindex = tmp->previndex = NULL;
tmp->down = p;
tmp->data = data;
tmp->sl = sl;
if (p) {
p->up = tmp;
}
else {
/* This sets ret to the bottom-most node we are inserting */
ret = tmp;
sl->size++; /* this seems to go here for each element to be counted */
}
p = tmp;
}
free(stack); /* OK. was malloc'ed */
if (sl->index != NULL) {
/*
* this is a external insertion, we must insert into each index as
@ -457,25 +513,20 @@ APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert_compare(apr_skiplist *sl, vo
apr_skiplistnode *ni, *li;
li = ret;
for (p = apr_skiplist_getlist(sl->index); p; apr_skiplist_next(sl->index, &p)) {
ni = apr_skiplist_insert((apr_skiplist *) p->data, ret->data);
apr_skiplist *sli = (apr_skiplist *)p->data;
ni = apr_skiplist_insert_compare(sli, ret->data, sli->compare);
li->nextindex = ni;
ni->previndex = li;
li = ni;
}
}
else {
/* sl->size++; */
}
sl->size++;
return ret;
}
APR_DECLARE(int) apr_skiplist_remove(apr_skiplist *sl, void *data, apr_skiplist_freefunc myfree)
APR_DECLARE(apr_skiplistnode *) apr_skiplist_insert(apr_skiplist *sl, void *data)
{
if (!sl->compare) {
return 0;
}
return apr_skiplist_remove_compare(sl, data, myfree, sl->comparek);
return apr_skiplist_insert_compare(sl, data, sl->compare);
}
#if 0
@ -520,7 +571,7 @@ static int skiplisti_remove(apr_skiplist *sl, apr_skiplistnode *m, apr_skiplist_
if (!m && myfree && p->data) {
myfree(p->data);
}
apr_skiplist_free(sl, p);
skiplist_free_node(sl, p);
}
sl->size--;
while (sl->top && sl->top->next == NULL) {
@ -530,13 +581,14 @@ static int skiplisti_remove(apr_skiplist *sl, apr_skiplistnode *m, apr_skiplist_
if (sl->top) {
sl->top->up = NULL; /* Make it think its the top */
}
apr_skiplist_free(sl, p);
skiplist_free_node(sl, p);
sl->height--;
}
if (!sl->top) {
sl->bottom = NULL;
sl->bottom = sl->bottomend = NULL;
sl->topend = NULL;
}
return sl->height; /* return 1; ?? */
return skiplist_height(sl);
}
APR_DECLARE(int) apr_skiplist_remove_compare(apr_skiplist *sli,
@ -545,11 +597,17 @@ APR_DECLARE(int) apr_skiplist_remove_compare(apr_skiplist *sli,
{
apr_skiplistnode *m;
apr_skiplist *sl;
if (!comp) {
return 0;
}
if (comp == sli->comparek || !sli->index) {
sl = sli;
}
else {
apr_skiplist_find(sli->index, (void *)comp, &m);
if (!m) {
return 0;
}
sl = (apr_skiplist *) m->data;
}
skiplisti_find_compare(sl, data, &m, comp);
@ -562,6 +620,11 @@ APR_DECLARE(int) apr_skiplist_remove_compare(apr_skiplist *sli,
return skiplisti_remove(sl, m, myfree);
}
APR_DECLARE(int) apr_skiplist_remove(apr_skiplist *sl, void *data, apr_skiplist_freefunc myfree)
{
return apr_skiplist_remove_compare(sl, data, myfree, sl->comparek);
}
APR_DECLARE(void) apr_skiplist_remove_all(apr_skiplist *sl, apr_skiplist_freefunc myfree)
{
/*
@ -573,16 +636,18 @@ APR_DECLARE(void) apr_skiplist_remove_all(apr_skiplist *sl, apr_skiplist_freefun
m = sl->bottom;
while (m) {
p = m->next;
if (p && myfree && p->data)
if (myfree && p && p->data) {
myfree(p->data);
while (m) {
u = m->up;
apr_skiplist_free(sl, p);
m = u;
}
do {
u = m->up;
skiplist_free_node(sl, m);
m = u;
} while (m);
m = p;
}
sl->top = sl->bottom = NULL;
sl->topend = sl->bottomend = NULL;
sl->height = 0;
sl->size = 0;
}
@ -611,8 +676,7 @@ APR_DECLARE(void *) apr_skiplist_peek(apr_skiplist *a)
static void skiplisti_destroy(void *vsl)
{
apr_skiplist_destroy((apr_skiplist *) vsl, NULL);
apr_skiplist_free((apr_skiplist *) vsl, vsl);
apr_skiplist_destroy(vsl, NULL);
}
APR_DECLARE(void) apr_skiplist_destroy(apr_skiplist *sl, apr_skiplist_freefunc myfree)
@ -620,6 +684,13 @@ APR_DECLARE(void) apr_skiplist_destroy(apr_skiplist *sl, apr_skiplist_freefunc m
while (apr_skiplist_pop(sl->index, skiplisti_destroy) != NULL)
;
apr_skiplist_remove_all(sl, myfree);
if (!sl->pool) {
while (sl->nodes_q.pos)
free(sl->nodes_q.data[--sl->nodes_q.pos]);
free(sl->nodes_q.data);
free(sl->stack_q.data);
free(sl);
}
}
APR_DECLARE(apr_skiplist *) apr_skiplist_merge(apr_skiplist *sl1, apr_skiplist *sl2)
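Taken together, a minimal usage sketch of the reworked API (assumes the
declarations in apr_skiplist.h; error handling elided):

    static int int_comp(void *a, void *b)
    {
        int lhs = *(int *)a, rhs = *(int *)b;
        return (lhs < rhs) ? -1 : (lhs > rhs);
    }

    void skiplist_demo(apr_pool_t *p)
    {
        apr_skiplist *sl;
        int v1 = 1, v2 = 2;
        int *found;

        apr_skiplist_init(&sl, p);          /* pass NULL to use malloc()/free() */
        apr_skiplist_set_compare(sl, int_comp, int_comp);
        apr_skiplist_insert(sl, &v1);
        apr_skiplist_insert(sl, &v2);
        apr_skiplist_insert(sl, &v2);       /* duplicate: rejected, returns NULL */
        found = apr_skiplist_find(sl, &v2, NULL);   /* -> &v2 */
        apr_skiplist_destroy(sl, NULL);     /* also frees the recycled nodes */
    }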

View file

@ -914,8 +914,8 @@ fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
#ifndef BFD64
return 1;
#else
return (!(((offsetT) -1 << 31) & num)
|| (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
return (!(-((offsetT) 1 << 31) & num)
|| (-((offsetT) 1 << 31) & num) == -((offsetT) 1 << 31));
#endif
} /* fits_in_signed_long() */
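The point of the rewrite: left-shifting a negative value is undefined
behavior in C, so the mask is now built by shifting a positive one and
negating the result. Restated standalone:

    offsetT bad  = (offsetT) -1 << 31;    /* undefined: negative left operand */
    offsetT good = -((offsetT) 1 << 31);  /* well-defined: -(2^31) */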

View file

@ -34,7 +34,7 @@ COMPILER_RT_ABI fp_t __floatditf(di_int a) {
}
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - __builtin_clzll(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit. Extra

View file

@ -25,7 +25,7 @@ COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
if (a == 0) return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - __builtin_clzll(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit.
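The distinction: __builtin_clz() takes an unsigned int, so the 64-bit
argument was silently truncated, while __builtin_clzll() counts leading
zeros across the full unsigned long long. For example, with a = 1ULL << 40:

    /* __builtin_clz((unsigned)a)  -> clz(0), which is undefined       */
    /* __builtin_clzll(a)          -> 23, so exponent = 63 - 23 = 40   */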

View file

@ -18,55 +18,50 @@
/* Returns: the product of a + ib and c + id */
COMPILER_RT_ABI long double _Complex
__multc3(long double __a, long double __b, long double __c, long double __d)
__multc3(long double a, long double b, long double c, long double d)
{
long double __ac = __a * __c;
long double __bd = __b * __d;
long double __ad = __a * __d;
long double __bc = __b * __c;
long double ac = a * c;
long double bd = b * d;
long double ad = a * d;
long double bc = b * c;
long double _Complex z;
__real__ z = __ac - __bd;
__imag__ z = __ad + __bc;
if (crt_isnan(__real__ z) && crt_isnan(__imag__ z))
{
int __recalc = 0;
if (crt_isinf(__a) || crt_isinf(__b))
{
__a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a);
__b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b);
if (crt_isnan(__c))
__c = crt_copysignl(0, __c);
if (crt_isnan(__d))
__d = crt_copysignl(0, __d);
__recalc = 1;
__real__ z = ac - bd;
__imag__ z = ad + bc;
if (crt_isnan(__real__ z) && crt_isnan(__imag__ z)) {
int recalc = 0;
if (crt_isinf(a) || crt_isinf(b)) {
a = crt_copysignl(crt_isinf(a) ? 1 : 0, a);
b = crt_copysignl(crt_isinf(b) ? 1 : 0, b);
if (crt_isnan(c))
c = crt_copysignl(0, c);
if (crt_isnan(d))
d = crt_copysignl(0, d);
recalc = 1;
}
if (crt_isinf(__c) || crt_isinf(__d))
{
__c = crt_copysignl(crt_isinf(__c) ? 1 : 0, __c);
__d = crt_copysignl(crt_isinf(__d) ? 1 : 0, __d);
if (crt_isnan(__a))
__a = crt_copysignl(0, __a);
if (crt_isnan(__b))
__b = crt_copysignl(0, __b);
__recalc = 1;
if (crt_isinf(c) || crt_isinf(d)) {
c = crt_copysignl(crt_isinf(c) ? 1 : 0, c);
d = crt_copysignl(crt_isinf(d) ? 1 : 0, d);
if (crt_isnan(a))
a = crt_copysignl(0, a);
if (crt_isnan(b))
b = crt_copysignl(0, b);
recalc = 1;
}
if (!__recalc && (crt_isinf(__ac) || crt_isinf(__bd) ||
crt_isinf(__ad) || crt_isinf(__bc)))
{
if (crt_isnan(__a))
__a = crt_copysignl(0, __a);
if (crt_isnan(__b))
__b = crt_copysignl(0, __b);
if (crt_isnan(__c))
__c = crt_copysignl(0, __c);
if (crt_isnan(__d))
__d = crt_copysignl(0, __d);
__recalc = 1;
if (!recalc && (crt_isinf(ac) || crt_isinf(bd) ||
crt_isinf(ad) || crt_isinf(bc))) {
if (crt_isnan(a))
a = crt_copysignl(0, a);
if (crt_isnan(b))
b = crt_copysignl(0, b);
if (crt_isnan(c))
c = crt_copysignl(0, c);
if (crt_isnan(d))
d = crt_copysignl(0, d);
recalc = 1;
}
if (__recalc)
{
__real__ z = CRT_INFINITY * (__a * __c - __b * __d);
__imag__ z = CRT_INFINITY * (__a * __d + __b * __c);
if (recalc) {
__real__ z = CRT_INFINITY * (a * c - b * d);
__imag__ z = CRT_INFINITY * (a * d + b * c);
}
}
return z;

View file

@ -1678,6 +1678,12 @@ XML_ParseBuffer(XML_Parser parser, int len, int isFinal)
void * XMLCALL
XML_GetBuffer(XML_Parser parser, int len)
{
/* BEGIN MOZILLA CHANGE (sanity check len) */
if (len < 0) {
errorCode = XML_ERROR_NO_MEMORY;
return NULL;
}
/* END MOZILLA CHANGE */
switch (ps_parsing) {
case XML_SUSPENDED:
errorCode = XML_ERROR_SUSPENDED;
@ -1689,8 +1695,13 @@ XML_GetBuffer(XML_Parser parser, int len)
}
if (len > bufferLim - bufferEnd) {
/* FIXME avoid integer overflow */
int neededSize = len + (int)(bufferEnd - bufferPtr);
/* BEGIN MOZILLA CHANGE (sanity check neededSize) */
if (neededSize < 0) {
errorCode = XML_ERROR_NO_MEMORY;
return NULL;
}
/* END MOZILLA CHANGE */
#ifdef XML_CONTEXT_BYTES
int keep = (int)(bufferPtr - buffer);
@ -1719,7 +1730,15 @@ XML_GetBuffer(XML_Parser parser, int len)
bufferSize = INIT_BUFFER_SIZE;
do {
bufferSize *= 2;
} while (bufferSize < neededSize);
/* BEGIN MOZILLA CHANGE (prevent infinite loop on overflow) */
} while (bufferSize < neededSize && bufferSize > 0);
/* END MOZILLA CHANGE */
/* BEGIN MOZILLA CHANGE (sanity check bufferSize) */
if (bufferSize <= 0) {
errorCode = XML_ERROR_NO_MEMORY;
return NULL;
}
/* END MOZILLA CHANGE */
newBuf = (char *)MALLOC(bufferSize);
if (newBuf == 0) {
errorCode = XML_ERROR_NO_MEMORY;
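The added guards rely on neededSize going negative when the int addition
wraps, and on the doubling loop driving bufferSize non-positive instead of
spinning forever (strictly speaking signed overflow is undefined in C; the
checks target the wraparound seen in practice). The doubling guard,
restated standalone with `needed` standing in for neededSize:

    int size = 1024;
    while (size < needed && size > 0)
        size *= 2;          /* overflow drives size <= 0 */
    if (size <= 0)
        return NULL;        /* report XML_ERROR_NO_MEMORY */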

View file

@ -567,7 +567,7 @@ read_file_guts (cpp_reader *pfile, _cpp_file *file)
SSIZE_MAX to be much smaller than the actual range of the
type. Use INTTYPE_MAXIMUM unconditionally to ensure this
does not bite us. */
if (file->st.st_size > INTTYPE_MAXIMUM (ssize_t))
if (file->st.st_size > SSIZE_MAX)
{
cpp_error (pfile, CPP_DL_ERROR, "%s is too large", file->path);
return false;
@ -581,7 +581,7 @@ read_file_guts (cpp_reader *pfile, _cpp_file *file)
file->path);
return false;
}
else if (offset > INTTYPE_MAXIMUM (ssize_t) || (ssize_t)offset > size)
else if (offset > SSIZE_MAX || (ssize_t)offset > size)
{
cpp_error (pfile, CPP_DL_ERROR, "current position of %s is too large",
file->path);

View file

@ -811,7 +811,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
int error = 0;
char *buffer;
size_t buffer_length;
struct hv_kvp_ipaddr_value *ip_buffer;
struct hv_kvp_ipaddr_value *ip_buffer = NULL;
char cidr_mask[5];
int weight;
int i;

View file

@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
Copyright (C) 2009-2015 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

View file

@ -1,10 +1,166 @@
Following are change highlights associated with official releases. Important
bug fixes are all mentioned, but internal enhancements are omitted here for
brevity (even though they are more fun to write about). Much more detail can be
found in the git revision history:
bug fixes are all mentioned, but some internal enhancements are omitted here for
brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.0.0 (August 17, 2015)
This version contains many speed and space optimizations, both minor and
major. The major themes are generalization, unification, and simplification.
Although many of these optimizations cause no visible behavior change, their
cumulative effect is substantial.
New features:
- Normalize size class spacing to be consistent across the complete size
range. By default there are four size classes per size doubling, but this
is now configurable via the --with-lg-size-class-group option. Also add the
--with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
--with-lg-tiny-min options, which can be used to tweak page and size class
settings. Impacts:
+ Worst case performance for incrementally growing/shrinking reallocation
is improved because there are far fewer size classes, and therefore
copying happens less often.
+ Internal fragmentation is limited to 20% for all but the smallest size
classes (those less than four times the quantum). (1B + 4 KiB)
and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ Chunk fragmentation tends to be lower because there are fewer distinct run
sizes to pack.
- Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
"tcache.destroy" mallctls control tcache lifetime and flushing, and the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
control which tcache is used for each operation.
- Implement per thread heap profiling, as well as the ability to
enable/disable heap profiling on a per thread basis. Add the "prof.reset",
"prof.lg_sample", "thread.prof.name", "thread.prof.active",
"opt.prof_thread_active_init", "prof.thread_active_init", and
"thread.prof.active" mallctls.
- Add support for per arena application-specified chunk allocators, configured
via the "arena.<i>.chunk_hooks" mallctl.
- Refactor huge allocation to be managed by arenas, so that arenas now
function as general purpose independent allocators. This is important in
the context of user-specified chunk allocators, aside from the scalability
benefits. Related new statistics:
+ The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
"stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
mallctls provide high level per arena huge allocation statistics.
+ The "arenas.nhchunks", "arenas.hchunk.<i>.size",
"stats.arenas.<i>.hchunks.<j>.nmalloc",
"stats.arenas.<i>.hchunks.<j>.ndalloc",
"stats.arenas.<i>.hchunks.<j>.nrequests", and
"stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
statistics.
- Add the 'util' column to malloc_stats_print() output, which reports the
proportion of available regions that are currently in use for each small
size class.
- Add "alloc" and "free" modes for junk filling (see the "opt.junk"
mallctl), so that it is possible to separately enable junk filling for
allocation versus deallocation.
- Add the jemalloc-config script, which provides information about how
jemalloc was configured, and how to integrate it into application builds.
- Add metadata statistics, which are accessible via the "stats.metadata",
"stats.arenas.<i>.metadata.mapped", and
"stats.arenas.<i>.metadata.allocated" mallctls.
- Add the "stats.resident" mallctl, which reports the upper limit of
physically resident memory mapped by the allocator.
- Add per arena control over unused dirty page purging, via the
"arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
"stats.arenas.<i>.lg_dirty_mult" mallctls.
- Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
feature on/off during program execution.
- Add sdallocx(), which implements sized deallocation. The primary
optimization over dallocx() is the removal of a metadata read, which often
suffers an L1 cache miss.
- Add missing header includes in jemalloc/jemalloc.h, so that applications
only have to #include <jemalloc/jemalloc.h>.
- Add support for additional platforms:
+ Bitrig
+ Cygwin
+ DragonFlyBSD
+ iOS
+ OpenBSD
+ OpenRISC/or1k
Optimizations:
- Maintain dirty runs in per arena LRUs rather than in per arena trees of
dirty-run-containing chunks. In practice this change significantly reduces
dirty page purging volume.
- Integrate whole chunks into the unused dirty page purging machinery. This
reduces the cost of repeated huge allocation/deallocation, because it
effectively introduces a cache of chunks.
- Split the arena chunk map into two separate arrays, in order to increase
cache locality for the frequently accessed bits.
- Move small run metadata out of runs, into arena chunk headers. This reduces
run fragmentation, smaller runs reduce external fragmentation for small size
classes, and packed (less uniformly aligned) metadata layout improves CPU
cache set distribution.
- Randomly distribute large allocation base pointer alignment relative to page
boundaries in order to more uniformly utilize CPU cache sets. This can be
disabled via the --disable-cache-oblivious configure option, and queried via
the "config.cache_oblivious" mallctl.
- Micro-optimize the fast paths for the public API functions.
- Refactor thread-specific data to reside in a single structure. This assures
that only a single TLS read is necessary per call into the public API.
- Implement in-place huge allocation growing and shrinking.
- Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
additional optimizations that reduce maximum lookup depth to one or two
levels. This resolves what was a concurrency bottleneck for per arena huge
allocation, because a global data structure is critical for determining
which arenas own which huge allocations.
Incompatible changes:
- Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
warnings by default.
- Assure that the constness of malloc_usable_size()'s return type matches that
of the system implementation.
- Change the heap profile dump format to support per thread heap profiling,
rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
result, the bundled jeprof must now be used rather than the upstream
(gperftools) pprof.
- Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
internally deadlock on some platforms.
- Change the "arenas.nlruns" mallctl type from size_t to unsigned.
- Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
"stats.arenas.<i>.bins.<j>.curregs".
- Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
- Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
Removed features:
- Remove the *allocm() API, which is superseded by the *allocx() API.
- Remove the --enable-dss options, and make dss non-optional on all platforms
which support sbrk(2).
- Remove the "arenas.purge" mallctl, which was obsoleted by the
"arena.<i>.purge" mallctl in 3.1.0.
- Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
detects whether it is running inside Valgrind.
- Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
"stats.huge.ndalloc" mallctls.
- Remove the --enable-mremap option.
- Remove the "stats.chunks.current", "stats.chunks.total", and
"stats.chunks.high" mallctls.
Bug fixes:
- Fix the cactive statistic to decrease (rather than increase) when active
memory decreases. This regression was first released in 3.5.0.
- Fix OOM handling in memalign() and valloc(). A variant of this bug existed
in all releases since 2.0.0, which introduced these functions.
- Fix an OOM-related regression in arena_tcache_fill_small(), which could
cause cache corruption on OOM. This regression was present in all releases
from 2.2.0 through 3.6.0.
- Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
calloc(), and realloc() when profiling is enabled.
- Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
"secondary" precedence is specified, but sbrk(2) is not supported.
- Fix fallback lg_floor() implementations to handle extremely large inputs.
- Ensure the default purgeable zone is after the default zone on OS X.
- Fix latent bugs in atomic_*().
- Fix the "arena.<i>.dss" mallctl to handle read-only calls.
- Fix tls_model configuration to enable the initial-exec model when possible.
- Mark malloc_conf as a weak symbol so that the application can override it.
- Correctly detect glibc's adaptive pthread mutexes.
- Fix the --without-export configure option.
* 3.6.0 (March 31, 2014)
This version contains a critical bug fix for a regression present in 3.5.0 and
@ -21,7 +177,7 @@ found in the git revision history:
backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large
allocations.
-- Fix test assertion failure message formatting. This bug did not manifect on
+- Fix test assertion failure message formatting. This bug did not manifest on
x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.
@ -516,7 +672,7 @@ found in the git revision history:
- Make it possible for the application to manually flush a thread's cache, via
the "tcache.flush" mallctl.
- Base maximum dirty page count on proportion of active memory.
-- Compute various addtional run-time statistics, including per size class
+- Compute various additional run-time statistics, including per size class
statistics for large objects.
- Expose malloc_stats_print(), which can be called repeatedly by the
application.
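
As a quick illustration of the explicit-tcache and sized-deallocation
interfaces described in the 4.0.0 notes above (an editorial sketch, not
part of the commit; it assumes the FreeBSD <malloc_np.h> prototypes for
mallctl(), mallocx(), dallocx() and sdallocx()):

#include <malloc_np.h>

int
main(void)
{
	unsigned tc;
	size_t sz = sizeof(tc);
	void *p, *q;

	/* Create an explicit tcache; "tcache.create" reports its index. */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return (1);

	/* Allocate from, then free back to, the explicit tcache. */
	p = mallocx(4096, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));

	/* Sized deallocation: the size argument spares a metadata read. */
	q = mallocx(4096, 0);
	if (q != NULL)
		sdallocx(q, 4096, 0);

	/* Flush and destroy the tcache. */
	return (mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc)));
}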


@ -1,6 +1,6 @@
$FreeBSD$
-.git
-.gitignore
+.autom4te.cfg
+.git*
FREEBSD-*
INSTALL
Makefile*
@ -40,7 +40,10 @@ include/jemalloc/jemalloc_protos.h
include/jemalloc/jemalloc_protos.h.in
include/jemalloc/jemalloc_rename.h
include/jemalloc/jemalloc_rename.sh
include/jemalloc/jemalloc_typedefs.h.in
include/msvc_compat/
install-sh
jemalloc.pc*
src/valgrind.c
src/zone.c
test/


@ -1,15 +1,14 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index d8e2e71..330ba2a 100644
+index 8fc774b..fdbef95 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
-@@ -57,12 +57,23 @@
+@@ -53,11 +53,23 @@
<para>This manual describes jemalloc @jemalloc_version@. More information
can be found at the <ulink
url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para>
+
+ <para>The following configuration options are enabled in libc's built-in
+ jemalloc: <option>--enable-dss</option>,
+ <option>--enable-experimental</option>, <option>--enable-fill</option>,
+ jemalloc: <option>--enable-fill</option>,
+ <option>--enable-lazy-lock</option>, <option>--enable-munmap</option>,
+ <option>--enable-stats</option>, <option>--enable-tcache</option>,
+ <option>--enable-tls</option>, <option>--enable-utrace</option>, and
@ -17,17 +16,18 @@ index d8e2e71..330ba2a 100644
+ <option>--enable-debug</option> is enabled in development versions of
+ FreeBSD (controlled by the <constant>MALLOC_PRODUCTION</constant> make
+ variable).</para>
+
</refsect1>
<refsynopsisdiv>
<title>SYNOPSIS</title>
<funcsynopsis>
<funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
-#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
- <funcsynopsisinfo>#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
+ <funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
+#include &lt;<filename class="headerfile">malloc_np.h</filename>&gt;</funcsynopsisinfo>
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2342,4 +2353,19 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2759,4 +2771,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@ -38,9 +38,8 @@ index d8e2e71..330ba2a 100644
+ FreeBSD 7.0.</para>
+
+ <para>The <function>aligned_alloc<parameter/></function>,
+ <function>malloc_stats_print<parameter/></function>,
+ <function>mallctl*<parameter/></function>, and
+ <function>*allocm<parameter/></function> functions first appeared in
+ <function>malloc_stats_print<parameter/></function>, and
+ <function>mallctl*<parameter/></function> functions first appeared in
+ FreeBSD 10.0.</para>
+
+ <para>The <function>*allocx<parameter/></function> functions first appeared
@ -48,20 +47,11 @@ index d8e2e71..330ba2a 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 574bbb1..e3eafdf 100644
+index 7a137b6..b0001e9 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
+#include "libc_private.h"
+#include "namespace.h"
+
#include <math.h>
#ifdef _WIN32
# include <windows.h>
@@ -65,6 +68,9 @@ typedef intptr_t ssize_t;
#include <valgrind/memcheck.h>
@@ -8,6 +8,9 @@
#include <sys/ktrace.h>
#endif
+#include "un-namespace.h"
@ -70,7 +60,7 @@ index 574bbb1..e3eafdf 100644
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
@@ -99,13 +105,7 @@ static const bool config_fill =
@@ -42,13 +45,7 @@ static const bool config_fill =
false
#endif
;
@ -85,11 +75,25 @@ index 574bbb1..e3eafdf 100644
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
index a601d6e..e7094b2 100644
--- a/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,6 +1,9 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
+#include "libc_private.h"
+#include "namespace.h"
+
#include <math.h>
#ifdef _WIN32
# include <windows.h>
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index de44e14..564d604 100644
+index f051f29..561378f 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
-@@ -43,9 +43,6 @@ struct malloc_mutex_s {
+@@ -47,15 +47,13 @@ struct malloc_mutex_s {
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
@ -99,24 +103,31 @@ index de44e14..564d604 100644
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool malloc_mutex_first_thread(void);
bool mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 93516d2..22f9af9 100644
+index dbf6aa7..f87dba8 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -226,7 +226,6 @@ iralloc
iralloct
iralloct_realign
@@ -277,7 +277,6 @@ iralloct_realign
isalloc
isdalloct
isqalloc
-isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..94554bc
+index 0000000..737542e
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,134 @@
+@@ -0,0 +1,142 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
@ -131,7 +142,6 @@ index 0000000..94554bc
+ * The following are architecture-dependent, so conditionally define them for
+ * each supported architecture.
+ */
+#undef CPU_SPINWAIT
+#undef JEMALLOC_TLS_MODEL
+#undef STATIC_PAGE_SHIFT
+#undef LG_SIZEOF_PTR
@ -141,7 +151,6 @@ index 0000000..94554bc
+
+#ifdef __i386__
+# define LG_SIZEOF_PTR 2
+# define CPU_SPINWAIT __asm__ volatile("pause")
+# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __ia64__
@ -153,12 +162,14 @@ index 0000000..94554bc
+#endif
+#ifdef __amd64__
+# define LG_SIZEOF_PTR 3
+# define CPU_SPINWAIT __asm__ volatile("pause")
+# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __arm__
+# define LG_SIZEOF_PTR 2
+#endif
+#ifdef __aarch64__
+# define LG_SIZEOF_PTR 3
+#endif
+#ifdef __mips__
+#ifdef __mips_n64
+# define LG_SIZEOF_PTR 3
@ -181,6 +192,11 @@ index 0000000..94554bc
+#define LG_SIZEOF_LONG LG_SIZEOF_PTR
+#define LG_SIZEOF_INTMAX_T 3
+
+#undef CPU_SPINWAIT
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#define CPU_SPINWAIT cpu_spinwait()
+
+/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */
+#undef JEMALLOC_LAZY_LOCK
+extern int __isthreaded;
@ -192,6 +208,7 @@ index 0000000..94554bc
+#undef je_realloc
+#undef je_free
+#undef je_posix_memalign
+#undef je_aligned_alloc
+#undef je_malloc_usable_size
+#undef je_mallocx
+#undef je_rallocx
@ -209,6 +226,7 @@ index 0000000..94554bc
+#define je_realloc __realloc
+#define je_free __free
+#define je_posix_memalign __posix_memalign
+#define je_aligned_alloc __aligned_alloc
+#define je_malloc_usable_size __malloc_usable_size
+#define je_mallocx __mallocx
+#define je_rallocx __rallocx
@ -238,6 +256,7 @@ index 0000000..94554bc
+__weak_reference(__realloc, realloc);
+__weak_reference(__free, free);
+__weak_reference(__posix_memalign, posix_memalign);
+__weak_reference(__aligned_alloc, aligned_alloc);
+__weak_reference(__malloc_usable_size, malloc_usable_size);
+__weak_reference(__mallocx, mallocx);
+__weak_reference(__rallocx, rallocx);
@ -263,32 +282,142 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 204778b..9e5f2df 100644
+index ed7863b..d078a1f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER)
@@ -4,6 +4,10 @@
/******************************************************************************/
/* Data. */
+/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
+const char *__malloc_options_1_0 = NULL;
+__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
+
/* Runtime configuration options. */
const char *je_malloc_conf;
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
@@ -457,7 +461,8 @@ malloc_conf_init(void)
#endif
;
@@ -2475,6 +2479,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
*/
/******************************************************************************/
/*
+ * Begin compatibility functions.
+ */
+
+#define ALLOCM_LG_ALIGN(la) (la)
+#define ALLOCM_ALIGN(a) (ffsl(a)-1)
+#define ALLOCM_ZERO ((int)0x40)
+#define ALLOCM_NO_MOVE ((int)0x80)
+
+#define ALLOCM_SUCCESS 0
+#define ALLOCM_ERR_OOM 1
+#define ALLOCM_ERR_NOT_MOVED 2
+
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+ void *p;
+
+ assert(ptr != NULL);
+
+ p = je_mallocx(size, flags);
+ if (p == NULL)
+ return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = isalloc(p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+ int ret;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+
+ if (no_move) {
+ size_t usize = je_xallocx(*ptr, size, extra, flags);
+ ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
+ if (rsize != NULL)
+ *rsize = usize;
+ } else {
+ void *p = je_rallocx(*ptr, size+extra, flags);
+ if (p != NULL) {
+ *ptr = p;
+ ret = ALLOCM_SUCCESS;
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
+ *rsize = isalloc(*ptr, config_prof);
+ }
+ return (ret);
+}
+
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+
+ assert(rsize != NULL);
+ *rsize = je_sallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_dallocm(void *ptr, int flags)
+{
+
+ je_dallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+
+ usize = je_nallocx(size, flags);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = usize;
+ return (ALLOCM_SUCCESS);
+}
+
+#undef ALLOCM_LG_ALIGN
+#undef ALLOCM_ALIGN
+#undef ALLOCM_ZERO
+#undef ALLOCM_NO_MOVE
+
+#undef ALLOCM_SUCCESS
+#undef ALLOCM_ERR_OOM
+#undef ALLOCM_ERR_NOT_MOVED
+
+/*
+ * End compatibility functions.
+ */
+/******************************************************************************/
+/*
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
@@ -2575,4 +2680,11 @@ jemalloc_postfork_child(void)
ctl_postfork_child();
}
- if ((opts = getenv(envname)) != NULL) {
+ if (issetugid() == 0 && (opts = getenv(envname)) !=
+ NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
+void
+_malloc_first_thread(void)
+{
+
+ (void)malloc_mutex_first_thread();
+}
+
/******************************************************************************/
diff --git a/src/mutex.c b/src/mutex.c
-index 788eca3..6f5954e 100644
+index 2d47af9..934d5aa 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
@ -296,21 +425,45 @@ index 788eca3..6f5954e 100644
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+ _pthread_mutex_init_calloc_cb);
+
+#pragma weak _pthread_mutex_init_calloc_cb
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t))
+{
+
+ return (0);
+ return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
+ __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
+ calloc_cb));
+}
#endif
bool
@@ -137,7 +148,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
}
bool
-mutex_boot(void)
+malloc_mutex_first_thread(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
@@ -151,3 +162,14 @@ mutex_boot(void)
#endif
return (false);
}
+
+bool
+mutex_boot(void)
+{
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ return (malloc_mutex_first_thread());
+#else
+ return (false);
+#endif
+}
diff --git a/src/util.c b/src/util.c
-index 93a19fd..70b3e45 100644
+index 4cb0d6c..25b61c2 100644
--- a/src/util.c
+++ b/src/util.c
@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)


@ -72,15 +72,21 @@ do_extract() {
patch -p1 < "${src}/FREEBSD-diffs"
find . -name '*.orig' -delete
# Generate various files.
./autogen.sh --enable-cc-silence --enable-dss --enable-xmalloc \
--enable-utrace --with-xslroot=/usr/local/share/xsl/docbook \
--with-private-namespace=__
./autogen.sh --enable-cc-silence --enable-xmalloc --enable-utrace \
--with-xslroot=/usr/local/share/xsl/docbook --with-private-namespace=__ \
--with-lg-page-sizes=12,13,14,16
gmake dist
)
}
do_diff() {
(cd ${work}; git add -A; git diff --cached) > FREEBSD-diffs
(
cd ${work}
find . -name '*.orig' -delete
find . -name '*.rej' -delete
git add -A
git diff --cached
) > FREEBSD-diffs
}
command=$1


@ -1 +1 @@
-3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
+4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -11,6 +11,7 @@
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
@ -18,89 +19,139 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* All arithmetic functions return the arithmetic result of the atomic
* operation. Some atomic operation APIs return the value prior to mutation, in
* which case the following functions must redundantly compute the result so
* that it can be returned. These functions are normally inlined, so the extra
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* *p = s;
* return (false);
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
-# elif (defined(__amd64__) || defined(__x86_64__))
+# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
+uint64_t t = x;
asm volatile (
"lock; xaddq %0, %1;"
-: "+r" (x), "=m" (*p) /* Outputs. */
+: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
-return (x);
+return (t + x);
}
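/*
 * Editorial note, not part of the diff: xaddq writes the pre-add value
 * of *p back into its register operand, so the old "+r" (x) constraint
 * combined with "return (x)" yielded the fetched value rather than the
 * sum.  Copying x into t and returning (t + x) restores the documented
 * add-and-fetch semantics; the same fix applies to the subtraction and
 * 32-bit variants below.
 */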
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
+uint64_t t;
x = (uint64_t)(-(int64_t)x);
+t = x;
asm volatile (
"lock; xaddq %0, %1;"
-: "+r" (x), "=m" (*p) /* Outputs. */
+: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
-return (x);
+return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgq %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory" /* Clobbers. */
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
@ -124,7 +175,88 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
uint64_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint64(p);
} while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
@ -138,6 +270,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
__sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
@ -145,74 +291,91 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
-#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
+uint32_t t = x;
asm volatile (
"lock; xaddl %0, %1;"
-: "+r" (x), "=m" (*p) /* Outputs. */
+: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
-return (x);
+return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
+uint32_t t;
x = (uint32_t)(-(int32_t)x);
+t = x;
asm volatile (
"lock; xaddl %0, %1;"
-: "+r" (x), "=m" (*p) /* Outputs. */
+: "+r" (t), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
-return (x);
+return (t + x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint8_t success;
asm volatile (
"lock; cmpxchgl %4, %0;"
"sete %1;"
: "=m" (*p), "=a" (success) /* Outputs. */
: "m" (*p), "a" (c), "r" (s) /* Inputs. */
: "memory"
);
return (!(bool)success);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
: "m" (*p) /* Inputs. */
: "memory" /* Clobbers. */
);
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
@ -228,7 +391,84 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
uint32_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
do {
o = atomic_read_uint32(p);
} while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
return (o != c);
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
@ -242,10 +482,72 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
__sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
@ -272,6 +574,28 @@ atomic_sub_z(size_t *p, size_t x)
#endif
}
JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
@ -297,6 +621,29 @@ atomic_sub_u(unsigned *p, unsigned x)
(uint32_t)-((int32_t)x)));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
/******************************************************************************/
#endif
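
Given the CAS convention documented in the block comment above
(atomic_cas_<t>() returns false iff the swap took effect), a typical
lock-free read-modify-write loop looks like this editorial sketch
(high_water_update() is a hypothetical helper, not part of the header):

JEMALLOC_INLINE void
high_water_update(size_t *hwm, size_t n)
{
	size_t cur;

	do {
		cur = atomic_read_z(hwm);
		if (n <= cur)
			return;	/* Existing maximum already covers n. */
	} while (atomic_cas_z(hwm, cur, n));	/* true == CAS failed; retry. */
}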


@ -10,9 +10,7 @@
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(size_t size);
-void *base_calloc(size_t number, size_t size);
-extent_node_t *base_node_alloc(void);
-void base_node_dealloc(extent_node_t *node);
+void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);


@ -3,6 +3,7 @@
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
@ -14,6 +15,51 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
@ -93,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
bitmap_t g;
assert(bit < binfo->nbits);
-assert(bitmap_get(bitmap, binfo, bit) == false);
+assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
@ -126,15 +172,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap_t g;
unsigned i;
-assert(bitmap_full(bitmap, binfo) == false);
+assert(!bitmap_full(bitmap, binfo));
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
-bit = ffsl(g) - 1;
+bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
-bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
bitmap_set(bitmap, binfo, bit);
@ -158,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
-assert(bitmap_get(bitmap, binfo, bit) == false);
+assert(!bitmap_get(bitmap, binfo, bit));
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@ -172,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
== 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
-if (propagate == false)
+if (!propagate)
break;
}
}
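
To make the BITMAP_BITS2GROUPS() arithmetic above concrete, a worked
example (editorial; it assumes 64-bit bitmap groups, i.e.
LG_BITMAP_GROUP_NBITS == 6 and BITMAP_GROUP_NBITS_MASK == 63):

	BITMAP_BITS2GROUPS(1000)    == (1000 + 63) >> 6 == 16	/* level 0 */
	BITMAP_BITS2GROUPS(16)      == (16 + 63) >> 6   == 1	/* level 1 */
	BITMAP_GROUPS_2_LEVEL(1000) == 16 + 1           == 17 groups in total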


@ -5,7 +5,7 @@
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
-#define LG_CHUNK_DEFAULT 22
+#define LG_CHUNK_DEFAULT 21
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
@ -19,6 +19,16 @@
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#define CHUNK_HOOKS_INITIALIZER { \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL \
}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@ -30,23 +40,36 @@
extern size_t opt_lg_chunk;
extern const char *opt_dss;
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t stats_chunks;
-extern rtree_t *chunks_rtree;
+extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
dss_prec_t dss_prec);
void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
extern const chunk_hooks_t chunk_hooks_default;
chunk_hooks_t chunk_hooks_get(arena_t *arena);
chunk_hooks_t chunk_hooks_set(arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(const void *chunk, const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero,
bool dalloc_node);
void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed);
void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
size_t length);
bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
@ -56,6 +79,19 @@ void chunk_postfork_child(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
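
For concreteness (an editorial example, not part of the header): with
the new LG_CHUNK_DEFAULT of 21 the default chunk is 2 MiB, so
chunksize_mask == 0x1fffff and the address macros above behave as:

	CHUNK_ADDR2BASE((void *)0x7fff12345678) == (void *)0x7fff12200000
	CHUNK_CEILING(0x300000)                 == 0x400000	/* 3 MiB -> 4 MiB */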


@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);


@ -9,10 +9,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool pages_purge(void *addr, size_t length);
-void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
-bool chunk_dealloc_mmap(void *chunk, size_t size);
+void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+bool *commit);
+bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/


@ -66,13 +66,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(ckh_t *ckh);
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);


@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@ -46,22 +47,15 @@ struct ctl_arena_stats_s {
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
struct {
size_t current; /* stats_chunks.curchunks */
uint64_t total; /* stats_chunks.nchunks */
size_t high; /* stats_chunks.highchunks */
} chunks;
struct {
size_t allocated; /* huge_allocated */
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};


@ -7,25 +7,53 @@ typedef struct extent_node_s extent_node_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. */
/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
/* Profile counters, used for huge objects. */
prof_ctx_t *prof_ctx;
/* Arena from which this extent came, if any. */
arena_t *en_arena;
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
void *en_addr;
/* Total region size. */
size_t size;
size_t en_size;
/* True if zero-filled; used by chunk recycling code. */
bool zeroed;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
/* Profile counters, used for huge objects. */
prof_tctx_t *en_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
qr(extent_node_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
/* Linkage for arena's huge and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
@ -41,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/


@ -35,13 +35,14 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
-return (x << r) | (x >> (32 - r));
+return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
-return (x << r) | (x >> (64 - r));
+return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
@ -76,9 +77,9 @@ hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
-k *= QU(0xff51afd7ed558ccdLLU);
+k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
-k *= QU(0xc4ceb9fe1a85ec53LLU);
+k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
@ -247,8 +248,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t h1 = seed;
uint64_t h2 = seed;
-const uint64_t c1 = QU(0x87c37b91114253d5LLU);
-const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+const uint64_t c1 = KQU(0x87c37b91114253d5);
+const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{


@ -9,34 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
dss_prec_t dss_prec);
void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
size_t extra, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t extra, size_t alignment, bool zero,
tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
void huge_prefork(void);
void huge_postfork_parent(void);
void huge_postfork_child(void);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/


@ -0,0 +1,67 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include "libc_private.h"
#include "namespace.h"
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_DECLS_H */


@ -23,6 +23,9 @@
*/
#define CPU_SPINWAIT __asm__ volatile("pause")
/* Defined if C11 atomics are available. */
/* #undef JEMALLOC_C11ATOMICS */
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#define JEMALLOC_ATOMIC9 1
@ -36,7 +39,7 @@
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
-* functions are defined in libgcc instead of being inlines)
+* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
@ -44,16 +47,36 @@
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
* functions are defined in libgcc instead of being inlines).
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if madvise(2) is available.
*/
#define JEMALLOC_HAVE_MADVISE
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/*
* Defined if secure_getenv(3) is available.
*/
/* #undef JEMALLOC_HAVE_SECURE_GETENV */
/*
* Defined if issetugid(2) is available.
*/
#define JEMALLOC_HAVE_ISSETUGID
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@ -77,9 +100,6 @@
*/
#define JEMALLOC_MUTEX_INIT_CB 1
/* Defined if sbrk() is supported. */
#define JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
@ -138,8 +158,26 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#define STATIC_PAGE_SHIFT 12
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#define LG_TINY_MIN 3
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
@ -148,22 +186,28 @@
*/
#define JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
/* #undef JEMALLOC_MREMAP */
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
* instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
/* #undef JEMALLOC_IVSALLOC */
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
@ -183,9 +227,7 @@
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
#define JEMALLOC_PURGE_MADVISE_FREE
/*
* Define if operating system has alloca.h header.
*/
/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
@ -203,4 +245,19 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
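The lg-valued knobs above (LG_PAGE, LG_QUANTUM, LG_TINY_MIN, ...) are consumed by deriving sizes and masks at compile time. A sketch of the usual pattern; PAGE_CEILING is an illustrative name, not necessarily the one jemalloc uses:

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE		12
#define PAGE		((size_t)1 << LG_PAGE)		/* 4096 bytes */
#define PAGE_MASK	(PAGE - 1)
/* Round a size up to the next page boundary. */
#define PAGE_CEILING(s)	(((s) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	printf("%zu\n", PAGE_CEILING((size_t)5000));	/* prints 8192 */
	return (0);
}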


@ -39,9 +39,15 @@
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
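The K-prefixed macros added above paste integer-literal suffixes so that large constants keep a 64-bit type on every platform, and __DECONST casts away const without a compiler warning. Both in use (sketch only; the LCG constants are the ones from the prng change later in this commit):

#include <stdint.h>

#define QU(q)	((uint64_t)q)
#define KQU(q)	QU(q##ULL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

/* A 64-bit literal that cannot silently truncate on 32-bit targets. */
uint64_t
lcg_step(uint64_t state)
{
	return (state * KQU(6364136223846793005) + KQU(1442695040888963407));
}

/* Classic __DECONST use: return a non-const pointer into const input. */
char *
find_char(const char *s, int c)
{
	for (; *s != '\0'; s++) {
		if (*s == (char)c)
			return (__DECONST(char *, s));
	}
	return (NULL);
}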


@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@ -68,7 +72,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
@ -83,7 +91,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else


@ -0,0 +1,26 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
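These declarations gather the raw page-level operations into their own header. A hypothetical caller, to show the intended lifecycle (the exact mapping semantics are assumed here, not verified against the implementation):

#include <stdbool.h>
#include <stddef.h>

void	*pages_map(void *addr, size_t size);
void	pages_unmap(void *addr, size_t size);
bool	pages_purge(void *addr, size_t size);

void
pages_demo(size_t size)
{
	void *p = pages_map(NULL, size);	/* obtain a fresh mapping */

	if (p == NULL)
		return;
	/* ... use the pages ... */
	(void)pages_purge(p, size);	/* hint that contents are disposable */
	pages_unmap(p, size);		/* tear the mapping down */
}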


@ -1,44 +1,75 @@
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0dalloc JEMALLOC_N(a0dalloc)
#define a0get JEMALLOC_N(a0get)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_aalloc JEMALLOC_N(arena_aalloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_bitselm_get JEMALLOC_N(arena_bitselm_get)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
#define arena_cleanup JEMALLOC_N(arena_cleanup)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_get JEMALLOC_N(arena_get)
#define arena_get_hard JEMALLOC_N(arena_get_hard)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_miscelm_get JEMALLOC_N(arena_miscelm_get)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_nbound JEMALLOC_N(arena_nbound)
#define arena_new JEMALLOC_N(arena_new)
#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
@ -46,50 +77,46 @@
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arenas_cache_bypass_cleanup JEMALLOC_N(arenas_cache_bypass_cleanup)
#define arenas_cache_cleanup JEMALLOC_N(arenas_cache_cleanup)
#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_p JEMALLOC_N(atomic_add_p)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define base_stats_get JEMALLOC_N(base_stats_get)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
@ -99,49 +126,54 @@
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
#define bootstrap_free JEMALLOC_N(bootstrap_free)
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dalloc_arena JEMALLOC_N(chunk_dalloc_arena)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
#define chunk_deregister JEMALLOC_N(chunk_deregister)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_lookup JEMALLOC_N(chunk_lookup)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunk_purge_arena JEMALLOC_N(chunk_purge_arena)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
@ -150,6 +182,23 @@
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
#define extent_node_init JEMALLOC_N(extent_node_init)
#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@ -166,6 +215,7 @@
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
@ -193,44 +243,46 @@
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_aalloc JEMALLOC_N(huge_aalloc)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define iaalloc JEMALLOC_N(iaalloc)
#define iallocztm JEMALLOC_N(iallocztm)
#define icalloc JEMALLOC_N(icalloc)
#define icalloct JEMALLOC_N(icalloct)
#define idalloc JEMALLOC_N(idalloc)
#define idalloct JEMALLOC_N(idalloct)
#define idalloctm JEMALLOC_N(idalloctm)
#define imalloc JEMALLOC_N(imalloc)
#define imalloct JEMALLOC_N(imalloct)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
#define index2size_lookup JEMALLOC_N(index2size_lookup)
#define index2size_tab JEMALLOC_N(index2size_tab)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define ipallocztm JEMALLOC_N(ipallocztm)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isdalloct JEMALLOC_N(isdalloct)
#define isqalloc JEMALLOC_N(isqalloc)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define lg_floor JEMALLOC_N(lg_floor)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
@ -241,7 +293,8 @@
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
@ -250,16 +303,18 @@
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_cache_cleanup JEMALLOC_N(narenas_cache_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
#define opt_junk_free JEMALLOC_N(opt_junk_free)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
@ -273,84 +328,98 @@
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_map JEMALLOC_N(pages_map)
#define pages_purge JEMALLOC_N(pages_purge)
#define pages_trim JEMALLOC_N(pages_trim)
#define pages_unmap JEMALLOC_N(pages_unmap)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_booted JEMALLOC_N(quarantine_booted)
#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define quarantine_init JEMALLOC_N(quarantine_init)
#define quarantine_tls JEMALLOC_N(quarantine_tls)
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_child_read JEMALLOC_N(rtree_child_read)
#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
#define rtree_set JEMALLOC_N(rtree_set)
#define rtree_start_level JEMALLOC_N(rtree_start_level)
#define rtree_subkey JEMALLOC_N(rtree_subkey)
#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
#define rtree_val_read JEMALLOC_N(rtree_val_read)
#define rtree_val_write JEMALLOC_N(rtree_val_write)
#define s2u JEMALLOC_N(s2u)
#define s2u_compute JEMALLOC_N(s2u_compute)
#define s2u_lookup JEMALLOC_N(s2u_lookup)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define small_size2bin JEMALLOC_N(small_size2bin)
#define size2index JEMALLOC_N(size2index)
#define size2index_compute JEMALLOC_N(size2index_compute)
#define size2index_lookup JEMALLOC_N(size2index_lookup)
#define size2index_tab JEMALLOC_N(size2index_tab)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
@ -358,55 +427,67 @@
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcaches JEMALLOC_N(tcaches)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcaches_create JEMALLOC_N(tcaches_create)
#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
#define tcaches_flush JEMALLOC_N(tcaches_flush)
#define tcaches_get JEMALLOC_N(tcaches_get)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
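Every entry in this file wraps an internal symbol in JEMALLOC_N() so that a build-time prefix keeps allocator internals out of the application's namespace; the je_ prefix below is an assumed example, not necessarily the configured value:

#define JEMALLOC_N(n) je_##n
#define arena_boot JEMALLOC_N(arena_boot)

/* After preprocessing, this definition emits the symbol je_arena_boot. */
void
arena_boot(void)
{
}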


@ -15,7 +15,7 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example. the lowest bit has a cycle of 2,
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
assert((lg_range) > 0); \
assert((lg_range) <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
r >>= (32 - (lg_range)); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
assert((lg_range) > 0); \
assert((lg_range) <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
r >>= (64 - (lg_range)); \
} while (false)
#endif /* JEMALLOC_H_TYPES */
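The only substantive change in this hunk is parenthesizing the lg_range macro argument, which matters as soon as a caller passes an expression rather than a constant. A minimal repro of the hazard:

#include <assert.h>
#include <stdint.h>

#define SHIFT_BAD(r, lg_range)	((r) >> (32 - lg_range))
#define SHIFT_GOOD(r, lg_range)	((r) >> (32 - (lg_range)))

int
main(void)
{
	uint32_t r = 0x80000000U;

	/*
	 * With the argument 8 + 8, SHIFT_BAD expands to r >> (32 - 8 + 8),
	 * i.e. r >> 32: undefined behavior. SHIFT_GOOD expands to
	 * r >> (32 - 16) as intended.
	 */
	assert(SHIFT_GOOD(r, 8 + 8) == 0x8000U);
	return (0);
}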


@ -3,8 +3,8 @@
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
*/
#define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
@ -36,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all ctx's. No space is allocated for these
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
@ -63,141 +66,185 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned nignore;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/*
* Profiling counters. An allocation/deallocation pair can operate on
* different prof_thr_cnt_t objects that are linked into the same
* prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
* negative. In principle it is possible for the *bytes counters to
* overflow/underflow, but a general solution would require something
* like 128-bit counters; this implementation doesn't bother to solve
* that problem.
*/
int64_t curobjs;
int64_t curbytes;
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
struct prof_thr_cnt_s {
/* Linkage into prof_ctx_t's cnts_ql. */
ql_elm(prof_thr_cnt_t) cnts_link;
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
/* Linkage into thread's LRU. */
ql_elm(prof_thr_cnt_t) lru_link;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Associated context. If a thread frees an object that it did not
* allocate, it is possible that the context is not cached in the
* thread's hash table, in which case it must be able to look up the
* context, insert a new prof_thr_cnt_t into the thread's hash table,
* and link it into the prof_ctx_t's cnts_ql.
* Copy of tdata->thr_uid, necessary because tdata may be defunct during
* teardown.
*/
prof_ctx_t *ctx;
uint64_t thr_uid;
/*
* Threads use memory barriers to update the counters. Since there is
* only ever one writer, the only challenge is for the reader to get a
* consistent read of the counters.
*
* The writer uses this series of operations:
*
* 1) Increment epoch to an odd number.
* 2) Update counters.
* 3) Increment epoch to an even number.
*
* The reader must assure 1) that the epoch is even while it reads the
* counters, and 2) that the epoch doesn't change between the time it
* starts and finishes reading the counters.
*/
unsigned epoch;
/* Profiling counters. */
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_ctx_s {
/* Associated backtrace. */
prof_bt_t *bt;
/* Protects nlimbo, cnt_merged, and cnts_ql. */
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this ctx to be in a state of
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
* - Dumping a heap profile that includes this ctx.
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* When threads exit, they merge their stats into cnt_merged. */
prof_cnt_t cnt_merged;
/* Associated backtrace. */
prof_bt_t bt;
/*
* List of profile counters, one for each thread that has allocated in
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
/* Linkage for list of contexts to be dumped. */
ql_elm(prof_ctx_t) dump_link;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
* cache of backtraces, with associated thread-specific prof_thr_cnt_t
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
ckh_t bt2cnt;
uint64_t thr_discrim;
/* LRU for contents of bt2cnt. */
ql_head(prof_thr_cnt_t) lru_ql;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
/* Backtrace vector, used for calls to prof_backtrace(). */
void **vec;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t threshold;
uint64_t accum;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
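The structures above keep their linkage inline: ql_elm() and rb_node() embed the list or red-black tree pointers directly in the element, so inserting a prof_tctx_t into gctx->tctxs allocates nothing extra. A stripped-down sketch of the idiom, with a hypothetical widget type and a simplified ql_elm():

typedef struct widget_s widget_t;

/* Simplified stand-in for jemalloc's ql_elm(): linkage inside the node. */
#define ql_elm(type)	struct { type *qre_next; type *qre_prev; }

struct widget_s {
	int			payload;
	ql_elm(widget_t)	link;	/* no separate node allocation needed */
};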
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by setting
* opt_prof_active to false. No locking is used when updating opt_prof_active,
* so there are no guarantees regarding how long it will take for all threads
* to notice state changes.
*/
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
@ -211,6 +258,12 @@ extern char opt_prof_prefix[
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
@ -221,391 +274,248 @@ extern char opt_prof_prefix[
extern uint64_t prof_interval;
/*
* If true, promote small sampled objects to large objects, since small run
* headers do not have embedded profile context pointers.
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern bool prof_promote;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */\
/* interval is 1. */\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */\
/* each thread. */\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */\
/* whether size is enough for prof_accum to reach */\
/* prof_tdata->threshold. However, delay updating */\
/* these variables until prof_{m,re}alloc(), because */\
/* we don't know for sure that the allocation will */\
/* succeed. */\
/* */\
/* Use subtraction rather than addition to avoid */\
/* potential integer overflow. */\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
prof_tdata_cleanup)
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
prof_tdata_t *prof_tdata;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get();
if (create && prof_tdata == NULL)
prof_tdata = prof_tdata_init();
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (prof_tdata);
return (tdata);
}
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
{
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
cassert(config_prof);
/*
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
*
* __ __
* | log(u) | 1
* prof_tdata->threshold = | -------- |, where p = -------------------
* | log(1-p) | opt_lg_prof_sample
* 2
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
#endif
}
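The comment block in the code being removed above documents inverse-transform sampling of a geometric distribution; restated as LaTeX for clarity (same math as the comment, nothing new):

\[
  \mathrm{threshold}
    = \left\lceil \frac{\ln u}{\ln(1-p)} \right\rceil,
  \qquad
  p = \frac{1}{2^{\mathrm{opt\_lg\_prof\_sample}}},
  \quad u \sim \mathrm{Uniform}(0,1),
\]

which yields a geometrically distributed variate with mean \(1/p = 2^{\mathrm{opt\_lg\_prof\_sample}}\) bytes between samples, per the Devroye reference cited in the comment.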
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
ret = arena_prof_ctx_get(ptr);
} else
ret = huge_prof_ctx_get(ptr);
return (arena_prof_tctx_get(ptr));
}
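/*
 * Illustrative sketch (editor's addition): the sample threshold consumed by
 * prof_sample_accum_update() below is drawn out of line (in prof.c) as a
 * geometrically distributed variate with mean 2^opt_lg_prof_sample:
 *
 *	threshold = floor(log(u) / log(1 - p)) + 1,
 *	with u uniform in (0, 1) and p = 1 / 2^opt_lg_prof_sample.
 *
 * A standalone approximation using libc's drand48() in place of jemalloc's
 * prng64(); assumes lg_sample < 64:
 */
#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t
example_sample_threshold(unsigned lg_sample)
{
	double u = drand48();
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);

	if (u == 0.0)
		u = DBL_MIN;	/* avoid log(0) */
	return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}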
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (tdata == NULL)
return (true);
if (tdata->bytes_until_sample >= usize) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
}
}
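/*
 * Illustrative sketch (editor's addition): prof_sample_accum_update() is a
 * per-thread byte countdown; allocations whittle bytes_until_sample down,
 * the request that crosses zero gets sampled, and a fresh threshold is then
 * drawn.  A toy driver using example_sample_threshold() from the sketch
 * above:
 */
static void
example_sampling_cadence(void)
{
	uint64_t bytes_until_sample = example_sample_threshold(19);
	size_t usize = 4096;
	unsigned i, samples = 0;

	for (i = 0; i < 10000; i++) {
		if (bytes_until_sample >= usize)
			bytes_until_sample -= usize;	/* not sampled */
		else {
			samples++;	/* sampled; draw a new threshold */
			bytes_until_sample = example_sample_threshold(19);
		}
	}
	/* Expect samples ~= 10000 * 4096 / 2^19, i.e. roughly 78. */
	(void)samples;
}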
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
usize, update, &tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
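/*
 * Illustrative sketch (editor's addition): the intended call sequence on the
 * allocation path.  example_alloc() is a hypothetical stand-in for the real
 * arena allocation, usize must already be a usable size (s2u()), and the
 * rollback jemalloc performs when the allocation fails is omitted.
 */
extern void	*example_alloc(size_t usize);	/* hypothetical */

static void *
example_prof_wrapped_alloc(tsd_t *tsd, size_t usize)
{
	prof_tctx_t *tctx = prof_alloc_prep(tsd, usize, true);
	void *p = example_alloc(usize);

	if (p != NULL)
		prof_malloc(p, usize, tctx);	/* records only if tctx > 1U */
	return (p);
}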
JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(ptr, true));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_malloc_sample_object(ptr, usize, tctx);
	else
		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
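/*
 * Editor's note: prof_tctx_t pointers double as tagged sentinels throughout
 * these fast paths: NULL never gets this far, (uintptr_t)1U means "not
 * sampled", and anything larger is a live prof_tctx_t.  The test in
 * isolation:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_is_sampled(const void *tctx)
{

	return ((uintptr_t)tctx > (uintptr_t)1U);
}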
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
{

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (!updated && ptr != NULL) {
		assert(usize == isalloc(ptr, true));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to PROF_ALLOC_PREP()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross
			 * the sample threshold.
			 */
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
		prof_free_sampled_object(tsd, old_usize, old_tctx);
	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
		prof_malloc_sample_object(ptr, usize, tctx);
	else
		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
	prof_tctx_t *tctx = prof_tctx_get(ptr);

	cassert(config_prof);
	assert(usize == isalloc(ptr, true));

if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
}
#endif

View file

@@ -11,6 +11,7 @@
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_sdallocx JEMALLOC_N(sdallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
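/*
 * Editor's note: je_sdallocx is new here because jemalloc 4.0 adds
 * sdallocx(), sized deallocation: the caller asserts the original request
 * size so the allocator can skip its size lookup.  A usage sketch (public
 * API; on FreeBSD declared in <malloc_np.h>):
 */
#include <malloc_np.h>

static void
example_sized_free(void)
{
	void *p = mallocx(64, 0);

	if (p != NULL)
		sdallocx(p, 64, 0);	/* size must match the mallocx() request */
}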
@@ -18,8 +19,3 @@
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
#define je_valloc JEMALLOC_N(valloc)
#define je_allocm JEMALLOC_N(allocm)
#define je_dallocm JEMALLOC_N(dallocm)
#define je_nallocm JEMALLOC_N(nallocm)
#define je_rallocm JEMALLOC_N(rallocm)
#define je_sallocm JEMALLOC_N(sallocm)
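/*
 * Editor's note: the je_allocm family above goes away because jemalloc 4.0
 * drops the experimental *allocm() API in favor of mallocx()/rallocx()/
 * xallocx()/sallocx()/dallocx()/nallocx().  JEMALLOC_N() itself is a
 * token-pasting macro that rewrites each public name into the library's
 * private namespace; the mechanism, shown with an invented prefix:
 */
#define EXAMPLE_N(n) example_private_##n
#define example_xallocx EXAMPLE_N(xallocx)	/* -> example_private_xallocx */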

View file

@@ -1,6 +1,4 @@
/*
* List definitions.
*/
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \

View file

@@ -40,8 +40,10 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
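/*
 * Illustrative sketch (editor's addition): the meld/split operation is an
 * involution on the prev/next links: applied to nodes of two separate rings
 * it joins them, applied to two nodes of the same ring it cuts the ring in
 * two, which is why one body serves both macros.  A minimal standalone
 * version of the pointer swap:
 */
struct ring {
	struct ring *next, *prev;
};

static void
ring_meld_or_split(struct ring *a, struct ring *b)
{
	struct ring *t = a->prev;

	/* Exchange the predecessors of a and b, then repair the next links. */
	a->prev = b->prev;
	b->prev = t;
	a->prev->next = a;
	b->prev->next = b;
}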

View file

@@ -29,36 +29,29 @@ struct quarantine_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
void quarantine_cleanup(void *arg);
bool quarantine_boot(void);
void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
	tsd_t *tsd;

	assert(config_fill && opt_quarantine);

	tsd = tsd_fetch();
	if (tsd_quarantine_get(tsd) == NULL)
		quarantine_alloc_hook_work(tsd);
}
#endif

Some files were not shown because too many files have changed in this diff.