MFHead @345353

This commit is contained in:
Alan Somers 2019-03-20 23:32:37 +00:00
commit f9856d0813
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/fuse2/; revision=345354
93 changed files with 2675 additions and 2320 deletions

View file

@ -1162,7 +1162,7 @@ buildworld_epilogue: .PHONY
@echo "--------------------------------------------------------------"
@echo ">>> World build completed on `LC_ALL=C date`"
@seconds=$$(($$(date '+%s') - ${_BUILDWORLD_START})); \
echo -n ">>> World build in $$seconds seconds, "; \
echo -n ">>> World built in $$seconds seconds, "; \
echo "ncpu: $$(sysctl -n hw.ncpu)${.MAKE.JOBS:S/^/, make -j/}"
@echo "--------------------------------------------------------------"
@ -1648,7 +1648,7 @@ buildkernel: .MAKE .PHONY
.endfor
@seconds=$$(($$(date '+%s') - ${_BUILDKERNEL_START})); \
echo -n ">>> Kernel(s) build for${BUILDKERNELS} in $$seconds seconds, "; \
echo -n ">>> Kernel(s) ${BUILDKERNELS} built in $$seconds seconds, "; \
echo "ncpu: $$(sysctl -n hw.ncpu)${.MAKE.JOBS:S/^/, make -j/}"
@echo "--------------------------------------------------------------"

View file

@ -31,6 +31,12 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 13.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20190320:
The fuse(4) module has been renamed to fusefs(4) for consistency with
other filesystems. You should update any kld_load="fuse" entries in
/etc/rc.conf, fuse_load="YES" entries in /boot/loader.conf, and
"options FUSE" enties in kernel config files.
20190304:
Clang, llvm, lld, lldb, compiler-rt and libc++ have been upgraded to
8.0.0. Please see the 20141231 entry below for information about

View file

@ -328,7 +328,7 @@ update(void)
if (dbi.id[0]) {
run_change("rem", &c, dbi.id, 0);
sockaddr_snprintf(buf, sizeof(buf), "%a", ss);
syslog(LOG_INFO, "released %s/%d:%d after %d seconds",
(*lfun)(LOG_INFO, "released %s/%d:%d after %d seconds",
buf, c.c_lmask, c.c_port, c.c_duration);
}
state_del(state, &c);

View file

@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/branches/release_80/lib/Basic/Version.cpp $");
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_800/final/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));

View file

@ -122,6 +122,7 @@ struct Configuration {
uint64_t>
CallGraphProfile;
bool AllowMultipleDefinition;
bool AllowShlibUndefined;
bool AndroidPackDynRelocs;
bool ARMHasBlx = false;
bool ARMHasMovtMovw = false;

View file

@ -758,6 +758,9 @@ void LinkerDriver::readConfigs(opt::InputArgList &Args) {
Args.hasFlag(OPT_allow_multiple_definition,
OPT_no_allow_multiple_definition, false) ||
hasZOption(Args, "muldefs");
Config->AllowShlibUndefined =
Args.hasFlag(OPT_allow_shlib_undefined, OPT_no_allow_shlib_undefined,
Args.hasArg(OPT_shared));
Config->AuxiliaryList = args::getStrings(Args, OPT_auxiliary);
Config->Bsymbolic = Args.hasArg(OPT_Bsymbolic);
Config->BsymbolicFunctions = Args.hasArg(OPT_Bsymbolic_functions);

View file

@ -865,7 +865,7 @@ SharedFile<ELFT>::SharedFile(MemoryBufferRef M, StringRef DefaultSoName)
// Partially parse the shared object file so that we can call
// getSoName on this object.
template <class ELFT> void SharedFile<ELFT>::parseSoName() {
template <class ELFT> void SharedFile<ELFT>::parseDynamic() {
const Elf_Shdr *DynamicSec = nullptr;
const ELFFile<ELFT> Obj = this->getObj();
ArrayRef<Elf_Shdr> Sections = CHECK(Obj.sections(), this);
@ -902,12 +902,16 @@ template <class ELFT> void SharedFile<ELFT>::parseSoName() {
ArrayRef<Elf_Dyn> Arr =
CHECK(Obj.template getSectionContentsAsArray<Elf_Dyn>(DynamicSec), this);
for (const Elf_Dyn &Dyn : Arr) {
if (Dyn.d_tag == DT_SONAME) {
if (Dyn.d_tag == DT_NEEDED) {
uint64_t Val = Dyn.getVal();
if (Val >= this->StringTable.size())
fatal(toString(this) + ": invalid DT_NEEDED entry");
DtNeeded.push_back(this->StringTable.data() + Val);
} else if (Dyn.d_tag == DT_SONAME) {
uint64_t Val = Dyn.getVal();
if (Val >= this->StringTable.size())
fatal(toString(this) + ": invalid DT_SONAME entry");
SoName = this->StringTable.data() + Val;
return;
}
}
}
@ -975,7 +979,7 @@ uint32_t SharedFile<ELFT>::getAlignment(ArrayRef<Elf_Shdr> Sections,
return (Ret > UINT32_MAX) ? 0 : Ret;
}
// Fully parse the shared object file. This must be called after parseSoName().
// Fully parse the shared object file. This must be called after parseDynamic().
//
// This function parses symbol versions. If a DSO has version information,
// the file has a ".gnu.version_d" section which contains symbol version

View file

@ -323,6 +323,7 @@ template <class ELFT> class SharedFile : public ELFFileBase<ELFT> {
public:
std::vector<const Elf_Verdef *> Verdefs;
std::vector<StringRef> DtNeeded;
std::string SoName;
static bool classof(const InputFile *F) {
@ -331,7 +332,7 @@ template <class ELFT> class SharedFile : public ELFFileBase<ELFT> {
SharedFile(MemoryBufferRef M, StringRef DefaultSoName);
void parseSoName();
void parseDynamic();
void parseRest();
uint32_t getAlignment(ArrayRef<Elf_Shdr> Sections, const Elf_Sym &Sym);
std::vector<const Elf_Verdef *> parseVerdefs();
@ -349,6 +350,9 @@ template <class ELFT> class SharedFile : public ELFFileBase<ELFT> {
// data structures in the output file.
std::map<const Elf_Verdef *, NeededVer> VerdefMap;
// Used for --no-allow-shlib-undefined.
bool AllNeededIsKnown;
// Used for --as-needed
bool IsNeeded;
};

View file

@ -63,6 +63,10 @@ defm allow_multiple_definition: B<"allow-multiple-definition",
"Allow multiple definitions",
"Do not allow multiple definitions (default)">;
defm allow_shlib_undefined: B<"allow-shlib-undefined",
"Allow unresolved references in shared libraries (default when linking a shared library)",
"Do not allow unresolved references in shared libraries (default when linking an executable)">;
defm apply_dynamic_relocs: B<"apply-dynamic-relocs",
"Apply link-time values for dynamic relocations",
"Do not apply link-time values for dynamic relocations (default)">;
@ -492,12 +496,10 @@ def plugin_opt_thinlto: J<"plugin-opt=thinlto">;
def plugin_opt_slash: J<"plugin-opt=/">;
// Options listed below are silently ignored for now for compatibility.
def: F<"allow-shlib-undefined">;
def: F<"detect-odr-violations">;
def: Flag<["-"], "g">;
def: F<"long-plt">;
def: F<"no-add-needed">;
def: F<"no-allow-shlib-undefined">;
def: F<"no-copy-dt-needed-entries">;
def: F<"no-ctors-in-init-array">;
def: F<"no-keep-memory">;

View file

@ -93,7 +93,7 @@ template <class ELFT> void SymbolTable::addFile(InputFile *File) {
// .so file
if (auto *F = dyn_cast<SharedFile<ELFT>>(File)) {
// DSOs are uniquified not by filename but by soname.
F->parseSoName();
F->parseDynamic();
if (errorCount())
return;

View file

@ -80,6 +80,9 @@ class SymbolTable {
void handleDynamicList();
// Set of .so files to not link the same shared object file more than once.
llvm::DenseMap<StringRef, InputFile *> SoNames;
private:
std::pair<Symbol *, bool> insertName(StringRef Name);
@ -107,9 +110,6 @@ class SymbolTable {
// is used to uniquify them.
llvm::DenseSet<llvm::CachedHashStringRef> ComdatGroups;
// Set of .so files to not link the same shared object file more than once.
llvm::DenseMap<StringRef, InputFile *> SoNames;
// A map from demangled symbol names to their symbol objects.
// This mapping is 1:N because two symbols with different versions
// can have the same name. We use this map to handle "extern C++ {}"

View file

@ -1668,6 +1668,27 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
if (In.Iplt && !In.Iplt->empty())
In.Iplt->addSymbols();
if (!Config->AllowShlibUndefined) {
// Error on undefined symbols in a shared object, if all of its DT_NEEDED
// entries are seen. These cases would otherwise lead to runtime errors
// reported by the dynamic linker.
//
// ld.bfd traces all DT_NEEDED to emulate the logic of the dynamic linker to
// catch more cases. That is too much for us. Our approach resembles the one
// used in ld.gold, achieves a good balance to be useful but not too smart.
for (InputFile *File : SharedFiles) {
SharedFile<ELFT> *F = cast<SharedFile<ELFT>>(File);
F->AllNeededIsKnown = llvm::all_of(F->DtNeeded, [&](StringRef Needed) {
return Symtab->SoNames.count(Needed);
});
}
for (Symbol *Sym : Symtab->getSymbols())
if (Sym->isUndefined() && !Sym->isWeak())
if (auto *F = dyn_cast_or_null<SharedFile<ELFT>>(Sym->File))
if (F->AllNeededIsKnown)
error(toString(F) + ": undefined reference to " + toString(*Sym));
}
// Now that we have defined all possible global symbols including linker-
// synthesized ones. Visit all symbols to give the finishing touches.
for (Symbol *Sym : Symtab->getSymbols()) {

View file

@ -56,6 +56,9 @@ option.
.It Fl -allow-multiple-definition
Do not error if a symbol is defined multiple times.
The first definition will be used.
.It Fl -allow-shlib-undefined
Allow unresolved references in shared libraries.
This option is enabled by default when linking a shared library.
.It Fl -apply-dynamic-relocs
Apply link-time values for dynamic relocations.
.It Fl -as-needed
@ -252,6 +255,9 @@ Set target emulation.
.It Fl -Map Ns = Ns Ar file , Fl M Ar file
Print a link map to
.Ar file .
.It Fl -no-allow-shlib-undefined
Do not allow unresolved references in shared libraries.
This option is enabled by default when linking an executable.
.It Fl -no-as-needed
Always set
.Dv DT_NEEDED

View file

@ -3666,8 +3666,13 @@ extern int __kmp_read_from_file(char const *path, char const *format, ...);
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
#define __kmp_load_mxcsr(p) _mm_setcsr(*(p))
#if __SSE__
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*(p)); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#else
static inline void __kmp_load_mxcsr(const kmp_uint32 *) {}
static inline void __kmp_store_mxcsr(kmp_uint32 *) {}
#endif
extern void __kmp_load_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);

View file

@ -8104,7 +8104,7 @@ __kmp_determine_reduction_method(
#elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS
#if KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_HURD
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS || KMP_OS_HURD
// basic tuning

View file

@ -196,9 +196,7 @@ _libproc= libproc
_librtld_db= librtld_db
.endif
.if !defined(COMPAT_32BIT)
SUBDIR.${MK_OPENMP}+= libomp
.endif
SUBDIR.${MK_OPENSSL}+= libmp
SUBDIR.${MK_PMC}+= libpmc libpmcstat
SUBDIR.${MK_RADIUS_SUPPORT}+= libradius

View file

@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "356034"
#define SVN_REVISION "356365"

View file

@ -7,4 +7,4 @@
#define LLD_REPOSITORY_STRING "FreeBSD"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION_STRING "356034-1300002"
#define LLD_REVISION_STRING "356365-1300003"

View file

@ -1,2 +1,2 @@
/* $FreeBSD$ */
#define LLVM_REVISION "svn-r356034"
#define LLVM_REVISION "svn-r356365"

View file

@ -1050,14 +1050,8 @@ kldload_param(const char *name)
kl = kldload(name);
else if (strncmp(name, "allow.mount.", 12) == 0) {
/* Load the matching filesystem */
const char *modname;
const char *modname = name + 12;
if (strcmp("fusefs", name + 12) == 0 ||
strcmp("nofusefs", name + 12) == 0) {
modname = "fuse";
} else {
modname = name + 12;
}
kl = kldload(modname);
if (kl < 0 && errno == ENOENT &&
strncmp(modname, "no", 2) == 0)

View file

@ -58,7 +58,6 @@ CXXFLAGS+= -fno-exceptions
CXXFLAGS+= -fno-rtti
LDFLAGS+= -Wl,--warn-shared-textrel
LDFLAGS+= -Wl,--as-needed
LDFLAGS+= -Wl,--gc-sections
LDFLAGS+= -Wl,-z,noexecstack
LDFLAGS+= -Wl,-fini=__kmp_internal_end_fini
@ -67,5 +66,6 @@ LDFLAGS+= -Wl,-soname,libomp.so
VERSION_MAP= ${OMPSRC}/exports_so.txt
LIBADD+= pthread
LIBADD+= m
.include <bsd.lib.mk>

View file

@ -42,6 +42,9 @@ PUBLICSNAP= --publicsnap
EC2SNSREL= ${REVISION}-${BRANCH}
EC2SNSVERS= ${EC2_SVNBRANCH}@${EC2_SVNREV}
.endif
.if ${TARGET_ARCH} != "amd64"
EC2ARCH= --${TARGET_ARCH:S/aarch64/arm64/}
.endif
CLEANFILES+= ec2ami
@ -82,7 +85,8 @@ ec2ami: cw-ec2 ${CW_EC2_PORTINSTALL}
@echo "--------------------------------------------------------------"
@false
.endif
/usr/local/bin/bsdec2-image-upload ${PUBLISH} ${PUBLICSNAP} --sriov --ena \
/usr/local/bin/bsdec2-image-upload ${PUBLISH} ${PUBLICSNAP} \
${EC2ARCH} --sriov --ena \
${.OBJDIR}/ec2.raw \
"${TYPE} ${REVISION}-${BRANCH}-${TARGET}${AMINAMESUFFIX}" \
"${TYPE}/${TARGET} ${EC2_SVNBRANCH}@${EC2_SVNREV}" \

View file

@ -6,7 +6,15 @@
# Packages to install into the image we're creating. This is a deliberately
# minimalist set, providing only the packages necessary to bootstrap further
# package installation as specified via EC2 user-data.
export VM_EXTRA_PACKAGES="ec2-scripts firstboot-freebsd-update firstboot-pkgs dual-dhclient amazon-ssm-agent"
export VM_EXTRA_PACKAGES="ec2-scripts firstboot-freebsd-update firstboot-pkgs dual-dhclient"
# Include the amazon-ssm-agent package in amd64 images, since some users want
# to be able to use it on systems which are not connected to the Internet.
# (It is not enabled by default, however.) This package does not exist for
# aarch64, so we have to be selective about when we install it.
if [ "${TARGET_ARCH}" = "amd64" ]; then
export VM_EXTRA_PACKAGES="${VM_EXTRA_PACKAGES} amazon-ssm-agent"
fi
# Set to a list of third-party software to enable in rc.conf(5).
export VM_RC_LIST="ec2_configinit ec2_fetchkey ec2_loghostkey firstboot_freebsd_update firstboot_pkgs ntpd"

View file

@ -40,10 +40,10 @@ __FBSDID("$FreeBSD$");
#include <unistd.h>
#include <be.h>
#include "bectl.h"
static void jailparam_grow(void);
#define MNTTYPE_ZFS 222
static void jailparam_add(const char *name, const char *val);
static int jailparam_del(const char *name);
static bool jailparam_addarg(char *arg);
@ -51,84 +51,28 @@ static int jailparam_delarg(char *arg);
static int bectl_search_jail_paths(const char *mnt);
static int bectl_locate_jail(const char *ident);
static int bectl_jail_cleanup(char *mountpoint, int jid);
/* We'll start with 8 parameters initially and grow as needed. */
#define INIT_PARAMCOUNT 8
static struct jailparam *jp;
static int jpcnt;
static int jpused;
static char mnt_loc[BE_MAXPATHLEN];
static nvlist_t *jailparams;
static void
jailparam_grow(void)
{
static const char *disabled_params[] = {
"command", "exec.start", "nopersist", "persist", NULL
};
jpcnt *= 2;
jp = realloc(jp, jpcnt * sizeof(*jp));
if (jp == NULL)
err(2, "realloc");
}
static void
jailparam_add(const char *name, const char *val)
{
int i;
for (i = 0; i < jpused; ++i) {
if (strcmp(name, jp[i].jp_name) == 0)
break;
}
if (i < jpused)
jailparam_free(&jp[i], 1);
else if (jpused == jpcnt)
/* The next slot isn't allocated yet */
jailparam_grow();
if (jailparam_init(&jp[i], name) != 0)
return;
if (jailparam_import(&jp[i], val) != 0)
return;
++jpused;
nvlist_add_string(jailparams, name, val);
}
static int
jailparam_del(const char *name)
{
int i;
char *val;
for (i = 0; i < jpused; ++i) {
if (strcmp(name, jp[i].jp_name) == 0)
break;
}
if (i == jpused)
return (ENOENT);
for (; i < jpused - 1; ++i) {
val = jailparam_export(&jp[i + 1]);
jailparam_free(&jp[i], 1);
/*
* Given the context, the following will really only fail if
* they can't allocate the copy of the name or value.
*/
if (jailparam_init(&jp[i], jp[i + 1].jp_name) != 0) {
free(val);
return (ENOMEM);
}
if (jailparam_import(&jp[i], val) != 0) {
jailparam_free(&jp[i], 1);
free(val);
return (ENOMEM);
}
free(val);
}
jailparam_free(&jp[i], 1);
--jpused;
nvlist_remove_all(jailparams, name);
return (0);
}
@ -136,6 +80,7 @@ static bool
jailparam_addarg(char *arg)
{
char *name, *val;
size_t i, len;
if (arg == NULL)
return (false);
@ -156,6 +101,15 @@ jailparam_addarg(char *arg)
}
strlcpy(mnt_loc, val, sizeof(mnt_loc));
}
for (i = 0; disabled_params[i] != NULL; i++) {
len = strlen(disabled_params[i]);
if (strncmp(disabled_params[i], name, len) == 0) {
fprintf(stderr, "invalid jail parameter: %s\n", name);
return (false);
}
}
jailparam_add(name, val);
return (true);
}
@ -176,21 +130,127 @@ jailparam_delarg(char *arg)
return (jailparam_del(name));
}
static int
build_jailcmd(char ***argvp, bool interactive, int argc, char *argv[])
{
char *cmd, **jargv, *name, *val;
nvpair_t *nvp;
size_t i, iarg, nargv;
cmd = NULL;
nvp = NULL;
iarg = i = 0;
if (nvlist_size(jailparams, &nargv, NV_ENCODE_NATIVE) != 0)
return (1);
/*
* Number of args + "/usr/sbin/jail", "-c", and ending NULL.
* If interactive also include command.
*/
nargv += 3;
if (interactive) {
if (argc == 0)
nargv++;
else
nargv += argc;
}
jargv = *argvp = calloc(nargv, sizeof(jargv));
if (jargv == NULL)
err(2, "calloc");
jargv[iarg++] = strdup("/usr/sbin/jail");
jargv[iarg++] = strdup("-c");
while ((nvp = nvlist_next_nvpair(jailparams, nvp)) != NULL) {
name = nvpair_name(nvp);
if (nvpair_value_string(nvp, &val) != 0)
continue;
if (asprintf(&jargv[iarg++], "%s=%s", name, val) < 0)
goto error;
}
if (interactive) {
if (argc < 1)
cmd = strdup("/bin/sh");
else {
cmd = argv[0];
argc--;
argv++;
}
if (asprintf(&jargv[iarg++], "command=%s", cmd) < 0) {
goto error;
}
if (argc < 1) {
free(cmd);
cmd = NULL;
}
for (; argc > 0; argc--) {
if (asprintf(&jargv[iarg++], "%s", argv[0]) < 0)
goto error;
argv++;
}
}
return (0);
error:
if (interactive && argc < 1)
free(cmd);
for (; i < iarg - 1; i++) {
free(jargv[i]);
}
free(jargv);
return (1);
}
/* Remove jail and cleanup any non zfs mounts. */
static int
bectl_jail_cleanup(char *mountpoint, int jid)
{
struct statfs *mntbuf;
size_t i, searchlen, mntsize;
if (jid >= 0 && jail_remove(jid) != 0) {
fprintf(stderr, "unable to remove jail");
return (1);
}
searchlen = strnlen(mountpoint, MAXPATHLEN);
mntsize = getmntinfo(&mntbuf, MNT_NOWAIT);
for (i = 0; i < mntsize; i++) {
if (strncmp(mountpoint, mntbuf[i].f_mntonname, searchlen) == 0 &&
mntbuf[i].f_type != MNTTYPE_ZFS) {
if (unmount(mntbuf[i].f_mntonname, 0) != 0) {
fprintf(stderr, "bectl jail: unable to unmount filesystem %s",
mntbuf[i].f_mntonname);
return (1);
}
}
}
return (0);
}
int
bectl_cmd_jail(int argc, char *argv[])
{
char *bootenv, *mountpoint;
int jid, mntflags, opt, ret;
char *bootenv, **jargv, *mountpoint;
int i, jid, mntflags, opt, ret;
bool default_hostname, interactive, unjail;
pid_t pid;
/* XXX TODO: Allow shallow */
mntflags = BE_MNT_DEEP;
default_hostname = interactive = unjail = true;
jpcnt = INIT_PARAMCOUNT;
jp = malloc(jpcnt * sizeof(*jp));
if (jp == NULL)
err(2, "malloc");
if ((nvlist_alloc(&jailparams, NV_UNIQUE_NAME, 0)) != 0) {
fprintf(stderr, "nvlist_alloc() failed\n");
return (1);
}
jailparam_add("persist", "true");
jailparam_add("allow.mount", "true");
@ -210,6 +270,8 @@ bectl_cmd_jail(int argc, char *argv[])
*/
if (strcmp(optarg, "host.hostname") == 0)
default_hostname = false;
} else {
return (1);
}
break;
case 'U':
@ -236,13 +298,14 @@ bectl_cmd_jail(int argc, char *argv[])
argc -= optind;
argv += optind;
/* struct jail be_jail = { 0 }; */
if (argc < 1) {
fprintf(stderr, "bectl jail: missing boot environment name\n");
return (usage(false));
}
bootenv = argv[0];
argc--;
argv++;
/*
* XXX TODO: if its already mounted, perhaps there should be a flag to
@ -264,45 +327,46 @@ bectl_cmd_jail(int argc, char *argv[])
* This is our indicator that path was not set by the user, so we'll use
* the path that libbe generated for us.
*/
if (mountpoint == NULL)
if (mountpoint == NULL) {
jailparam_add("path", mnt_loc);
/* Create the jail for now, attach later as-needed */
jid = jailparam_set(jp, jpused, JAIL_CREATE);
if (jid == -1) {
fprintf(stderr, "unable to create jail. error: %d\n", errno);
mountpoint = mnt_loc;
}
if ((build_jailcmd(&jargv, interactive, argc, argv)) != 0) {
fprintf(stderr, "unable to build argument list for jail command\n");
return (1);
}
jailparam_free(jp, jpused);
free(jp);
/* We're not interactive, nothing more to do here. */
if (!interactive)
return (0);
pid = fork();
switch(pid) {
switch (pid) {
case -1:
perror("fork");
return (1);
case 0:
jail_attach(jid);
/* We're attached within the jail... good bye! */
chdir("/");
if (argc > 1)
execve(argv[1], &argv[1], NULL);
else
execl("/bin/sh", "/bin/sh", NULL);
fprintf(stderr, "bectl jail: failed to execute %s\n",
(argc > 1 ? argv[1] : "/bin/sh"));
_exit(1);
execv("/usr/sbin/jail", jargv);
fprintf(stderr, "bectl jail: failed to execute\n");
default:
/* Wait for the child to get back, see if we need to unjail */
waitpid(pid, NULL, 0);
}
for (i = 0; jargv[i] != NULL; i++) {
free(jargv[i]);
}
free(jargv);
if (!interactive)
return (0);
if (unjail) {
jail_remove(jid);
/*
* We're not checking the jail id result here because in the
* case of invalid param, or last command in jail was an error
* the jail will not exist upon exit. bectl_jail_cleanup will
* only jail_remove if the jid is >= 0.
*/
jid = bectl_locate_jail(bootenv);
bectl_jail_cleanup(mountpoint, jid);
be_unmount(be, bootenv, 0);
}
@ -319,7 +383,6 @@ bectl_search_jail_paths(const char *mnt)
/* jail_getv expects name/value strings */
snprintf(lastjid, sizeof(lastjid), "%d", 0);
jid = 0;
while ((jid = jail_getv(0, "lastjid", lastjid, "path", &jailpath,
NULL)) != -1) {
@ -416,7 +479,7 @@ bectl_cmd_unjail(int argc, char *argv[])
return (1);
}
jail_remove(jid);
bectl_jail_cleanup(path, jid);
be_unmount(be, target, 0);
return (0);

View file

@ -1,7 +1,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 18, 2019
.Dd March 19, 2019
.Dt IPFW 8
.Os
.Sh NAME
@ -3300,6 +3300,7 @@ See
.Sx SYSCTL VARIABLES
for more info.
.Sh IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION
.Ss Stateful translation
.Nm
supports in-kernel IPv6/IPv4 network address and protocol translation.
Stateful NAT64 translation allows IPv6-only clients to contact IPv4 servers
@ -3317,7 +3318,8 @@ to be able use stateful NAT64 translator.
Stateful NAT64 uses a bunch of memory for several types of objects.
When IPv6 client initiates connection, NAT64 translator creates a host entry
in the states table.
Each host entry has a number of ports group entries allocated on demand.
Each host entry uses preallocated IPv4 alias entry.
Each alias entry has a number of ports group entries allocated on demand.
Ports group entries contains connection state entries.
There are several options to control limits and lifetime for these objects.
.Pp
@ -3337,6 +3339,11 @@ First time an original packet is handled and consumed by translator,
and then it is handled again as translated packet.
This behavior can be changed by sysctl variable
.Va net.inet.ip.fw.nat64_direct_output .
Also translated packet can be tagged using
.Cm tag
rule action, and then matched by
.Cm tagged
opcode to avoid loops and extra overhead.
.Pp
The stateful NAT64 configuration command is the following:
.Bd -ragged -offset indent
@ -3364,15 +3371,16 @@ to represent IPv4 addresses. This IPv6 prefix should be configured in DNS64.
The translator implementation follows RFC6052, that restricts the length of
prefixes to one of following: 32, 40, 48, 56, 64, or 96.
The Well-Known IPv6 Prefix 64:ff9b:: must be 96 bits long.
.It Cm max_ports Ar number
Maximum number of ports reserved for upper level protocols to one IPv6 client.
All reserved ports are divided into chunks between supported protocols.
The number of connections from one IPv6 client is limited by this option.
Note that closed TCP connections still remain in the list of connections until
.Cm tcp_close_age
interval will not expire.
Default value is
.Ar 2048 .
The special
.Ar ::/length
prefix can be used to handle several IPv6 prefixes with one NAT64 instance.
The NAT64 instance will determine a destination IPv4 address from prefix
.Ar length .
.It Cm states_chunks Ar number
The number of states chunks in single ports group.
Each ports group by default can keep 64 state entries in single chunk.
The above value affects the maximum number of states that can be associated with single IPv4 alias address and port.
The value must be power of 2, and up to 128.
.It Cm host_del_age Ar seconds
The number of seconds until the host entry for a IPv6 client will be deleted
and all its resources will be released due to inactivity.

View file

@ -278,6 +278,7 @@ enum tokens {
TOK_AGG_LEN,
TOK_AGG_COUNT,
TOK_MAX_PORTS,
TOK_STATES_CHUNKS,
TOK_JMAXLEN,
TOK_PORT_RANGE,
TOK_HOST_DEL_AGE,

View file

@ -87,68 +87,70 @@ nat64lsn_print_states(void *buf)
char sflags[4], *sf, *proto;
ipfw_obj_header *oh;
ipfw_obj_data *od;
ipfw_nat64lsn_stg *stg;
ipfw_nat64lsn_state *ste;
ipfw_nat64lsn_stg_v1 *stg;
ipfw_nat64lsn_state_v1 *ste;
uint64_t next_idx;
int i, sz;
oh = (ipfw_obj_header *)buf;
od = (ipfw_obj_data *)(oh + 1);
stg = (ipfw_nat64lsn_stg *)(od + 1);
stg = (ipfw_nat64lsn_stg_v1 *)(od + 1);
sz = od->head.length - sizeof(*od);
next_idx = 0;
while (sz > 0 && next_idx != 0xFF) {
next_idx = stg->next_idx;
next_idx = stg->next.index;
sz -= sizeof(*stg);
if (stg->count == 0) {
stg++;
continue;
}
switch (stg->proto) {
case IPPROTO_TCP:
proto = "TCP";
break;
case IPPROTO_UDP:
proto = "UDP";
break;
case IPPROTO_ICMPV6:
proto = "ICMPv6";
break;
}
inet_ntop(AF_INET6, &stg->host6, s, sizeof(s));
/*
* NOTE: addresses are in network byte order,
* ports are in host byte order.
*/
inet_ntop(AF_INET, &stg->alias4, a, sizeof(a));
ste = (ipfw_nat64lsn_state *)(stg + 1);
ste = (ipfw_nat64lsn_state_v1 *)(stg + 1);
for (i = 0; i < stg->count && sz > 0; i++) {
sf = sflags;
inet_ntop(AF_INET6, &ste->host6, s, sizeof(s));
inet_ntop(AF_INET, &ste->daddr, f, sizeof(f));
if (stg->proto == IPPROTO_TCP) {
switch (ste->proto) {
case IPPROTO_TCP:
proto = "TCP";
if (ste->flags & 0x02)
*sf++ = 'S';
if (ste->flags & 0x04)
*sf++ = 'E';
if (ste->flags & 0x01)
*sf++ = 'F';
break;
case IPPROTO_UDP:
proto = "UDP";
break;
case IPPROTO_ICMP:
proto = "ICMPv6";
break;
}
*sf = '\0';
switch (stg->proto) {
switch (ste->proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
printf("%s:%d\t%s:%d\t%s\t%s\t%d\t%s:%d\n",
s, ste->sport, a, ste->aport, proto,
sflags, ste->idle, f, ste->dport);
break;
case IPPROTO_ICMPV6:
case IPPROTO_ICMP:
printf("%s\t%s\t%s\t\t%d\t%s\n",
s, a, proto, ste->idle, f);
break;
default:
printf("%s\t%s\t%d\t\t%d\t%s\n",
s, a, stg->proto, ste->idle, f);
s, a, ste->proto, ste->idle, f);
}
ste++;
sz -= sizeof(*ste);
}
stg = (ipfw_nat64lsn_stg *)ste;
stg = (ipfw_nat64lsn_stg_v1 *)ste;
}
return (next_idx);
}
@ -174,6 +176,7 @@ nat64lsn_states_cb(ipfw_nat64lsn_cfg *cfg, const char *name, uint8_t set)
err(EX_OSERR, NULL);
do {
oh = (ipfw_obj_header *)buf;
oh->opheader.version = 1; /* Force using of new API */
od = (ipfw_obj_data *)(oh + 1);
nat64lsn_fill_ntlv(&oh->ntlv, cfg->name, set);
od->head.type = IPFW_TLV_OBJDATA;
@ -363,12 +366,8 @@ nat64lsn_parse_int(const char *arg, const char *desc)
static struct _s_x nat64newcmds[] = {
{ "prefix6", TOK_PREFIX6 },
{ "agg_len", TOK_AGG_LEN }, /* not yet */
{ "agg_count", TOK_AGG_COUNT }, /* not yet */
{ "port_range", TOK_PORT_RANGE }, /* not yet */
{ "jmaxlen", TOK_JMAXLEN },
{ "prefix4", TOK_PREFIX4 },
{ "max_ports", TOK_MAX_PORTS },
{ "host_del_age", TOK_HOST_DEL_AGE },
{ "pg_del_age", TOK_PG_DEL_AGE },
{ "tcp_syn_age", TOK_TCP_SYN_AGE },
@ -376,10 +375,13 @@ static struct _s_x nat64newcmds[] = {
{ "tcp_est_age", TOK_TCP_EST_AGE },
{ "udp_age", TOK_UDP_AGE },
{ "icmp_age", TOK_ICMP_AGE },
{ "states_chunks",TOK_STATES_CHUNKS },
{ "log", TOK_LOG },
{ "-log", TOK_LOGOFF },
{ "allow_private", TOK_PRIVATE },
{ "-allow_private", TOK_PRIVATEOFF },
/* for compatibility with old configurations */
{ "max_ports", TOK_MAX_PORTS }, /* unused */
{ NULL, 0 }
};
@ -436,34 +438,10 @@ nat64lsn_create(const char *name, uint8_t set, int ac, char **av)
nat64lsn_parse_prefix(*av, AF_INET6, &cfg->prefix6,
&cfg->plen6);
if (ipfw_check_nat64prefix(&cfg->prefix6,
cfg->plen6) != 0)
cfg->plen6) != 0 &&
!IN6_IS_ADDR_UNSPECIFIED(&cfg->prefix6))
errx(EX_USAGE, "Bad prefix6 %s", *av);
ac--; av++;
break;
#if 0
case TOK_AGG_LEN:
NEED1("Aggregation prefix len required");
cfg->agg_prefix_len = nat64lsn_parse_int(*av, opt);
ac--; av++;
break;
case TOK_AGG_COUNT:
NEED1("Max per-prefix count required");
cfg->agg_prefix_max = nat64lsn_parse_int(*av, opt);
ac--; av++;
break;
case TOK_PORT_RANGE:
NEED1("port range x[:y] required");
if ((p = strchr(*av, ':')) == NULL)
cfg->min_port = (uint16_t)nat64lsn_parse_int(
*av, opt);
else {
*p++ = '\0';
cfg->min_port = (uint16_t)nat64lsn_parse_int(
*av, opt);
cfg->max_port = (uint16_t)nat64lsn_parse_int(
p, opt);
}
ac--; av++;
break;
case TOK_JMAXLEN:
@ -471,7 +449,6 @@ nat64lsn_create(const char *name, uint8_t set, int ac, char **av)
cfg->jmaxlen = nat64lsn_parse_int(*av, opt);
ac--; av++;
break;
#endif
case TOK_MAX_PORTS:
NEED1("Max per-user ports required");
cfg->max_ports = nat64lsn_parse_int(*av, opt);
@ -519,6 +496,12 @@ nat64lsn_create(const char *name, uint8_t set, int ac, char **av)
*av, opt);
ac--; av++;
break;
case TOK_STATES_CHUNKS:
NEED1("number of chunks required");
cfg->states_chunks = (uint8_t)nat64lsn_parse_int(
*av, opt);
ac--; av++;
break;
case TOK_LOG:
cfg->flags |= NAT64_LOG;
break;
@ -630,6 +613,12 @@ nat64lsn_config(const char *name, uint8_t set, int ac, char **av)
*av, opt);
ac--; av++;
break;
case TOK_STATES_CHUNKS:
NEED1("number of chunks required");
cfg->states_chunks = (uint8_t)nat64lsn_parse_int(
*av, opt);
ac--; av++;
break;
case TOK_LOG:
cfg->flags |= NAT64_LOG;
break;
@ -789,31 +778,24 @@ nat64lsn_show_cb(ipfw_nat64lsn_cfg *cfg, const char *name, uint8_t set)
printf("nat64lsn %s prefix4 %s/%u", cfg->name, abuf, cfg->plen4);
inet_ntop(AF_INET6, &cfg->prefix6, abuf, sizeof(abuf));
printf(" prefix6 %s/%u", abuf, cfg->plen6);
#if 0
printf("agg_len %u agg_count %u ", cfg->agg_prefix_len,
cfg->agg_prefix_max);
if (cfg->min_port != NAT64LSN_PORT_MIN ||
cfg->max_port != NAT64LSN_PORT_MAX)
printf(" port_range %u:%u", cfg->min_port, cfg->max_port);
if (cfg->jmaxlen != NAT64LSN_JMAXLEN)
printf(" jmaxlen %u ", cfg->jmaxlen);
#endif
if (cfg->max_ports != NAT64LSN_MAX_PORTS)
printf(" max_ports %u", cfg->max_ports);
if (cfg->nh_delete_delay != NAT64LSN_HOST_AGE)
if (co.verbose || cfg->states_chunks > 1)
printf(" states_chunks %u", cfg->states_chunks);
if (co.verbose || cfg->nh_delete_delay != NAT64LSN_HOST_AGE)
printf(" host_del_age %u", cfg->nh_delete_delay);
if (cfg->pg_delete_delay != NAT64LSN_PG_AGE)
printf(" pg_del_age %u ", cfg->pg_delete_delay);
if (cfg->st_syn_ttl != NAT64LSN_TCP_SYN_AGE)
if (co.verbose || cfg->pg_delete_delay != NAT64LSN_PG_AGE)
printf(" pg_del_age %u", cfg->pg_delete_delay);
if (co.verbose || cfg->st_syn_ttl != NAT64LSN_TCP_SYN_AGE)
printf(" tcp_syn_age %u", cfg->st_syn_ttl);
if (cfg->st_close_ttl != NAT64LSN_TCP_FIN_AGE)
if (co.verbose || cfg->st_close_ttl != NAT64LSN_TCP_FIN_AGE)
printf(" tcp_close_age %u", cfg->st_close_ttl);
if (cfg->st_estab_ttl != NAT64LSN_TCP_EST_AGE)
if (co.verbose || cfg->st_estab_ttl != NAT64LSN_TCP_EST_AGE)
printf(" tcp_est_age %u", cfg->st_estab_ttl);
if (cfg->st_udp_ttl != NAT64LSN_UDP_AGE)
if (co.verbose || cfg->st_udp_ttl != NAT64LSN_UDP_AGE)
printf(" udp_age %u", cfg->st_udp_ttl);
if (cfg->st_icmp_ttl != NAT64LSN_ICMP_AGE)
if (co.verbose || cfg->st_icmp_ttl != NAT64LSN_ICMP_AGE)
printf(" icmp_age %u", cfg->st_icmp_ttl);
if (co.verbose || cfg->jmaxlen != NAT64LSN_JMAXLEN)
printf(" jmaxlen %u", cfg->jmaxlen);
if (cfg->flags & NAT64_LOG)
printf(" log");
if (cfg->flags & NAT64_ALLOW_PRIVATE)

View file

@ -501,7 +501,7 @@ init_backgrounded(void)
len = sizeof(ibg);
if (sysctlbyname("vfs.fuse.init_backgrounded", &ibg, &len, NULL, 0))
if (sysctlbyname("vfs.fusefs.init_backgrounded", &ibg, &len, NULL, 0))
return (0);
return (ibg);

View file

@ -48,6 +48,7 @@
.Nm SYSCTL_ADD_SBINTIME_MSEC ,
.Nm SYSCTL_ADD_SBINTIME_USEC ,
.Nm SYSCTL_ADD_STRING ,
.Nm SYSCTL_ADD_CONST_STRING ,
.Nm SYSCTL_ADD_STRUCT ,
.Nm SYSCTL_ADD_U8 ,
.Nm SYSCTL_ADD_U16 ,
@ -82,6 +83,7 @@
.Nm SYSCTL_SBINTIME_MSEC ,
.Nm SYSCTL_SBINTIME_USEC ,
.Nm SYSCTL_STRING ,
.Nm SYSCTL_CONST_STRING ,
.Nm SYSCTL_STRUCT ,
.Nm SYSCTL_U8 ,
.Nm SYSCTL_U16 ,
@ -291,6 +293,16 @@
.Fa "const char *descr"
.Fc
.Ft struct sysctl_oid *
.Fo SYSCTL_ADD_CONST_STRING
.Fa "struct sysctl_ctx_list *ctx"
.Fa "struct sysctl_oid_list *parent"
.Fa "int number"
.Fa "const char *name"
.Fa "int ctlflags"
.Fa "const char *ptr"
.Fa "const char *descr"
.Fc
.Ft struct sysctl_oid *
.Fo SYSCTL_ADD_STRUCT
.Fa "struct sysctl_ctx_list *ctx"
.Fa "struct sysctl_oid_list *parent"
@ -443,6 +455,7 @@
.Fn SYSCTL_SBINTIME_MSEC parent number name ctlflags ptr descr
.Fn SYSCTL_SBINTIME_USEC parent number name ctlflags ptr descr
.Fn SYSCTL_STRING parent number name ctlflags arg len descr
.Fn SYSCTL_CONST_STRING parent number name ctlflags arg descr
.Fn SYSCTL_STRUCT parent number name ctlflags ptr struct_type descr
.Fn SYSCTL_U8 parent number name ctlflags ptr val descr
.Fn SYSCTL_U16 parent number name ctlflags ptr val descr
@ -607,6 +620,11 @@ If the
.Fa len
argument in zero, the string length is computed at every access to the OID using
.Xr strlen 3 .
Use the
.Fn SYSCTL_CONST_STRING
macro or the
.Fn SYSCTL_ADD_CONST_STRING
function to add a sysctl for a constant string.
.Sh CREATING OPAQUE SYSCTLS
The
.Fn SYSCTL_OPAQUE
@ -658,6 +676,7 @@ Static sysctls are declared using one of the
.Fn SYSCTL_SBINTIME_MSEC ,
.Fn SYSCTL_SBINTIME_USEC ,
.Fn SYSCTL_STRING ,
.Fn SYSCTL_CONST_STRING ,
.Fn SYSCTL_STRUCT ,
.Fn SYSCTL_U8 ,
.Fn SYSCTL_U16 ,
@ -690,6 +709,7 @@ Dynamic nodes are created using one of the
.Fn SYSCTL_ADD_SBINTIME_MSEC ,
.Fn SYSCTL_ADD_SBINTIME_USEC ,
.Fn SYSCTL_ADD_STRING ,
.Fn SYSCTL_ADD_CONST_STRING ,
.Fn SYSCTL_ADD_STRUCT ,
.Fn SYSCTL_ADD_U8 ,
.Fn SYSCTL_ADD_U16 ,

View file

@ -399,7 +399,7 @@ BROKEN_OPTIONS+=NVME
BROKEN_OPTIONS+=BSD_CRTBEGIN
.endif
.if ${COMPILER_FEATURES:Mc++11} && ${__T} == "amd64"
.if ${COMPILER_FEATURES:Mc++11} && (${__T} == "amd64" || ${__T} == "i386")
__DEFAULT_YES_OPTIONS+=OPENMP
.else
__DEFAULT_NO_OPTIONS+=OPENMP

View file

@ -283,6 +283,12 @@ bf_init(void)
/* try to load and run init file if present */
if ((fd = open("/boot/boot.4th", O_RDONLY)) != -1) {
#ifdef LOADER_VERIEXEC
if (verify_file(fd, "/boot/boot.4th", 0, VE_GUESS) < 0) {
close(fd);
return;
}
#endif
(void)ficlExecFD(bf_vm, fd);
close(fd);
}

View file

@ -868,14 +868,16 @@ fake_modname(const char *name)
sp++;
else
sp = name;
ep = strrchr(name, '.');
if (ep) {
if (ep == name) {
sp = invalid_name;
ep = invalid_name + sizeof(invalid_name) - 1;
}
} else
ep = name + strlen(name);
ep = strrchr(sp, '.');
if (ep == NULL) {
ep = sp + strlen(sp);
}
if (ep == sp) {
sp = invalid_name;
ep = invalid_name + sizeof(invalid_name) - 1;
}
len = ep - sp;
fp = malloc(len + 1);
if (fp == NULL)

View file

@ -1075,6 +1075,12 @@ sgx_get_epc_area(struct sgx_softc *sc)
(cp[2] & 0xfffff000);
sc->npages = sc->epc_size / SGX_PAGE_SIZE;
if (sc->epc_size == 0 || sc->epc_base == 0) {
printf("%s: Incorrect EPC data: EPC base %lx, size %lu\n",
__func__, sc->epc_base, sc->epc_size);
return (EINVAL);
}
if (cp[3] & 0xffff)
sc->enclave_size_max = (1 << ((cp[3] >> 8) & 0xff));
else

View file

@ -90,22 +90,22 @@ typedef struct fdt_platform_class fdt_platform_def_t;
extern platform_method_t fdt_platform_methods[];
#define FDT_PLATFORM_DEF2(NAME, VAR_NAME, NAME_STR, size, compatible, \
delay) \
CTASSERT(delay > 0); \
#define FDT_PLATFORM_DEF2(NAME, VAR_NAME, NAME_STR, _size, _compatible, \
_delay) \
CTASSERT(_delay > 0); \
static fdt_platform_def_t VAR_NAME ## _fdt_platform = { \
.name = NAME_STR, \
.methods = fdt_platform_methods, \
.fdt_compatible = compatible, \
.fdt_compatible = _compatible, \
}; \
static kobj_class_t VAR_NAME ## _baseclasses[] = \
{ (kobj_class_t)&VAR_NAME ## _fdt_platform, NULL }; \
static platform_def_t VAR_NAME ## _platform = { \
NAME_STR, \
NAME ## _methods, \
size, \
VAR_NAME ## _baseclasses, \
delay, \
.name = NAME_STR, \
.methods = NAME ## _methods, \
.size = _size, \
.baseclasses = VAR_NAME ## _baseclasses, \
.delay_count = _delay, \
}; \
DATA_SET(platform_set, VAR_NAME ## _platform)

View file

@ -1071,7 +1071,7 @@ options NFSCL #Network File System client
options AUTOFS #Automounter filesystem
options CD9660 #ISO 9660 filesystem
options FDESCFS #File descriptor filesystem
options FUSE #FUSE support module
options FUSEFS #FUSEFS support module
options MSDOSFS #MS DOS File System (FAT, FAT32)
options NFSLOCKD #Network Lock Manager
options NFSD #Network Filesystem Server

View file

@ -3494,15 +3494,15 @@ fs/fdescfs/fdesc_vfsops.c optional fdescfs
fs/fdescfs/fdesc_vnops.c optional fdescfs
fs/fifofs/fifo_vnops.c standard
fs/cuse/cuse.c optional cuse
fs/fuse/fuse_device.c optional fuse
fs/fuse/fuse_file.c optional fuse
fs/fuse/fuse_internal.c optional fuse
fs/fuse/fuse_io.c optional fuse
fs/fuse/fuse_ipc.c optional fuse
fs/fuse/fuse_main.c optional fuse
fs/fuse/fuse_node.c optional fuse
fs/fuse/fuse_vfsops.c optional fuse
fs/fuse/fuse_vnops.c optional fuse
fs/fuse/fuse_device.c optional fusefs
fs/fuse/fuse_file.c optional fusefs
fs/fuse/fuse_internal.c optional fusefs
fs/fuse/fuse_io.c optional fusefs
fs/fuse/fuse_ipc.c optional fusefs
fs/fuse/fuse_main.c optional fusefs
fs/fuse/fuse_node.c optional fusefs
fs/fuse/fuse_vfsops.c optional fusefs
fs/fuse/fuse_vnops.c optional fusefs
fs/msdosfs/msdosfs_conv.c optional msdosfs
fs/msdosfs/msdosfs_denode.c optional msdosfs
fs/msdosfs/msdosfs_fat.c optional msdosfs
@ -4398,9 +4398,9 @@ netpfil/ipfw/nat64/nat64clat.c optional inet inet6 ipfirewall \
netpfil/ipfw/nat64/nat64clat_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \
ipfirewall_nat64
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \

View file

@ -140,6 +140,8 @@ kernel-obj:
.if !defined(NO_MODULES)
modules: modules-all
modules-depend: beforebuild
modules-all: beforebuild
.if !defined(NO_MODULES_OBJ)
modules-all modules-depend: modules-obj
@ -328,6 +330,11 @@ ${__obj}: ${OBJS_DEPEND_GUESS.${__obj}}
.depend: .PRECIOUS ${SRCS}
.if ${COMPILER_TYPE} == "clang" || \
(${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} >= 60000)
_MAP_DEBUG_PREFIX= yes
.endif
_ILINKS= machine
.if ${MACHINE} != ${MACHINE_CPUARCH} && ${MACHINE} != "arm64"
_ILINKS+= ${MACHINE_CPUARCH}
@ -337,12 +344,25 @@ _ILINKS+= x86
.endif
# Ensure that the link exists without depending on it when it exists.
# Ensure that debug info references the path in the source tree.
.for _link in ${_ILINKS}
.if !exists(${.OBJDIR}/${_link})
${SRCS} ${CLEAN:M*.o}: ${_link}
.endif
.if defined(_MAP_DEBUG_PREFIX)
.if ${_link} == "machine"
CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
.else
CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
.endif
.endif
.endfor
.if defined(_MAP_DEBUG_PREFIX)
# Ensure that DWARF info contains a full path for auto-generated headers.
CFLAGS+= -fdebug-prefix-map=.=${.OBJDIR}
.endif
${_ILINKS}:
@case ${.TARGET} in \
machine) \

View file

@ -267,6 +267,11 @@ ${FULLPROG}: ${OBJS}
${OBJCOPY} --strip-debug ${.TARGET}
.endif
.if ${COMPILER_TYPE} == "clang" || \
(${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} >= 60000)
_MAP_DEBUG_PREFIX= yes
.endif
_ILINKS=machine
.if ${MACHINE} != ${MACHINE_CPUARCH} && ${MACHINE} != "arm64"
_ILINKS+=${MACHINE_CPUARCH}
@ -283,12 +288,25 @@ beforebuild: ${_ILINKS}
# Ensure that the links exist without depending on it when it exists which
# causes all the modules to be rebuilt when the directory pointed to changes.
# Ensure that debug info references the path in the source tree.
.for _link in ${_ILINKS}
.if !exists(${.OBJDIR}/${_link})
OBJS_DEPEND_GUESS+= ${_link}
.endif
.if defined(_MAP_DEBUG_PREFIX)
.if ${_link} == "machine"
CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
.else
CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
.endif
.endif
.endfor
.if defined(_MAP_DEBUG_PREFIX)
# Ensure that DWARF info contains a full path for auto-generated headers.
CFLAGS+= -fdebug-prefix-map=.=${.OBJDIR}
.endif
.NOPATH: ${_ILINKS}
${_ILINKS}:

View file

@ -253,7 +253,7 @@ CD9660 opt_dontuse.h
EXT2FS opt_dontuse.h
FDESCFS opt_dontuse.h
FFS opt_dontuse.h
FUSE opt_dontuse.h
FUSEFS opt_dontuse.h
MSDOSFS opt_dontuse.h
NANDFS opt_dontuse.h
NULLFS opt_dontuse.h

View file

@ -1217,6 +1217,7 @@ ar9300_ani_ar_poll(struct ath_hal *ah, const HAL_NODE_STATS *stats,
cck_phy_err_cnt = OS_REG_READ(ah, AR_PHY_ERR_2);
/* Populate HAL_ANISTATS */
/* XXX TODO: are these correct? */
if (ani_stats) {
ani_stats->cckphyerr_cnt =
cck_phy_err_cnt - ani_state->cck_phy_err_count;
@ -1257,18 +1258,32 @@ ar9300_ani_ar_poll(struct ath_hal *ah, const HAL_NODE_STATS *stats,
return;
}
/*
* Calculate the OFDM/CCK phy error rate over the listen time interval.
* This is used in subsequent math to see if the OFDM/CCK phy error rate
* is above or below the threshold checks.
*/
ofdm_phy_err_rate =
ani_state->ofdm_phy_err_count * 1000 / ani_state->listen_time;
cck_phy_err_rate =
ani_state->cck_phy_err_count * 1000 / ani_state->listen_time;
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: listen_time=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
"%s: listen_time=%d (total: %d) OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
__func__, listen_time,
ani_state->listen_time,
ani_state->ofdm_noise_immunity_level, ofdm_phy_err_rate,
ani_state->cck_noise_immunity_level, cck_phy_err_rate,
ani_state->ofdms_turn);
/*
* Check for temporary noise spurs. This is intended to be used by
* rate control to check if we should try higher packet rates or not.
* If the noise period is short enough then we shouldn't avoid trying
* higher rates but if the noise is high/sustained then it's likely
* not a great idea to try the higher MCS rates.
*/
if (ani_state->listen_time >= HAL_NOISE_DETECT_PERIOD) {
old_phy_noise_spur = ani_state->phy_noise_spur;
if (ofdm_phy_err_rate <= ani_state->ofdm_trig_low &&
@ -1281,7 +1296,7 @@ ar9300_ani_ar_poll(struct ath_hal *ah, const HAL_NODE_STATS *stats,
}
if (old_phy_noise_spur != ani_state->phy_noise_spur) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: enviroment change from %d to %d\n",
"%s: environment change from %d to %d\n",
__func__, old_phy_noise_spur, ani_state->phy_noise_spur);
}
}
@ -1304,6 +1319,10 @@ ar9300_ani_ar_poll(struct ath_hal *ah, const HAL_NODE_STATS *stats,
ar9300_ani_lower_immunity(ah);
ani_state->ofdms_turn = !ani_state->ofdms_turn;
}
/*
* Force an ANI restart regardless of whether the lower immunity
* level was met.
*/
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: 1 listen_time=%d ofdm=%d/s cck=%d/s - "
"calling ar9300_ani_restart\n",
@ -1337,6 +1356,13 @@ ar9300_ani_ar_poll(struct ath_hal *ah, const HAL_NODE_STATS *stats,
ani_state->ofdms_turn = AH_TRUE;
}
}
/*
* Note that currently this poll function doesn't reset the listen
* time after it accumulates a second worth of error samples.
* It will continue to accumulate samples until a counter overflows,
* or a raise threshold is met, or 5 seconds passes.
*/
}
/*

View file

@ -232,6 +232,8 @@ static const struct {
{0xa2828086, 0x00, "Intel Union Point", 0},
{0xa2868086, 0x00, "Intel Union Point (RAID)", 0},
{0xa28e8086, 0x00, "Intel Union Point (RAID)", 0},
{0xa3528086, 0x00, "Intel Cannon Lake", 0},
{0xa3538086, 0x00, "Intel Cannon Lake", 0},
{0x23238086, 0x00, "Intel DH89xxCC", 0},
{0x2360197b, 0x00, "JMicron JMB360", 0},
{0x2361197b, 0x00, "JMicron JMB361", AHCI_Q_NOFORCE | AHCI_Q_1CH},

View file

@ -166,7 +166,7 @@ cpufreq_dt_set(device_t dev, const struct cf_setting *set)
struct cpufreq_dt_softc *sc;
const struct cpufreq_dt_opp *opp, *copp;
uint64_t freq;
int error = 0;
int uvolt, error;
sc = device_get_softc(dev);
@ -174,23 +174,38 @@ cpufreq_dt_set(device_t dev, const struct cf_setting *set)
device_printf(dev, "Can't get current clk freq\n");
return (ENXIO);
}
/* Try to get current valtage by using regulator first. */
error = regulator_get_voltage(sc->reg, &uvolt);
if (error != 0) {
/*
* Try oppoints table as backup way. However,
* this is insufficient because the actual processor
* frequency may not be in the table. PLL frequency
* granularity can be different that granularity of
* oppoint table.
*/
copp = cpufreq_dt_find_opp(sc->dev, freq);
if (copp == NULL) {
device_printf(dev,
"Can't find the current freq in opp\n");
return (ENOENT);
}
uvolt = copp->uvolt_target;
DEBUG(sc->dev, "Current freq %ju\n", freq);
DEBUG(sc->dev, "Target freq %ju\n", (uint64_t)set->freq * 1000000);
copp = cpufreq_dt_find_opp(sc->dev, freq);
if (copp == NULL) {
device_printf(dev, "Can't find the current freq in opp\n");
return (ENOENT);
}
opp = cpufreq_dt_find_opp(sc->dev, set->freq * 1000000);
if (opp == NULL) {
device_printf(dev, "Couldn't find an opp for this freq\n");
return (EINVAL);
}
DEBUG(sc->dev, "Current freq %ju, uvolt: %d\n", freq, uvolt);
DEBUG(sc->dev, "Target freq %ju, , uvolt: %d\n",
opp->freq, opp->uvolt_target);
if (copp->uvolt_target < opp->uvolt_target) {
if (uvolt < opp->uvolt_target) {
DEBUG(dev, "Changing regulator from %u to %u\n",
copp->uvolt_target, opp->uvolt_target);
uvolt, opp->uvolt_target);
error = regulator_set_voltage(sc->reg,
opp->uvolt_min,
opp->uvolt_max);
@ -201,7 +216,7 @@ cpufreq_dt_set(device_t dev, const struct cf_setting *set)
}
DEBUG(dev, "Setting clk to %ju\n", opp->freq);
error = clk_set_freq(sc->clk, opp->freq, 0);
error = clk_set_freq(sc->clk, opp->freq, CLK_SET_ROUND_DOWN);
if (error != 0) {
DEBUG(dev, "Failed, backout\n");
/* Restore previous voltage (best effort) */
@ -211,7 +226,9 @@ cpufreq_dt_set(device_t dev, const struct cf_setting *set)
return (ENXIO);
}
if (copp->uvolt_target > opp->uvolt_target) {
if (uvolt > opp->uvolt_target) {
DEBUG(dev, "Changing regulator from %u to %u\n",
uvolt, opp->uvolt_target);
error = regulator_set_voltage(sc->reg,
opp->uvolt_min,
opp->uvolt_max);
@ -219,8 +236,7 @@ cpufreq_dt_set(device_t dev, const struct cf_setting *set)
DEBUG(dev, "Failed to switch regulator to %d\n",
opp->uvolt_target);
/* Restore previous CPU frequency (best effort) */
(void)clk_set_freq(sc->clk,
copp->freq, 0);
(void)clk_set_freq(sc->clk, copp->freq, 0);
return (ENXIO);
}
}
@ -277,7 +293,8 @@ cpufreq_dt_identify(driver_t *driver, device_t parent)
/* The cpu@0 node must have the following properties */
if (!OF_hasprop(node, "clocks") ||
!OF_hasprop(node, "cpu-supply"))
(!OF_hasprop(node, "cpu-supply") &&
!OF_hasprop(node, "cpu0-supply")))
return;
if (!OF_hasprop(node, "operating-points") &&
@ -299,7 +316,9 @@ cpufreq_dt_probe(device_t dev)
node = ofw_bus_get_node(device_get_parent(dev));
if (!OF_hasprop(node, "clocks") ||
!OF_hasprop(node, "cpu-supply"))
(!OF_hasprop(node, "cpu-supply") &&
!OF_hasprop(node, "cpu0-supply")))
return (ENXIO);
if (!OF_hasprop(node, "operating-points") &&
@ -439,9 +458,12 @@ cpufreq_dt_attach(device_t dev)
if (regulator_get_by_ofw_property(dev, node,
"cpu-supply", &sc->reg) != 0) {
device_printf(dev, "no regulator for %s\n",
ofw_bus_get_name(device_get_parent(dev)));
return (ENXIO);
if (regulator_get_by_ofw_property(dev, node,
"cpu0-supply", &sc->reg) != 0) {
device_printf(dev, "no regulator for %s\n",
ofw_bus_get_name(device_get_parent(dev)));
return (ENXIO);
}
}
if (clk_get_by_ofw_index(dev, node, 0, &sc->clk) != 0) {

View file

@ -194,8 +194,10 @@ struct vi_info {
int if_flags;
uint16_t *rss, *nm_rss;
int smt_idx; /* for convenience */
uint16_t viid;
uint16_t viid; /* opaque VI identifier */
uint16_t smt_idx;
uint16_t vin;
uint8_t vfvld;
int16_t xact_addr_filt;/* index of exact MAC address filter */
uint16_t rss_size; /* size of VI's RSS table slice */
uint16_t rss_base; /* start of VI's RSS table slice */

View file

@ -375,8 +375,9 @@ struct adapter_params {
uint32_t mps_bg_map; /* rx buffer group map for all ports (upto 4) */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
};
#define CHELSIO_T4 0x4
@ -756,10 +757,11 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
unsigned int port, unsigned int pf, unsigned int vf,
unsigned int nmac, u8 *mac, u16 *rss_size,
uint8_t *vfvld, uint16_t *vin,
unsigned int portfunc, unsigned int idstype);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
u16 *rss_size);
u16 *rss_size, uint8_t *vfvld, uint16_t *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
@ -770,7 +772,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid
bool free, unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt);
int idx, const u8 *addr, bool persist, uint16_t *smt_idx);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,

View file

@ -7794,6 +7794,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
unsigned int port, unsigned int pf, unsigned int vf,
unsigned int nmac, u8 *mac, u16 *rss_size,
uint8_t *vfvld, uint16_t *vin,
unsigned int portfunc, unsigned int idstype)
{
int ret;
@ -7814,6 +7815,7 @@ int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret)
return ret;
ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
if (mac) {
memcpy(mac, c.mac, sizeof(c.mac));
@ -7830,7 +7832,18 @@ int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
}
if (rss_size)
*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
if (vfvld) {
*vfvld = adap->params.viid_smt_extn_support ?
G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
G_FW_VIID_VIVLD(ret);
}
if (vin) {
*vin = adap->params.viid_smt_extn_support ?
G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
G_FW_VIID_VIN(ret);
}
return ret;
}
/**
@ -7850,10 +7863,10 @@ int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
u16 *rss_size)
u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
{
return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
FW_VI_FUNC_ETH, 0);
vfvld, vin, FW_VI_FUNC_ETH, 0);
}
/**
@ -8030,7 +8043,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
* @idx: index of existing filter for old value of MAC address, or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
* @add_smt: if true also add the address to the HW SMT
* @smt_idx: add MAC to SMT and return its index, or NULL
*
* Modifies an exact-match filter and sets it to the new MAC address if
* @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
@ -8045,7 +8058,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
* MAC value. Note that this index may differ from @idx.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt)
int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
int ret, mode;
struct fw_vi_mac_cmd c;
@ -8054,7 +8067,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
@ -8071,6 +8084,16 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
if (smt_idx) {
if (adap->params.viid_smt_extn_support)
*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
else {
if (chip_id(adap) <= CHELSIO_T5)
*smt_idx = (viid & M_FW_VIID_VIN) << 1;
else
*smt_idx = viid & M_FW_VIID_VIN;
}
}
}
return ret;
}
@ -9331,9 +9354,9 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
u8 addr[6];
int ret, i, j;
u16 rss_size;
struct port_info *p = adap2pinfo(adap, port_id);
u32 param, val;
struct vi_info *vi = &p->vi[0];
for (i = 0, j = -1; i <= p->port_id; i++) {
do {
@ -9351,27 +9374,23 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
t4_update_port_info(p);
}
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
&vi->vfvld, &vi->vin);
if (ret < 0)
return ret;
p->vi[0].viid = ret;
if (chip_id(adap) <= CHELSIO_T5)
p->vi[0].smt_idx = (ret & 0x7f) << 1;
else
p->vi[0].smt_idx = (ret & 0x7f);
p->vi[0].rss_size = rss_size;
vi->viid = ret;
t4_os_set_hw_addr(p, addr);
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
V_FW_PARAMS_PARAM_YZ(vi->viid);
ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
if (ret)
p->vi[0].rss_base = 0xffff;
vi->rss_base = 0xffff;
else {
/* MPASS((val >> 16) == rss_size); */
p->vi[0].rss_base = val & 0xffff;
vi->rss_base = val & 0xffff;
}
return 0;

View file

@ -4798,6 +4798,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
FW_PARAMS_PARAM_DEV_ADD_SMAC = 0x25,
FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
};
/*
@ -6502,6 +6503,19 @@ struct fw_vi_cmd {
(((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U)
#define S_FW_VI_CMD_VFVLD 24
#define M_FW_VI_CMD_VFVLD 0x1
#define V_FW_VI_CMD_VFVLD(x) ((x) << S_FW_VI_CMD_VFVLD)
#define G_FW_VI_CMD_VFVLD(x) \
(((x) >> S_FW_VI_CMD_VFVLD) & M_FW_VI_CMD_VFVLD)
#define F_FW_VI_CMD_VFVLD V_FW_VI_CMD_VFVLD(1U)
#define S_FW_VI_CMD_VIN 16
#define M_FW_VI_CMD_VIN 0xff
#define V_FW_VI_CMD_VIN(x) ((x) << S_FW_VI_CMD_VIN)
#define G_FW_VI_CMD_VIN(x) \
(((x) >> S_FW_VI_CMD_VIN) & M_FW_VI_CMD_VIN)
#define S_FW_VI_CMD_TYPE 15
#define M_FW_VI_CMD_TYPE 0x1
#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE)
@ -6608,6 +6622,12 @@ struct fw_vi_mac_cmd {
} u;
};
#define S_FW_VI_MAC_CMD_SMTID 12
#define M_FW_VI_MAC_CMD_SMTID 0xff
#define V_FW_VI_MAC_CMD_SMTID(x) ((x) << S_FW_VI_MAC_CMD_SMTID)
#define G_FW_VI_MAC_CMD_SMTID(x) \
(((x) >> S_FW_VI_MAC_CMD_SMTID) & M_FW_VI_MAC_CMD_SMTID)
#define S_FW_VI_MAC_CMD_VIID 0
#define M_FW_VI_MAC_CMD_VIID 0xfff
#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID)

View file

@ -859,7 +859,6 @@ struct c4iw_ep {
unsigned int mpa_pkt_len;
u32 ird;
u32 ord;
u32 smac_idx;
u32 tx_chan;
u32 mtu;
u16 mss;

View file

@ -2486,17 +2486,13 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
device_get_nameunit(vi->dev)));
func = vi_mac_funcs[index];
rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, func, 0);
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
device_printf(vi->dev, "failed to allocate virtual interface %d"
"for port %d: %d\n", index, pi->port_id, -rc);
return (-rc);
}
vi->viid = rc;
if (chip_id(sc) <= CHELSIO_T5)
vi->smt_idx = (rc & 0x7f) << 1;
else
vi->smt_idx = (rc & 0x7f);
if (vi->rss_size == 1) {
/*
@ -4113,6 +4109,15 @@ set_params__pre_init(struct adapter *sc)
}
}
/* Enable opaque VIIDs with firmwares that support it. */
param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
val = 1;
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
if (rc == 0 && val == 1)
sc->params.viid_smt_extn_support = true;
else
sc->params.viid_smt_extn_support = false;
return (rc);
}
@ -4825,7 +4830,7 @@ update_mac_settings(struct ifnet *ifp, int flags)
bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
ucaddr, true, true);
ucaddr, true, &vi->smt_idx);
if (rc < 0) {
rc = -rc;
if_printf(ifp, "change_mac failed: %d\n", rc);
@ -5746,7 +5751,7 @@ get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
#define A_PL_INDIR_DATA 0x1fc
static uint64_t
read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
read_vf_stat(struct adapter *sc, u_int vin, int reg)
{
u32 stats[2];
@ -5756,8 +5761,7 @@ read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
} else {
t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
V_PL_VFID(G_FW_VIID_VIN(viid)) |
V_PL_ADDR(VF_MPS_REG(reg)));
V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
}
@ -5765,12 +5769,11 @@ read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
}
static void
t4_get_vi_stats(struct adapter *sc, unsigned int viid,
struct fw_vi_stats_vf *stats)
t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
{
#define GET_STAT(name) \
read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
@ -5793,12 +5796,11 @@ t4_get_vi_stats(struct adapter *sc, unsigned int viid,
}
static void
t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
t4_clr_vi_stats(struct adapter *sc, u_int vin)
{
int reg;
t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
V_PL_VFID(G_FW_VIID_VIN(viid)) |
t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
@ -5820,7 +5822,7 @@ vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
return;
mtx_lock(&sc->reg_lock);
t4_get_vi_stats(sc, vi->viid, &vi->stats);
t4_get_vi_stats(sc, vi->vin, &vi->stats);
getmicrotime(&vi->last_refreshed);
mtx_unlock(&sc->reg_lock);
}
@ -10055,7 +10057,7 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
mtx_lock(&sc->reg_lock);
for_each_vi(pi, v, vi) {
if (vi->flags & VI_INIT_DONE)
t4_clr_vi_stats(sc, vi->viid);
t4_clr_vi_stats(sc, vi->vin);
}
bg_map = pi->mps_bg_map;
v = 0; /* reuse */

View file

@ -799,9 +799,8 @@ cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
cst->tx_total = cst->tx_credits;
cst->plen = 0;
cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
/*
* Queues will be selected later when the connection flowid is available.

View file

@ -3640,9 +3640,8 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
snprintf(name, sizeof(name), "%d", idx);
@ -4043,10 +4042,8 @@ alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
V_TXPKT_INTF(pi->tx_chan));
else
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
V_TXPKT_INTF(pi->tx_chan) |
V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
txq->tc_idx = -1;
txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
M_ZERO | M_WAITOK);
@ -5657,7 +5654,7 @@ send_etid_flowc_wr(struct cxgbe_snd_tag *cst, struct port_info *pi,
struct vi_info *vi)
{
struct wrq_cookie cookie;
u_int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
struct fw_flowc_wr *flowc;
mtx_assert(&cst->lock, MA_OWNED);

View file

@ -107,7 +107,7 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
struct vi_info *vi = toep->vi;
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
unsigned int pfvf = sc->pf << S_FW_VIID_PFN;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),

View file

@ -356,7 +356,7 @@ send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
const int nparams = 6;
unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
const u_int pfvf = sc->pf << S_FW_VIID_PFN;
INP_WLOCK_ASSERT(synqe->lctx->inp);

View file

@ -633,7 +633,6 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
struct adapter *sc = vi->pi->adapter;
struct tp_params *tp = &sc->params.tp;
uint16_t viid = vi->viid;
uint64_t ntuple = 0;
/*
@ -650,12 +649,9 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) {
uint32_t vf = G_FW_VIID_VIN(viid);
uint32_t pf = G_FW_VIID_PFN(viid);
uint32_t vld = G_FW_VIID_VIVLD(viid);
ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
tp->vnic_shift;
}
if (is_t4(sc))

View file

@ -1270,14 +1270,7 @@ em_if_init(if_ctx_t ctx)
/* Setup Multicast table */
em_if_multi_set(ctx);
/*
* Figure out the desired mbuf
* pool for doing jumbos
*/
if (adapter->hw.mac.max_frame_size <= 2048)
adapter->rx_mbuf_sz = MCLBYTES;
else
adapter->rx_mbuf_sz = MJUMPAGESIZE;
adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
em_initialize_receive_unit(ctx);
/* Use real VLAN Filter support? */

View file

@ -517,7 +517,7 @@ phy_get_by_ofw_property(device_t consumer_dev, phandle_t cnode, char *name,
ncells = OF_getencprop_alloc_multi(cnode, name, sizeof(pcell_t),
(void **)&cells);
if (ncells < 1)
return (ENXIO);
return (ENOENT);
/* Tranlate provider to device. */
phydev = OF_device_from_xref(cells[0]);

View file

@ -1091,7 +1091,7 @@ regulator_get_by_ofw_property(device_t cdev, phandle_t cnode, char *name,
ncells = OF_getencprop_alloc_multi(cnode, name, sizeof(*cells),
(void **)&cells);
if (ncells <= 0)
return (ENXIO);
return (ENOENT);
/* Translate xref to device */
regdev = OF_device_from_xref(cells[0]);

View file

@ -238,7 +238,7 @@ syscon_get_by_ofw_property(device_t cdev, phandle_t cnode, char *name,
ncells = OF_getencprop_alloc_multi(cnode, name, sizeof(pcell_t),
(void **)&cells);
if (ncells < 1)
return (ENXIO);
return (ENOENT);
/* Translate to syscon node. */
SYSCON_TOPO_SLOCK();

View file

@ -104,6 +104,7 @@ __FBSDID("$FreeBSD$");
#define ID_LEWISBURG 0xa1a3
#define ID_LEWISBURG2 0xa223
#define ID_KABYLAKE 0xa2a3
#define ID_CANNONLAKE 0xa323
static const struct ichsmb_device {
uint16_t id;
@ -148,6 +149,7 @@ static const struct ichsmb_device {
{ ID_LEWISBURG, "Intel Lewisburg SMBus controller" },
{ ID_LEWISBURG2,"Intel Lewisburg SMBus controller" },
{ ID_KABYLAKE, "Intel Kaby Lake SMBus controller" },
{ ID_CANNONLAKE,"Intel Cannon Lake SMBus controller" },
{ 0, NULL },
};

View file

@ -2880,10 +2880,7 @@ ixgbe_if_init(if_ctx_t ctx)
ixgbe_if_multi_set(ctx);
/* Determine the correct mbuf pool, based on frame size */
if (adapter->max_frame_size <= MCLBYTES)
adapter->rx_mbuf_sz = MCLBYTES;
else
adapter->rx_mbuf_sz = MJUMPAGESIZE;
adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
/* Configure RX settings */
ixgbe_initialize_receive_units(ctx);

View file

@ -220,7 +220,7 @@ static struct if_shared_ctx ixv_sctx_init = {
.isc_vendor_info = ixv_vendor_info_array,
.isc_driver_version = ixv_driver_version,
.isc_driver = &ixv_if_driver,
.isc_flags = IFLIB_TSO_INIT_IP,
.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
.isc_nrxd_min = {MIN_RXD},
.isc_ntxd_min = {MIN_TXD},
@ -629,14 +629,7 @@ ixv_if_init(if_ctx_t ctx)
/* Setup Multicast table */
ixv_if_multi_set(ctx);
/*
* Determine the correct mbuf pool
* for doing jumbo/headersplit
*/
if (ifp->if_mtu > ETHERMTU)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
else
adapter->rx_mbuf_sz = MCLBYTES;
adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
/* Configure RX settings */
ixv_initialize_receive_units(ctx);

View file

@ -614,7 +614,6 @@ iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
static void
iavf_init_queues(struct ixl_vsi *vsi)
{
if_softc_ctx_t scctx = vsi->shared;
struct ixl_tx_queue *tx_que = vsi->tx_queues;
struct ixl_rx_queue *rx_que = vsi->rx_queues;
struct rx_ring *rxr;
@ -625,10 +624,7 @@ iavf_init_queues(struct ixl_vsi *vsi)
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
rxr = &rx_que->rxr;
if (scctx->isc_max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
wr32(vsi->hw, rxr->tail, 0);
}

View file

@ -1300,10 +1300,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
struct i40e_hmc_obj_rxq rctx;
/* Next setup the HMC RX Context */
if (scctx->isc_max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

View file

@ -259,6 +259,13 @@ typedef struct {
extern uint32_t pci_numdevs;
/*
* The bitfield has to be stable and match the fields below (so that
* match_flag_vendor must be bit 0) so we have to do the endian dance. We can't
* use enums or #define constants because then the macros for subsetting matches
* wouldn't work. These tables are parsed by devmatch and others to connect
* modules with devices on the PCI bus.
*/
struct pci_device_table {
#if BYTE_ORDER == LITTLE_ENDIAN
uint16_t

View file

@ -79,9 +79,9 @@ struct psci_softc {
};
#ifdef FDT
static int psci_v0_1_init(device_t dev);
static int psci_v0_1_init(device_t dev, int default_version);
#endif
static int psci_v0_2_init(device_t dev);
static int psci_v0_2_init(device_t dev, int default_version);
struct psci_softc *psci_softc = NULL;
@ -96,15 +96,35 @@ struct psci_softc *psci_softc = NULL;
#endif
#ifdef FDT
struct psci_init_def {
int default_version;
psci_initfn_t psci_init;
};
static struct psci_init_def psci_v1_0_init_def = {
.default_version = (1 << 16) | 0,
.psci_init = psci_v0_2_init
};
static struct psci_init_def psci_v0_2_init_def = {
.default_version = (0 << 16) | 2,
.psci_init = psci_v0_2_init
};
static struct psci_init_def psci_v0_1_init_def = {
.default_version = (0 << 16) | 1,
.psci_init = psci_v0_1_init
};
static struct ofw_compat_data compat_data[] = {
{"arm,psci-1.0", (uintptr_t)psci_v0_2_init},
{"arm,psci-0.2", (uintptr_t)psci_v0_2_init},
{"arm,psci", (uintptr_t)psci_v0_1_init},
{"arm,psci-1.0", (uintptr_t)&psci_v1_0_init_def},
{"arm,psci-0.2", (uintptr_t)&psci_v0_2_init_def},
{"arm,psci", (uintptr_t)&psci_v0_1_init_def},
{NULL, 0}
};
#endif
static int psci_attach(device_t, psci_initfn_t);
static int psci_attach(device_t, psci_initfn_t, int);
static void psci_shutdown(void *, int);
static int psci_find_callfn(psci_callfn_t *);
@ -198,12 +218,13 @@ static int
psci_fdt_attach(device_t dev)
{
const struct ofw_compat_data *ocd;
psci_initfn_t psci_init;
struct psci_init_def *psci_init_def;
ocd = ofw_bus_search_compatible(dev, compat_data);
psci_init = (psci_initfn_t)ocd->ocd_data;
psci_init_def = (struct psci_init_def *)ocd->ocd_data;
return (psci_attach(dev, psci_init));
return (psci_attach(dev, psci_init_def->psci_init,
psci_init_def->default_version));
}
#endif
@ -304,12 +325,12 @@ static int
psci_acpi_attach(device_t dev)
{
return (psci_attach(dev, psci_v0_2_init));
return (psci_attach(dev, psci_v0_2_init, PSCI_RETVAL_NOT_SUPPORTED));
}
#endif
static int
psci_attach(device_t dev, psci_initfn_t psci_init)
psci_attach(device_t dev, psci_initfn_t psci_init, int default_version)
{
struct psci_softc *sc = device_get_softc(dev);
@ -317,7 +338,7 @@ psci_attach(device_t dev, psci_initfn_t psci_init)
return (ENXIO);
KASSERT(psci_init != NULL, ("PSCI init function cannot be NULL"));
if (psci_init(dev))
if (psci_init(dev, default_version))
return (ENXIO);
psci_softc = sc;
@ -464,7 +485,7 @@ psci_reset(void)
#ifdef FDT
/* Only support PSCI 0.1 on FDT */
static int
psci_v0_1_init(device_t dev)
psci_v0_1_init(device_t dev, int default_version __unused)
{
struct psci_softc *sc = device_get_softc(dev);
int psci_fn;
@ -510,7 +531,7 @@ psci_v0_1_init(device_t dev)
#endif
static int
psci_v0_2_init(device_t dev)
psci_v0_2_init(device_t dev, int default_version)
{
struct psci_softc *sc = device_get_softc(dev);
int version;
@ -529,8 +550,20 @@ psci_v0_2_init(device_t dev)
version = _psci_get_version(sc);
if (version == PSCI_RETVAL_NOT_SUPPORTED)
return (1);
/*
* U-Boot PSCI implementation doesn't have psci_get_version()
* method implemented for many boards. In this case, use the version
* readed from FDT as fallback. No fallback method for ACPI.
*/
if (version == PSCI_RETVAL_NOT_SUPPORTED) {
if (default_version == PSCI_RETVAL_NOT_SUPPORTED)
return (1);
version = default_version;
printf("PSCI get_version() function is not implemented, "
" assuming v%d.%d\n", PSCI_VER_MAJOR(version),
PSCI_VER_MINOR(version));
}
sc->psci_version = version;
if ((PSCI_VER_MAJOR(version) == 0 && PSCI_VER_MINOR(version) == 2) ||

View file

@ -31,7 +31,7 @@
#include <sys/types.h>
typedef int (*psci_initfn_t)(device_t dev);
typedef int (*psci_initfn_t)(device_t dev, int default_version);
typedef int (*psci_callfn_t)(register_t, register_t, register_t, register_t);
extern int psci_present;

View file

@ -97,11 +97,11 @@ static const struct {
{ HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 },
{ HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 },
{ HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 },
{ HDA_INTEL_KBLKLP, "Intel Kabylake-LP", 0, 0 },
{ HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 },
{ HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 },
{ HDA_INTEL_KBLK, "Intel Kabylake", 0, 0 },
{ HDA_INTEL_KBLKH, "Intel Kabylake-H", 0, 0 },
{ HDA_INTEL_CFLK, "Intel Coffelake", 0, 0 },
{ HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 },
{ HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 },
{ HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 },
{ HDA_INTEL_82801F, "Intel 82801F", 0, 0 },
{ HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 },
{ HDA_INTEL_82801G, "Intel 82801G", 0, 0 },

View file

@ -368,7 +368,7 @@ static const struct {
{ HDA_CODEC_INTELHSW, 0, "Intel Haswell" },
{ HDA_CODEC_INTELBDW, 0, "Intel Broadwell" },
{ HDA_CODEC_INTELSKLK, 0, "Intel Skylake" },
{ HDA_CODEC_INTELKBLK, 0, "Intel Kabylake" },
{ HDA_CODEC_INTELKBLK, 0, "Intel Kaby Lake" },
{ HDA_CODEC_INTELCL, 0, "Intel Crestline" },
{ HDA_CODEC_SII1390, 0, "Silicon Image SiI1390" },
{ HDA_CODEC_SII1392, 0, "Silicon Image SiI1392" },

View file

@ -143,7 +143,7 @@
/* misc */
SYSCTL_DECL(_vfs_fuse);
SYSCTL_DECL(_vfs_fusefs);
/* Fuse locking */

View file

@ -92,7 +92,7 @@ SDT_PROBE_DEFINE2(fuse, , file, trace, "int", "char*");
static int fuse_fh_count = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, filehandle_count, CTLFLAG_RD,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, filehandle_count, CTLFLAG_RD,
&fuse_fh_count, 0, "number of open FUSE filehandles");
int

View file

@ -104,21 +104,21 @@ static int fuse_body_audit(struct fuse_ticket *ftick, size_t blen);
static fuse_handler_t fuse_standard_handler;
SYSCTL_NODE(_vfs, OID_AUTO, fuse, CTLFLAG_RW, 0, "FUSE tunables");
SYSCTL_STRING(_vfs_fuse, OID_AUTO, version, CTLFLAG_RD,
SYSCTL_NODE(_vfs, OID_AUTO, fusefs, CTLFLAG_RW, 0, "FUSE tunables");
SYSCTL_STRING(_vfs_fusefs, OID_AUTO, version, CTLFLAG_RD,
FUSE_FREEBSD_VERSION, 0, "fuse-freebsd version");
static int fuse_ticket_count = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, ticket_count, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, ticket_count, CTLFLAG_RW,
&fuse_ticket_count, 0, "number of allocated tickets");
static long fuse_iov_permanent_bufsize = 1 << 19;
SYSCTL_LONG(_vfs_fuse, OID_AUTO, iov_permanent_bufsize, CTLFLAG_RW,
SYSCTL_LONG(_vfs_fusefs, OID_AUTO, iov_permanent_bufsize, CTLFLAG_RW,
&fuse_iov_permanent_bufsize, 0,
"limit for permanently stored buffer size for fuse_iovs");
static int fuse_iov_credit = 16;
SYSCTL_INT(_vfs_fuse, OID_AUTO, iov_credit, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, iov_credit, CTLFLAG_RW,
&fuse_iov_credit, 0,
"how many times is an oversized fuse_iov tolerated");

View file

@ -96,9 +96,9 @@ static struct vfsconf fuse_vfsconf = {
.vfc_flags = VFCF_JAIL | VFCF_SYNTHETIC
};
SYSCTL_INT(_vfs_fuse, OID_AUTO, kernelabi_major, CTLFLAG_RD,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, kernelabi_major, CTLFLAG_RD,
SYSCTL_NULL_INT_PTR, FUSE_KERNEL_VERSION, "FUSE kernel abi major version");
SYSCTL_INT(_vfs_fuse, OID_AUTO, kernelabi_minor, CTLFLAG_RD,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, kernelabi_minor, CTLFLAG_RD,
SYSCTL_NULL_INT_PTR, FUSE_KERNEL_MINOR_VERSION, "FUSE kernel abi minor version");
SDT_PROVIDER_DEFINE(fuse);
@ -159,10 +159,10 @@ fuse_loader(struct module *m, int what, void *arg)
/* Registering the module */
static moduledata_t fuse_moddata = {
"fuse",
"fusefs",
fuse_loader,
&fuse_vfsconf
};
DECLARE_MODULE(fuse, fuse_moddata, SI_SUB_VFS, SI_ORDER_MIDDLE);
MODULE_VERSION(fuse, 1);
DECLARE_MODULE(fusefs, fuse_moddata, SI_SUB_VFS, SI_ORDER_MIDDLE);
MODULE_VERSION(fusefs, 1);

View file

@ -103,47 +103,47 @@ static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS);
static int fuse_node_count = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, node_count, CTLFLAG_RD,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, node_count, CTLFLAG_RD,
&fuse_node_count, 0, "Count of FUSE vnodes");
int fuse_data_cache_mode = FUSE_CACHE_WT;
SYSCTL_PROC(_vfs_fuse, OID_AUTO, data_cache_mode, CTLTYPE_INT|CTLFLAG_RW,
SYSCTL_PROC(_vfs_fusefs, OID_AUTO, data_cache_mode, CTLTYPE_INT|CTLFLAG_RW,
&fuse_data_cache_mode, 0, sysctl_fuse_cache_mode, "I",
"Zero: disable caching of FUSE file data; One: write-through caching "
"(default); Two: write-back caching (generally unsafe)");
int fuse_data_cache_invalidate = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, data_cache_invalidate, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, data_cache_invalidate, CTLFLAG_RW,
&fuse_data_cache_invalidate, 0,
"If non-zero, discard cached clean file data when there are no active file"
" users");
int fuse_mmap_enable = 1;
SYSCTL_INT(_vfs_fuse, OID_AUTO, mmap_enable, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, mmap_enable, CTLFLAG_RW,
&fuse_mmap_enable, 0,
"If non-zero, and data_cache_mode is also non-zero, enable mmap(2) of "
"FUSE files");
int fuse_refresh_size = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, refresh_size, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, refresh_size, CTLFLAG_RW,
&fuse_refresh_size, 0,
"If non-zero, and no dirty file extension data is buffered, fetch file "
"size before write operations");
int fuse_sync_resize = 1;
SYSCTL_INT(_vfs_fuse, OID_AUTO, sync_resize, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, sync_resize, CTLFLAG_RW,
&fuse_sync_resize, 0,
"If a cached write extended a file, inform FUSE filesystem of the changed"
"size immediately subsequent to the issued writes");
int fuse_fix_broken_io = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, fix_broken_io, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, fix_broken_io, CTLFLAG_RW,
&fuse_fix_broken_io, 0,
"If non-zero, print a diagnostic warning if a userspace filesystem returns"
" EIO on reads of recently extended portions of files");

View file

@ -120,16 +120,16 @@ struct vfsops fuse_vfsops = {
.vfs_statfs = fuse_vfsop_statfs,
};
SYSCTL_INT(_vfs_fuse, OID_AUTO, init_backgrounded, CTLFLAG_RD,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, init_backgrounded, CTLFLAG_RD,
SYSCTL_NULL_INT_PTR, 1, "indicate async handshake");
static int fuse_enforce_dev_perms = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, enforce_dev_perms, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, enforce_dev_perms, CTLFLAG_RW,
&fuse_enforce_dev_perms, 0,
"enforce fuse device permissions for secondary mounts");
static unsigned sync_unmount = 1;
SYSCTL_UINT(_vfs_fuse, OID_AUTO, sync_unmount, CTLFLAG_RW,
SYSCTL_UINT(_vfs_fusefs, OID_AUTO, sync_unmount, CTLFLAG_RW,
&sync_unmount, 0, "specify when to use synchronous unmount");
MALLOC_DEFINE(M_FUSEVFS, "fuse_filesystem", "buffer for fuse vfs layer");

View file

@ -183,17 +183,17 @@ struct vop_vector fuse_vnops = {
static u_long fuse_lookup_cache_hits = 0;
SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
SYSCTL_ULONG(_vfs_fusefs, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
&fuse_lookup_cache_hits, 0, "number of positive cache hits in lookup");
static u_long fuse_lookup_cache_misses = 0;
SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
SYSCTL_ULONG(_vfs_fusefs, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
&fuse_lookup_cache_misses, 0, "number of cache misses in lookup");
int fuse_lookup_cache_enable = 1;
SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
&fuse_lookup_cache_enable, 0, "if non-zero, enable lookup cache");
/*
@ -202,7 +202,7 @@ SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
*/
static int fuse_reclaim_revoked = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
SYSCTL_INT(_vfs_fusefs, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
&fuse_reclaim_revoked, 0, "");
uma_zone_t fuse_pbuf_zone;

View file

@ -129,7 +129,7 @@ SUBDIR= \
filemon \
firewire \
firmware \
fuse \
fusefs \
${_fxp} \
gem \
geom \

View file

@ -2,9 +2,12 @@
.PATH: ${SRCTOP}/sys/fs/fuse
KMOD= fuse
KMOD= fusefs
SRCS= vnode_if.h \
fuse_node.c fuse_io.c fuse_device.c fuse_ipc.c fuse_file.c \
fuse_vfsops.c fuse_vnops.c fuse_internal.c fuse_main.c
# Symlink for backwards compatibility with systems installed at 12.0 or older
LINKS= ${KMODDIR}/${KMOD}.ko ${KMODDIR}/fuse.ko
.include <bsd.kmod.mk>

View file

@ -8,4 +8,6 @@ SRCS+= nat64clat.c nat64clat_control.c
SRCS+= nat64lsn.c nat64lsn_control.c
SRCS+= nat64stl.c nat64stl_control.c
CFLAGS+= -I${SRCTOP}/sys/contrib/ck/include
.include <bsd.kmod.mk>

View file

@ -2592,16 +2592,16 @@ bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
struct bpf_if *bp;
bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
if (bp == NULL)
panic("bpfattach");
KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
bp = malloc(sizeof(*bp), M_BPF, M_WAITOK | M_ZERO);
rw_init(&bp->bif_lock, "bpf interface lock");
LIST_INIT(&bp->bif_dlist);
LIST_INIT(&bp->bif_wlist);
bp->bif_ifp = ifp;
bp->bif_dlt = dlt;
rw_init(&bp->bif_lock, "bpf interface lock");
KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
bp->bif_hdrlen = hdrlen;
bp->bif_bpf = driverp;
*driverp = bp;
@ -2609,8 +2609,6 @@ bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
BPF_UNLOCK();
bp->bif_hdrlen = hdrlen;
if (bootverbose && IS_DEFAULT_VNET(curvnet))
if_printf(ifp, "bpf attached\n");
}

View file

@ -171,6 +171,7 @@ struct iflib_ctx {
uint32_t ifc_if_flags;
uint32_t ifc_flags;
uint32_t ifc_max_fl_buf_size;
uint32_t ifc_rx_mbuf_sz;
int ifc_link_state;
int ifc_link_irq;
@ -2172,7 +2173,6 @@ iflib_fl_setup(iflib_fl_t fl)
{
iflib_rxq_t rxq = fl->ifl_rxq;
if_ctx_t ctx = rxq->ifr_ctx;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
/*
@ -2181,14 +2181,7 @@ iflib_fl_setup(iflib_fl_t fl)
iflib_fl_bufs_free(fl);
/* Now replenish the mbufs */
MPASS(fl->ifl_credits == 0);
/*
* XXX don't set the max_frame_size to larger
* than the hardware can handle
*/
if (sctx->isc_max_frame_size <= 2048)
fl->ifl_buf_size = MCLBYTES;
else
fl->ifl_buf_size = MJUMPAGESIZE;
fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
@ -2313,6 +2306,27 @@ iflib_timer(void *arg)
STATE_UNLOCK(ctx);
}
static void
iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
{
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
/*
* XXX don't set the max_frame_size to larger
* than the hardware can handle
*/
if (sctx->isc_max_frame_size <= MCLBYTES)
ctx->ifc_rx_mbuf_sz = MCLBYTES;
else
ctx->ifc_rx_mbuf_sz = MJUMPAGESIZE;
}
uint32_t
iflib_get_rx_mbuf_sz(if_ctx_t ctx)
{
return (ctx->ifc_rx_mbuf_sz);
}
static void
iflib_init_locked(if_ctx_t ctx)
{
@ -2347,6 +2361,14 @@ iflib_init_locked(if_ctx_t ctx)
CALLOUT_UNLOCK(txq);
iflib_netmap_txq_init(ctx, txq);
}
/*
* Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
* that drivers can use the value when setting up the hardware receive
* buffers.
*/
iflib_calc_rx_mbuf_sz(ctx);
#ifdef INVARIANTS
i = if_getdrvflags(ifp);
#endif
@ -3276,9 +3298,14 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
txq->ift_mbuf_defrag++;
m_head = m_defrag(*m_headp, M_NOWAIT);
}
remap++;
if (__predict_false(m_head == NULL))
/*
* remap should never be >1 unless bus_dmamap_load_mbuf_sg
* failed to map an mbuf that was run through m_defrag
*/
MPASS(remap <= 1);
if (__predict_false(m_head == NULL || remap > 1))
goto defrag_failed;
remap++;
*m_headp = m_head;
goto retry;
break;
@ -6230,8 +6257,8 @@ iflib_add_device_sysctl_pre(if_ctx_t ctx)
CTLFLAG_RD, NULL, "IFLIB fields");
oid_list = SYSCTL_CHILDREN(node);
SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
"driver version");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",

View file

@ -248,7 +248,7 @@ struct if_shared_ctx {
/* fields necessary for probe */
pci_vendor_info_t *isc_vendor_info;
char *isc_driver_version;
const char *isc_driver_version;
/* optional function to transform the read values to match the table*/
void (*isc_parse_devinfo) (uint16_t *device_id, uint16_t *subvendor_id,
uint16_t *subdevice_id, uint16_t *rev_id);
@ -381,6 +381,8 @@ void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]);
void iflib_request_reset(if_ctx_t ctx);
uint8_t iflib_in_detach(if_ctx_t ctx);
uint32_t iflib_get_rx_mbuf_sz(if_ctx_t ctx);
/*
* If the driver can plug cleanly in to newbus use these
*/

View file

@ -122,7 +122,7 @@ typedef struct _ipfw_nat64clat_cfg {
/*
* NAT64LSN default configuration values
*/
#define NAT64LSN_MAX_PORTS 2048 /* Max number of ports per host */
#define NAT64LSN_MAX_PORTS 2048 /* Unused */
#define NAT64LSN_JMAXLEN 2048 /* Max outstanding requests. */
#define NAT64LSN_TCP_SYN_AGE 10 /* State's TTL after SYN received. */
#define NAT64LSN_TCP_EST_AGE (2 * 3600) /* TTL for established connection */
@ -135,16 +135,20 @@ typedef struct _ipfw_nat64clat_cfg {
typedef struct _ipfw_nat64lsn_cfg {
char name[64]; /* NAT name */
uint32_t flags;
uint32_t max_ports; /* Max ports per client */
uint32_t agg_prefix_len; /* Prefix length to count */
uint32_t agg_prefix_max; /* Max hosts per agg prefix */
uint32_t max_ports; /* Unused */
uint32_t agg_prefix_len; /* Unused */
uint32_t agg_prefix_max; /* Unused */
struct in_addr prefix4;
uint16_t plen4; /* Prefix length */
uint16_t plen6; /* Prefix length */
struct in6_addr prefix6; /* NAT64 prefix */
uint32_t jmaxlen; /* Max jobqueue length */
uint16_t min_port; /* Min port group # to use */
uint16_t max_port; /* Max port group # to use */
uint16_t min_port; /* Unused */
uint16_t max_port; /* Unused */
uint16_t nh_delete_delay;/* Stale host delete delay */
uint16_t pg_delete_delay;/* Stale portgroup delete delay */
uint16_t st_syn_ttl; /* TCP syn expire */
@ -153,7 +157,7 @@ typedef struct _ipfw_nat64lsn_cfg {
uint16_t st_udp_ttl; /* UDP expire */
uint16_t st_icmp_ttl; /* ICMP expire */
uint8_t set; /* Named instance set [0..31] */
uint8_t spare;
uint8_t states_chunks; /* Number of states chunks per PG */
} ipfw_nat64lsn_cfg;
typedef struct _ipfw_nat64lsn_state {
@ -177,5 +181,30 @@ typedef struct _ipfw_nat64lsn_stg {
uint32_t spare2;
} ipfw_nat64lsn_stg;
#endif /* _NETINET6_IP_FW_NAT64_H_ */
typedef struct _ipfw_nat64lsn_state_v1 {
struct in6_addr host6; /* Bound IPv6 host */
struct in_addr daddr; /* Remote IPv4 address */
uint16_t dport; /* Remote destination port */
uint16_t aport; /* Local alias port */
uint16_t sport; /* Source port */
uint16_t spare;
uint16_t idle; /* Last used time */
uint8_t flags; /* State flags */
uint8_t proto; /* protocol */
} ipfw_nat64lsn_state_v1;
typedef struct _ipfw_nat64lsn_stg_v1 {
union nat64lsn_pgidx {
uint64_t index;
struct {
uint8_t chunk; /* states chunk */
uint8_t proto; /* protocol */
uint16_t port; /* base port */
in_addr_t addr; /* alias address */
};
} next; /* next state index */
struct in_addr alias4; /* IPv4 alias address */
uint32_t count; /* Number of states */
} ipfw_nat64lsn_stg_v1;
#endif /* _NETINET6_IP_FW_NAT64_H_ */

File diff suppressed because it is too large Load diff

View file

@ -35,75 +35,149 @@
#include "ip_fw_nat64.h"
#include "nat64_translate.h"
#define NAT64_CHUNK_SIZE_BITS 6 /* 64 ports */
#define NAT64_CHUNK_SIZE (1 << NAT64_CHUNK_SIZE_BITS)
#define NAT64_MIN_PORT 1024
#define NAT64_MIN_CHUNK (NAT64_MIN_PORT >> NAT64_CHUNK_SIZE_BITS)
struct nat64lsn_host;
struct nat64lsn_alias;
struct st_ptr {
uint8_t idx; /* index in nh->pg_ptr array.
* NOTE: it starts from 1.
*/
uint8_t off;
struct nat64lsn_state {
/* IPv6 host entry keeps hash table to speedup state lookup */
CK_SLIST_ENTRY(nat64lsn_state) entries;
struct nat64lsn_host *host;
struct in6_addr ip6_dst; /* Destination IPv6 address */
in_addr_t ip_src; /* Alias IPv4 address */
in_addr_t ip_dst; /* Destination IPv4 address */
uint16_t dport; /* Destination port */
uint16_t sport; /* Source port */
uint32_t hval;
uint32_t flags; /* Internal flags */
uint16_t aport;
uint16_t timestamp; /* last used */
uint8_t proto;
uint8_t _spare[7];
};
#define NAT64LSN_MAXPGPTR ((1 << (sizeof(uint8_t) * NBBY)) - 1)
#define NAT64LSN_PGPTRMASKBITS (sizeof(uint64_t) * NBBY)
#define NAT64LSN_PGPTRNMASK (roundup(NAT64LSN_MAXPGPTR, \
NAT64LSN_PGPTRMASKBITS) / NAT64LSN_PGPTRMASKBITS)
struct nat64lsn_portgroup;
/* sizeof(struct nat64lsn_host) = 64 + 64x2 + 8x8 = 256 bytes */
struct nat64lsn_states_chunk {
struct nat64lsn_state state[64];
};
#define ISSET64(mask, bit) ((mask) & ((uint64_t)1 << (bit)))
#define ISSET32(mask, bit) ((mask) & ((uint32_t)1 << (bit)))
struct nat64lsn_pg {
CK_SLIST_ENTRY(nat64lsn_pg) entries;
uint16_t base_port;
uint16_t timestamp;
uint8_t proto;
uint8_t chunks_count;
uint8_t spare[2];
union {
uint64_t freemask64;
uint32_t freemask32[2];
uint64_t *freemask64_chunk;
uint32_t *freemask32_chunk;
void *freemask_chunk;
};
union {
struct nat64lsn_states_chunk *states;
struct nat64lsn_states_chunk **states_chunk;
};
};
#define CHUNK_BY_FADDR(p, a) ((a) & ((p)->chunks_count - 1))
#ifdef __LP64__
#define FREEMASK_CHUNK(p, v) \
((p)->chunks_count == 1 ? &(p)->freemask64 : \
&(p)->freemask64_chunk[CHUNK_BY_FADDR(p, v)])
#define FREEMASK_BITCOUNT(pg, faddr) \
bitcount64(*FREEMASK_CHUNK((pg), (faddr)))
#else
#define FREEMASK_CHUNK(p, v) \
((p)->chunks_count == 1 ? &(p)->freemask32[0] : \
&(p)->freemask32_chunk[CHUNK_BY_FADDR(p, v) * 2])
#define FREEMASK_BITCOUNT(pg, faddr) \
bitcount64(*(uint64_t *)FREEMASK_CHUNK((pg), (faddr)))
#endif /* !__LP64__ */
struct nat64lsn_pgchunk {
struct nat64lsn_pg *pgptr[32];
};
struct nat64lsn_aliaslink {
CK_SLIST_ENTRY(nat64lsn_aliaslink) alias_entries;
CK_SLIST_ENTRY(nat64lsn_aliaslink) host_entries;
struct nat64lsn_alias *alias;
};
CK_SLIST_HEAD(nat64lsn_aliaslink_slist, nat64lsn_aliaslink);
CK_SLIST_HEAD(nat64lsn_states_slist, nat64lsn_state);
CK_SLIST_HEAD(nat64lsn_hosts_slist, nat64lsn_host);
CK_SLIST_HEAD(nat64lsn_pg_slist, nat64lsn_pg);
struct nat64lsn_alias {
struct nat64lsn_aliaslink_slist hosts;
struct nat64lsn_pg_slist portgroups;
struct mtx lock;
in_addr_t addr; /* host byte order */
uint32_t hosts_count;
uint32_t portgroups_count;
uint32_t tcp_chunkmask;
uint32_t udp_chunkmask;
uint32_t icmp_chunkmask;
uint32_t tcp_pgidx;
uint32_t udp_pgidx;
uint32_t icmp_pgidx;
uint16_t timestamp;
uint16_t spare;
uint32_t tcp_pgmask[32];
uint32_t udp_pgmask[32];
uint32_t icmp_pgmask[32];
struct nat64lsn_pgchunk *tcp[32];
struct nat64lsn_pgchunk *udp[32];
struct nat64lsn_pgchunk *icmp[32];
/* pointer to PG that can be used for faster state allocation */
struct nat64lsn_pg *tcp_pg;
struct nat64lsn_pg *udp_pg;
struct nat64lsn_pg *icmp_pg;
};
#define ALIAS_LOCK_INIT(p) \
mtx_init(&(p)->lock, "alias_lock", NULL, MTX_DEF)
#define ALIAS_LOCK_DESTROY(p) mtx_destroy(&(p)->lock)
#define ALIAS_LOCK(p) mtx_lock(&(p)->lock)
#define ALIAS_UNLOCK(p) mtx_unlock(&(p)->lock)
#define NAT64LSN_HSIZE 256
#define NAT64LSN_MAX_HSIZE 4096
#define NAT64LSN_HOSTS_HSIZE 1024
struct nat64lsn_host {
struct rwlock h_lock; /* Host states lock */
struct in6_addr addr;
struct nat64lsn_host *next;
uint16_t timestamp; /* Last altered */
uint16_t hsize; /* ports hash size */
uint16_t pg_used; /* Number of portgroups used */
#define NAT64LSN_REMAININGPG 8 /* Number of remaining PG before
* requesting of new chunk of indexes.
*/
uint16_t pg_allocated; /* Number of portgroups indexes
* allocated.
*/
#define NAT64LSN_HSIZE 64
struct st_ptr phash[NAT64LSN_HSIZE]; /* XXX: hardcoded size */
/*
* PG indexes are stored in chunks with 32 elements.
* The maximum count is limited to 255 due to st_ptr->idx is uint8_t.
*/
#define NAT64LSN_PGIDX_CHUNK 32
#define NAT64LSN_PGNIDX (roundup(NAT64LSN_MAXPGPTR, \
NAT64LSN_PGIDX_CHUNK) / NAT64LSN_PGIDX_CHUNK)
struct nat64lsn_portgroup **pg_ptr[NAT64LSN_PGNIDX]; /* PG indexes */
struct in6_addr addr;
struct nat64lsn_aliaslink_slist aliases;
struct nat64lsn_states_slist *states_hash;
CK_SLIST_ENTRY(nat64lsn_host) entries;
uint32_t states_count;
uint32_t hval;
uint32_t flags;
#define NAT64LSN_DEADHOST 1
#define NAT64LSN_GROWHASH 2
uint16_t states_hashsize;
uint16_t timestamp;
struct mtx lock;
};
#define NAT64_RLOCK_ASSERT(h) rw_assert(&(h)->h_lock, RA_RLOCKED)
#define NAT64_WLOCK_ASSERT(h) rw_assert(&(h)->h_lock, RA_WLOCKED)
#define NAT64_RLOCK(h) rw_rlock(&(h)->h_lock)
#define NAT64_RUNLOCK(h) rw_runlock(&(h)->h_lock)
#define NAT64_WLOCK(h) rw_wlock(&(h)->h_lock)
#define NAT64_WUNLOCK(h) rw_wunlock(&(h)->h_lock)
#define NAT64_LOCK(h) NAT64_WLOCK(h)
#define NAT64_UNLOCK(h) NAT64_WUNLOCK(h)
#define NAT64_LOCK_INIT(h) do { \
rw_init(&(h)->h_lock, "NAT64 host lock"); \
} while (0)
#define NAT64_LOCK_DESTROY(h) do { \
rw_destroy(&(h)->h_lock); \
} while (0)
/* Internal proto index */
#define NAT_PROTO_TCP 1
#define NAT_PROTO_UDP 2
#define NAT_PROTO_ICMP 3
#define NAT_MAX_PROTO 4
extern uint8_t nat64lsn_rproto_map[NAT_MAX_PROTO];
#define HOST_LOCK_INIT(p) \
mtx_init(&(p)->lock, "host_lock", NULL, MTX_DEF|MTX_NEW)
#define HOST_LOCK_DESTROY(p) mtx_destroy(&(p)->lock)
#define HOST_LOCK(p) mtx_lock(&(p)->lock)
#define HOST_UNLOCK(p) mtx_unlock(&(p)->lock)
VNET_DECLARE(uint16_t, nat64lsn_eid);
#define V_nat64lsn_eid VNET(nat64lsn_eid)
@ -112,124 +186,65 @@ VNET_DECLARE(uint16_t, nat64lsn_eid);
/* Timestamp macro */
#define _CT ((int)time_uptime % 65536)
#define SET_AGE(x) (x) = _CT
#define GET_AGE(x) ((_CT >= (x)) ? _CT - (x) : \
(int)65536 + _CT - (x))
#define GET_AGE(x) ((_CT >= (x)) ? _CT - (x): (int)65536 + _CT - (x))
#ifdef __LP64__
/* ffsl() is capable of checking 64-bit ints */
#define _FFS64
#endif
/* 16 bytes */
struct nat64lsn_state {
union {
struct {
in_addr_t faddr; /* Remote IPv4 address */
uint16_t fport; /* Remote IPv4 port */
uint16_t lport; /* Local IPv6 port */
}s;
uint64_t hkey;
} u;
uint8_t nat_proto;
uint8_t flags;
uint16_t timestamp;
struct st_ptr cur; /* Index of portgroup in nat64lsn_host */
struct st_ptr next; /* Next entry index */
};
/*
* 1024+32 bytes per 64 states, used to store state
* AND for outside-in state lookup
*/
struct nat64lsn_portgroup {
struct nat64lsn_host *host; /* IPv6 source host info */
in_addr_t aaddr; /* Alias addr, network format */
uint16_t aport; /* Base port */
uint16_t timestamp;
uint8_t nat_proto;
uint8_t spare[3];
uint32_t idx;
#ifdef _FFS64
uint64_t freemask; /* Mask of free entries */
#else
uint32_t freemask[2]; /* Mask of free entries */
#endif
struct nat64lsn_state states[NAT64_CHUNK_SIZE]; /* State storage */
};
#ifdef _FFS64
#define PG_MARK_BUSY_IDX(_pg, _idx) (_pg)->freemask &= ~((uint64_t)1<<(_idx))
#define PG_MARK_FREE_IDX(_pg, _idx) (_pg)->freemask |= ((uint64_t)1<<(_idx))
#define PG_IS_FREE_IDX(_pg, _idx) ((_pg)->freemask & ((uint64_t)1<<(_idx)))
#define PG_IS_BUSY_IDX(_pg, _idx) (PG_IS_FREE_IDX(_pg, _idx) == 0)
#define PG_GET_FREE_IDX(_pg) (ffsll((_pg)->freemask))
#define PG_IS_EMPTY(_pg) (((_pg)->freemask + 1) == 0)
#else
#define PG_MARK_BUSY_IDX(_pg, _idx) \
(_pg)->freemask[(_idx) / 32] &= ~((u_long)1<<((_idx) % 32))
#define PG_MARK_FREE_IDX(_pg, _idx) \
(_pg)->freemask[(_idx) / 32] |= ((u_long)1<<((_idx) % 32))
#define PG_IS_FREE_IDX(_pg, _idx) \
((_pg)->freemask[(_idx) / 32] & ((u_long)1<<((_idx) % 32)))
#define PG_IS_BUSY_IDX(_pg, _idx) (PG_IS_FREE_IDX(_pg, _idx) == 0)
#define PG_GET_FREE_IDX(_pg) _pg_get_free_idx(_pg)
#define PG_IS_EMPTY(_pg) \
((((_pg)->freemask[0] + 1) == 0 && ((_pg)->freemask[1] + 1) == 0))
static inline int
_pg_get_free_idx(const struct nat64lsn_portgroup *pg)
{
int i;
if ((i = ffsl(pg->freemask[0])) != 0)
return (i);
if ((i = ffsl(pg->freemask[1])) != 0)
return (i + 32);
return (0);
}
#endif
TAILQ_HEAD(nat64lsn_job_head, nat64lsn_job_item);
STAILQ_HEAD(nat64lsn_job_head, nat64lsn_job_item);
struct nat64lsn_cfg {
struct named_object no;
struct nat64lsn_portgroup **pg; /* XXX: array of pointers */
struct nat64lsn_host **ih; /* Host hash */
struct nat64lsn_hosts_slist *hosts_hash;
struct nat64lsn_alias *aliases; /* array of aliases */
struct mtx lock;
uint32_t hosts_hashsize;
uint32_t hash_seed;
uint32_t prefix4; /* IPv4 prefix */
uint32_t pmask4; /* IPv4 prefix mask */
uint32_t ihsize; /* IPv6 host hash size */
uint8_t plen4;
uint8_t nomatch_verdict;/* What to return to ipfw on no-match */
uint8_t nomatch_verdict;/* Return value on no-match */
uint32_t ihcount; /* Number of items in host hash */
int max_chunks; /* Max chunks per client */
int agg_prefix_len; /* Prefix length to count */
int agg_prefix_max; /* Max hosts per agg prefix */
uint32_t hosts_count; /* Number of items in host hash */
uint32_t states_chunks; /* Number of states chunks per PG */
uint32_t jmaxlen; /* Max jobqueue length */
uint16_t min_chunk; /* Min port group # to use */
uint16_t max_chunk; /* Max port group # to use */
uint16_t nh_delete_delay; /* Stale host delete delay */
uint16_t host_delete_delay; /* Stale host delete delay */
uint16_t pgchunk_delete_delay;
uint16_t pg_delete_delay; /* Stale portgroup del delay */
uint16_t st_syn_ttl; /* TCP syn expire */
uint16_t st_close_ttl; /* TCP fin expire */
uint16_t st_estab_ttl; /* TCP established expire */
uint16_t st_udp_ttl; /* UDP expire */
uint16_t st_icmp_ttl; /* ICMP expire */
uint32_t protochunks[NAT_MAX_PROTO];/* Number of chunks used */
struct nat64_config base;
#define NAT64LSN_FLAGSMASK (NAT64_LOG | NAT64_ALLOW_PRIVATE)
#define NAT64LSN_ANYPREFIX 0x00000100
struct mtx periodic_lock;
struct callout periodic;
struct callout jcallout;
struct ip_fw_chain *ch;
struct vnet *vp;
struct nat64lsn_job_head jhead;
int jlen;
char name[64]; /* Nat instance name */
};
/* CFG_LOCK protects cfg->hosts_hash from modification */
#define CFG_LOCK_INIT(p) \
mtx_init(&(p)->lock, "cfg_lock", NULL, MTX_DEF)
#define CFG_LOCK_DESTROY(p) mtx_destroy(&(p)->lock)
#define CFG_LOCK(p) mtx_lock(&(p)->lock)
#define CFG_UNLOCK(p) mtx_unlock(&(p)->lock)
#define CALLOUT_LOCK_INIT(p) \
mtx_init(&(p)->periodic_lock, "periodic_lock", NULL, MTX_DEF)
#define CALLOUT_LOCK_DESTROY(p) mtx_destroy(&(p)->periodic_lock)
#define CALLOUT_LOCK(p) mtx_lock(&(p)->periodic_lock)
#define CALLOUT_UNLOCK(p) mtx_unlock(&(p)->periodic_lock)
struct nat64lsn_cfg *nat64lsn_init_instance(struct ip_fw_chain *ch,
size_t numaddr);
in_addr_t prefix, int plen);
void nat64lsn_destroy_instance(struct nat64lsn_cfg *cfg);
void nat64lsn_start_instance(struct nat64lsn_cfg *cfg);
void nat64lsn_init_internal(void);
@ -237,114 +252,4 @@ void nat64lsn_uninit_internal(void);
int ipfw_nat64lsn(struct ip_fw_chain *ch, struct ip_fw_args *args,
ipfw_insn *cmd, int *done);
void
nat64lsn_dump_state(const struct nat64lsn_cfg *cfg,
const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st,
const char *px, int off);
/*
* Portgroup layout
* addr x nat_proto x port_off
*
*/
#define _ADDR_PG_PROTO_COUNT (65536 >> NAT64_CHUNK_SIZE_BITS)
#define _ADDR_PG_COUNT (_ADDR_PG_PROTO_COUNT * NAT_MAX_PROTO)
#define GET_ADDR_IDX(_cfg, _addr) ((_addr) - ((_cfg)->prefix4))
#define __GET_PORTGROUP_IDX(_proto, _port) \
((_proto - 1) * _ADDR_PG_PROTO_COUNT + \
((_port) >> NAT64_CHUNK_SIZE_BITS))
#define _GET_PORTGROUP_IDX(_cfg, _addr, _proto, _port) \
GET_ADDR_IDX(_cfg, _addr) * _ADDR_PG_COUNT + \
__GET_PORTGROUP_IDX(_proto, _port)
#define GET_PORTGROUP(_cfg, _addr, _proto, _port) \
((_cfg)->pg[_GET_PORTGROUP_IDX(_cfg, _addr, _proto, _port)])
#define PORTGROUP_CHUNK(_nh, _idx) \
((_nh)->pg_ptr[(_idx)])
#define PORTGROUP_BYSIDX(_cfg, _nh, _idx) \
(PORTGROUP_CHUNK(_nh, (_idx - 1) / NAT64LSN_PGIDX_CHUNK) \
[((_idx) - 1) % NAT64LSN_PGIDX_CHUNK])
/* Chained hash table */
#define CHT_FIND(_ph, _hsize, _PX, _x, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_x = _PX##first(_ph, _buck); \
for ( ; _x != NULL; _x = _PX##next(_x)) { \
if (_PX##cmp(_key, _PX##val(_x))) \
break; \
} \
if (_x == NULL) \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_UNLOCK_BUCK(_ph, _PX, _buck) \
_PX##unlock(_ph, _buck);
#define CHT_UNLOCK_KEY(_ph, _hsize, _PX, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_INSERT_HEAD(_ph, _hsize, _PX, _i) do { \
unsigned int _buck = _PX##hash(_PX##val(_i)) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_PX##next(_i) = _PX##first(_ph, _buck); \
_PX##first(_ph, _buck) = _i; \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_REMOVE(_ph, _hsize, _PX, _x, _tmp, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_x = _PX##first(_ph, _buck); \
_tmp = NULL; \
for ( ; _x != NULL; _tmp = _x, _x = _PX##next(_x)) { \
if (_PX##cmp(_key, _PX##val(_x))) \
break; \
} \
if (_x != NULL) { \
if (_tmp == NULL) \
_PX##first(_ph, _buck) = _PX##next(_x); \
else \
_PX##next(_tmp) = _PX##next(_x); \
} \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_FOREACH_SAFE(_ph, _hsize, _PX, _x, _tmp, _cb, _arg) do { \
for (unsigned int _i = 0; _i < _hsize; _i++) { \
_PX##lock(_ph, _i); \
_x = _PX##first(_ph, _i); \
_tmp = NULL; \
for (; _x != NULL; _tmp = _x, _x = _PX##next(_x)) { \
if (_cb(_x, _arg) == 0) \
continue; \
if (_tmp == NULL) \
_PX##first(_ph, _i) = _PX##next(_x); \
else \
_tmp = _PX##next(_x); \
} \
_PX##unlock(_ph, _i); \
} \
} while(0)
#define CHT_RESIZE(_ph, _hsize, _nph, _nhsize, _PX, _x, _y) do { \
unsigned int _buck; \
for (unsigned int _i = 0; _i < _hsize; _i++) { \
_x = _PX##first(_ph, _i); \
_y = _x; \
while (_y != NULL) { \
_buck = _PX##hash(_PX##val(_x)) & (_nhsize - 1);\
_y = _PX##next(_x); \
_PX##next(_x) = _PX##first(_nph, _buck); \
_PX##first(_nph, _buck) = _x; \
} \
} \
} while(0)
#endif /* _IP_FW_NAT64LSN_H_ */

View file

@ -33,6 +33,8 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ck.h>
#include <sys/epoch.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@ -43,10 +45,8 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockopt.h>
#include <sys/queue.h>
#include <net/if.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/ip.h>
@ -75,12 +75,6 @@ static void
nat64lsn_default_config(ipfw_nat64lsn_cfg *uc)
{
if (uc->max_ports == 0)
uc->max_ports = NAT64LSN_MAX_PORTS;
else
uc->max_ports = roundup(uc->max_ports, NAT64_CHUNK_SIZE);
if (uc->max_ports > NAT64_CHUNK_SIZE * NAT64LSN_MAXPGPTR)
uc->max_ports = NAT64_CHUNK_SIZE * NAT64LSN_MAXPGPTR;
if (uc->jmaxlen == 0)
uc->jmaxlen = NAT64LSN_JMAXLEN;
if (uc->jmaxlen > 65536)
@ -99,6 +93,13 @@ nat64lsn_default_config(ipfw_nat64lsn_cfg *uc)
uc->st_udp_ttl = NAT64LSN_UDP_AGE;
if (uc->st_icmp_ttl == 0)
uc->st_icmp_ttl = NAT64LSN_ICMP_AGE;
if (uc->states_chunks == 0)
uc->states_chunks = 1;
else if (uc->states_chunks >= 128)
uc->states_chunks = 128;
else if (!powerof2(uc->states_chunks))
uc->states_chunks = 1 << fls(uc->states_chunks);
}
/*
@ -127,12 +128,20 @@ nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
if (ipfw_check_object_name_generic(uc->name) != 0)
return (EINVAL);
if (uc->agg_prefix_len > 127 || uc->set >= IPFW_MAX_SETS)
if (uc->set >= IPFW_MAX_SETS)
return (EINVAL);
if (uc->plen4 > 32)
return (EINVAL);
if (nat64_check_prefix6(&uc->prefix6, uc->plen6) != 0)
/*
* Unspecified address has special meaning. But it must
* have valid prefix length. This length will be used to
* correctly extract and embedd IPv4 address into IPv6.
*/
if (nat64_check_prefix6(&uc->prefix6, uc->plen6) != 0 &&
IN6_IS_ADDR_UNSPECIFIED(&uc->prefix6) &&
nat64_check_prefixlen(uc->plen6) != 0)
return (EINVAL);
/* XXX: Check prefix4 to be global */
@ -140,14 +149,6 @@ nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
mask4 = ~((1 << (32 - uc->plen4)) - 1);
if ((addr4 & mask4) != addr4)
return (EINVAL);
if (uc->min_port == 0)
uc->min_port = NAT64_MIN_PORT;
if (uc->max_port == 0)
uc->max_port = 65535;
if (uc->min_port > uc->max_port)
return (EINVAL);
uc->min_port = roundup(uc->min_port, NAT64_CHUNK_SIZE);
uc->max_port = roundup(uc->max_port, NAT64_CHUNK_SIZE);
nat64lsn_default_config(uc);
@ -159,7 +160,7 @@ nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
}
IPFW_UH_RUNLOCK(ch);
cfg = nat64lsn_init_instance(ch, 1 << (32 - uc->plen4));
cfg = nat64lsn_init_instance(ch, addr4, uc->plen4);
strlcpy(cfg->name, uc->name, sizeof(cfg->name));
cfg->no.name = cfg->name;
cfg->no.etlv = IPFW_TLV_NAT64LSN_NAME;
@ -170,20 +171,12 @@ nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
cfg->base.flags = (uc->flags & NAT64LSN_FLAGSMASK) | NAT64_PLATPFX;
if (IN6_IS_ADDR_WKPFX(&cfg->base.plat_prefix))
cfg->base.flags |= NAT64_WKPFX;
else if (IN6_IS_ADDR_UNSPECIFIED(&cfg->base.plat_prefix))
cfg->base.flags |= NAT64LSN_ANYPREFIX;
cfg->prefix4 = addr4;
cfg->pmask4 = addr4 | ~mask4;
cfg->plen4 = uc->plen4;
cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
cfg->agg_prefix_len = uc->agg_prefix_len;
cfg->agg_prefix_max = uc->agg_prefix_max;
cfg->min_chunk = uc->min_port / NAT64_CHUNK_SIZE;
cfg->max_chunk = uc->max_port / NAT64_CHUNK_SIZE;
cfg->states_chunks = uc->states_chunks;
cfg->jmaxlen = uc->jmaxlen;
cfg->nh_delete_delay = uc->nh_delete_delay;
cfg->host_delete_delay = uc->nh_delete_delay;
cfg->pg_delete_delay = uc->pg_delete_delay;
cfg->st_syn_ttl = uc->st_syn_ttl;
cfg->st_close_ttl = uc->st_close_ttl;
@ -249,7 +242,7 @@ nat64lsn_destroy(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
return (ENOENT);
}
if (cfg->no.refcnt > 0) {
@ -272,6 +265,8 @@ static void
export_stats(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg,
struct ipfw_nat64lsn_stats *stats)
{
struct nat64lsn_alias *alias;
int i, j;
__COPY_STAT_FIELD(cfg, stats, opcnt64);
__COPY_STAT_FIELD(cfg, stats, opcnt46);
@ -299,10 +294,16 @@ export_stats(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg,
__COPY_STAT_FIELD(cfg, stats, spgcreated);
__COPY_STAT_FIELD(cfg, stats, spgdeleted);
stats->hostcount = cfg->ihcount;
stats->tcpchunks = cfg->protochunks[NAT_PROTO_TCP];
stats->udpchunks = cfg->protochunks[NAT_PROTO_UDP];
stats->icmpchunks = cfg->protochunks[NAT_PROTO_ICMP];
stats->hostcount = cfg->hosts_count;
for (i = 0; i < (1 << (32 - cfg->plen4)); i++) {
alias = &cfg->aliases[i];
for (j = 0; j < 32 && ISSET32(alias->tcp_chunkmask, j); j++)
stats->tcpchunks += bitcount32(alias->tcp_pgmask[j]);
for (j = 0; j < 32 && ISSET32(alias->udp_chunkmask, j); j++)
stats->udpchunks += bitcount32(alias->udp_pgmask[j]);
for (j = 0; j < 32 && ISSET32(alias->icmp_chunkmask, j); j++)
stats->icmpchunks += bitcount32(alias->icmp_pgmask[j]);
}
}
#undef __COPY_STAT_FIELD
@ -312,12 +313,9 @@ nat64lsn_export_config(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg,
{
uc->flags = cfg->base.flags & NAT64LSN_FLAGSMASK;
uc->max_ports = cfg->max_chunks * NAT64_CHUNK_SIZE;
uc->agg_prefix_len = cfg->agg_prefix_len;
uc->agg_prefix_max = cfg->agg_prefix_max;
uc->states_chunks = cfg->states_chunks;
uc->jmaxlen = cfg->jmaxlen;
uc->nh_delete_delay = cfg->nh_delete_delay;
uc->nh_delete_delay = cfg->host_delete_delay;
uc->pg_delete_delay = cfg->pg_delete_delay;
uc->st_syn_ttl = cfg->st_syn_ttl;
uc->st_close_ttl = cfg->st_close_ttl;
@ -425,7 +423,7 @@ nat64lsn_config(struct ip_fw_chain *ch, ip_fw3_opheader *op,
cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
return (ENOENT);
}
nat64lsn_export_config(ch, cfg, uc);
IPFW_UH_RUNLOCK(ch);
@ -438,18 +436,18 @@ nat64lsn_config(struct ip_fw_chain *ch, ip_fw3_opheader *op,
cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (EEXIST);
return (ENOENT);
}
/*
* For now allow to change only following values:
* jmaxlen, nh_del_age, pg_del_age, tcp_syn_age, tcp_close_age,
* tcp_est_age, udp_age, icmp_age, flags, max_ports.
* tcp_est_age, udp_age, icmp_age, flags, states_chunks.
*/
cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
cfg->states_chunks = uc->states_chunks;
cfg->jmaxlen = uc->jmaxlen;
cfg->nh_delete_delay = uc->nh_delete_delay;
cfg->host_delete_delay = uc->nh_delete_delay;
cfg->pg_delete_delay = uc->pg_delete_delay;
cfg->st_syn_ttl = uc->st_syn_ttl;
cfg->st_close_ttl = uc->st_close_ttl;
@ -496,7 +494,7 @@ nat64lsn_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
return (ENOENT);
}
export_stats(ch, cfg, &stats);
@ -538,143 +536,139 @@ nat64lsn_reset_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
return (ENOENT);
}
COUNTER_ARRAY_ZERO(cfg->base.stats.cnt, NAT64STATS);
IPFW_UH_WUNLOCK(ch);
return (0);
}
#ifdef __LP64__
#define FREEMASK_COPY(pg, n, out) (out) = *FREEMASK_CHUNK((pg), (n))
#else
#define FREEMASK_COPY(pg, n, out) (out) = *FREEMASK_CHUNK((pg), (n)) | \
((uint64_t)*(FREEMASK_CHUNK((pg), (n)) + 1) << 32)
#endif
/*
* Reply: [ ipfw_obj_header ipfw_obj_data [ ipfw_nat64lsn_stg
* ipfw_nat64lsn_state x count, ... ] ]
*/
static int
export_pg_states(struct nat64lsn_cfg *cfg, struct nat64lsn_portgroup *pg,
ipfw_nat64lsn_stg *stg, struct sockopt_data *sd)
nat64lsn_export_states_v1(struct nat64lsn_cfg *cfg, union nat64lsn_pgidx *idx,
struct nat64lsn_pg *pg, struct sockopt_data *sd, uint32_t *ret_count)
{
ipfw_nat64lsn_state *ste;
struct nat64lsn_state *st;
int i, count;
ipfw_nat64lsn_state_v1 *s;
struct nat64lsn_state *state;
uint64_t freemask;
uint32_t i, count;
/* validate user input */
if (idx->chunk > pg->chunks_count - 1)
return (EINVAL);
FREEMASK_COPY(pg, idx->chunk, freemask);
count = 64 - bitcount64(freemask);
if (count == 0)
return (0); /* Try next PG/chunk */
DPRINTF(DP_STATE, "EXPORT PG 0x%16jx, count %d",
(uintmax_t)idx->index, count);
s = (ipfw_nat64lsn_state_v1 *)ipfw_get_sopt_space(sd,
count * sizeof(ipfw_nat64lsn_state_v1));
if (s == NULL)
return (ENOMEM);
NAT64_LOCK(pg->host);
count = 0;
for (i = 0; i < 64; i++) {
if (PG_IS_BUSY_IDX(pg, i))
count++;
}
DPRINTF(DP_STATE, "EXPORT PG %d, count %d", pg->idx, count);
if (count == 0) {
stg->count = 0;
NAT64_UNLOCK(pg->host);
return (0);
}
ste = (ipfw_nat64lsn_state *)ipfw_get_sopt_space(sd,
count * sizeof(ipfw_nat64lsn_state));
if (ste == NULL) {
NAT64_UNLOCK(pg->host);
return (1);
}
stg->alias4.s_addr = pg->aaddr;
stg->proto = nat64lsn_rproto_map[pg->nat_proto];
stg->flags = 0;
stg->host6 = pg->host->addr;
stg->count = count;
for (i = 0; i < 64; i++) {
if (PG_IS_FREE_IDX(pg, i))
if (ISSET64(freemask, i))
continue;
st = &pg->states[i];
ste->daddr.s_addr = st->u.s.faddr;
ste->dport = st->u.s.fport;
ste->aport = pg->aport + i;
ste->sport = st->u.s.lport;
ste->flags = st->flags; /* XXX filter flags */
ste->idle = GET_AGE(st->timestamp);
ste++;
}
NAT64_UNLOCK(pg->host);
state = pg->chunks_count == 1 ? &pg->states->state[i] :
&pg->states_chunk[idx->chunk]->state[i];
s->host6 = state->host->addr;
s->daddr.s_addr = htonl(state->ip_dst);
s->dport = state->dport;
s->sport = state->sport;
s->aport = state->aport;
s->flags = (uint8_t)(state->flags & 7);
s->proto = state->proto;
s->idle = GET_AGE(state->timestamp);
s++;
}
*ret_count = count;
return (0);
}
#define LAST_IDX 0xFF
static int
get_next_idx(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
nat64lsn_next_pgidx(struct nat64lsn_cfg *cfg, struct nat64lsn_pg *pg,
union nat64lsn_pgidx *idx)
{
if (*port < 65536 - NAT64_CHUNK_SIZE) {
*port += NAT64_CHUNK_SIZE;
return (0);
}
*port = 0;
if (*nat_proto < NAT_MAX_PROTO - 1) {
*nat_proto += 1;
return (0);
}
*nat_proto = 1;
if (*addr < cfg->pmask4) {
*addr += 1;
return (0);
}
/* End of space. */
return (1);
}
#define PACK_IDX(addr, proto, port) \
((uint64_t)addr << 32) | ((uint32_t)port << 16) | (proto << 8)
#define UNPACK_IDX(idx, addr, proto, port) \
(addr) = (uint32_t)((idx) >> 32); \
(port) = (uint16_t)(((idx) >> 16) & 0xFFFF); \
(proto) = (uint8_t)(((idx) >> 8) & 0xFF)
static struct nat64lsn_portgroup *
get_next_pg(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
{
struct nat64lsn_portgroup *pg;
uint64_t pre_pack, post_pack;
pg = NULL;
pre_pack = PACK_IDX(*addr, *nat_proto, *port);
for (;;) {
if (get_next_idx(cfg, addr, nat_proto, port) != 0) {
/* End of states */
return (pg);
/* First iterate over chunks */
if (pg != NULL) {
if (idx->chunk < pg->chunks_count - 1) {
idx->chunk++;
return (0);
}
pg = GET_PORTGROUP(cfg, *addr, *nat_proto, *port);
if (pg != NULL)
break;
}
post_pack = PACK_IDX(*addr, *nat_proto, *port);
if (pre_pack == post_pack)
DPRINTF(DP_STATE, "XXX: PACK_IDX %u %d %d",
*addr, *nat_proto, *port);
return (pg);
idx->chunk = 0;
/* Then over PGs */
if (idx->port < UINT16_MAX - 64) {
idx->port += 64;
return (0);
}
idx->port = NAT64_MIN_PORT;
/* Then over supported protocols */
switch (idx->proto) {
case IPPROTO_ICMP:
idx->proto = IPPROTO_TCP;
return (0);
case IPPROTO_TCP:
idx->proto = IPPROTO_UDP;
return (0);
default:
idx->proto = IPPROTO_ICMP;
}
/* And then over IPv4 alias addresses */
if (idx->addr < cfg->pmask4) {
idx->addr++;
return (1); /* New states group is needed */
}
idx->index = LAST_IDX;
return (-1); /* No more states */
}
static NAT64NOINLINE struct nat64lsn_portgroup *
get_first_pg(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
static struct nat64lsn_pg*
nat64lsn_get_pg_byidx(struct nat64lsn_cfg *cfg, union nat64lsn_pgidx *idx)
{
struct nat64lsn_portgroup *pg;
struct nat64lsn_alias *alias;
int pg_idx;
pg = GET_PORTGROUP(cfg, *addr, *nat_proto, *port);
if (pg == NULL)
pg = get_next_pg(cfg, addr, nat_proto, port);
alias = &cfg->aliases[idx->addr & ((1 << (32 - cfg->plen4)) - 1)];
MPASS(alias->addr == idx->addr);
return (pg);
pg_idx = (idx->port - NAT64_MIN_PORT) / 64;
switch (idx->proto) {
case IPPROTO_ICMP:
if (ISSET32(alias->icmp_pgmask[pg_idx / 32], pg_idx % 32))
return (alias->icmp[pg_idx / 32]->pgptr[pg_idx % 32]);
break;
case IPPROTO_TCP:
if (ISSET32(alias->tcp_pgmask[pg_idx / 32], pg_idx % 32))
return (alias->tcp[pg_idx / 32]->pgptr[pg_idx % 32]);
break;
case IPPROTO_UDP:
if (ISSET32(alias->udp_pgmask[pg_idx / 32], pg_idx % 32))
return (alias->udp[pg_idx / 32]->pgptr[pg_idx % 32]);
break;
}
return (NULL);
}
/*
* Lists nat64lsn states.
* Data layout (v0)(current):
* Data layout (v0):
* Request: [ ipfw_obj_header ipfw_obj_data [ uint64_t ]]
* Reply: [ ipfw_obj_header ipfw_obj_data [
* ipfw_nat64lsn_stg ipfw_nat64lsn_state x N] ]
@ -682,19 +676,36 @@ get_first_pg(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
* Returns 0 on success
*/
static int
nat64lsn_states(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
nat64lsn_states_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
/* TODO: implement states listing for old ipfw(8) binaries */
return (EOPNOTSUPP);
}
/*
* Lists nat64lsn states.
* Data layout (v1)(current):
* Request: [ ipfw_obj_header ipfw_obj_data [ uint64_t ]]
* Reply: [ ipfw_obj_header ipfw_obj_data [
* ipfw_nat64lsn_stg_v1 ipfw_nat64lsn_state_v1 x N] ]
*
* Returns 0 on success
*/
static int
nat64lsn_states_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
ipfw_obj_data *od;
ipfw_nat64lsn_stg *stg;
ipfw_nat64lsn_stg_v1 *stg;
struct nat64lsn_cfg *cfg;
struct nat64lsn_portgroup *pg, *pg_next;
uint64_t next_idx;
struct nat64lsn_pg *pg;
union nat64lsn_pgidx idx;
size_t sz;
uint32_t addr, states;
uint16_t port;
uint8_t nat_proto;
uint32_t count, total;
int ret;
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_data) +
sizeof(uint64_t);
@ -708,78 +719,96 @@ nat64lsn_states(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
od->head.length != sz - sizeof(ipfw_obj_header))
return (EINVAL);
next_idx = *(uint64_t *)(od + 1);
/* Translate index to the request position to start from */
UNPACK_IDX(next_idx, addr, nat_proto, port);
if (nat_proto >= NAT_MAX_PROTO)
idx.index = *(uint64_t *)(od + 1);
if (idx.index != 0 && idx.proto != IPPROTO_ICMP &&
idx.proto != IPPROTO_TCP && idx.proto != IPPROTO_UDP)
return (EINVAL);
if (nat_proto == 0 && addr != 0)
if (idx.index == LAST_IDX)
return (EINVAL);
IPFW_UH_RLOCK(ch);
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
return (ENOENT);
}
/* Fill in starting point */
if (addr == 0) {
addr = cfg->prefix4;
nat_proto = 1;
port = 0;
if (idx.index == 0) { /* Fill in starting point */
idx.addr = cfg->prefix4;
idx.proto = IPPROTO_ICMP;
idx.port = NAT64_MIN_PORT;
}
if (addr < cfg->prefix4 || addr > cfg->pmask4) {
if (idx.addr < cfg->prefix4 || idx.addr > cfg->pmask4 ||
idx.port < NAT64_MIN_PORT) {
IPFW_UH_RUNLOCK(ch);
DPRINTF(DP_GENERIC | DP_STATE, "XXX: %ju %u %u",
(uintmax_t)next_idx, addr, cfg->pmask4);
return (EINVAL);
}
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_data) +
sizeof(ipfw_nat64lsn_stg);
if (sd->valsize < sz)
sizeof(ipfw_nat64lsn_stg_v1);
if (sd->valsize < sz) {
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
oh = (ipfw_obj_header *)ipfw_get_sopt_space(sd, sz);
od = (ipfw_obj_data *)(oh + 1);
od->head.type = IPFW_TLV_OBJDATA;
od->head.length = sz - sizeof(ipfw_obj_header);
stg = (ipfw_nat64lsn_stg *)(od + 1);
pg = get_first_pg(cfg, &addr, &nat_proto, &port);
if (pg == NULL) {
/* No states */
stg->next_idx = 0xFF;
stg->count = 0;
IPFW_UH_RUNLOCK(ch);
return (0);
}
states = 0;
pg_next = NULL;
while (pg != NULL) {
pg_next = get_next_pg(cfg, &addr, &nat_proto, &port);
if (pg_next == NULL)
stg->next_idx = 0xFF;
else
stg->next_idx = PACK_IDX(addr, nat_proto, port);
if (export_pg_states(cfg, pg, stg, sd) != 0) {
IPFW_UH_RUNLOCK(ch);
return (states == 0 ? ENOMEM: 0);
}
states += stg->count;
od->head.length += stg->count * sizeof(ipfw_nat64lsn_state);
sz += stg->count * sizeof(ipfw_nat64lsn_state);
if (pg_next != NULL) {
sz += sizeof(ipfw_nat64lsn_stg);
if (sd->valsize < sz)
stg = (ipfw_nat64lsn_stg_v1 *)(od + 1);
stg->count = total = 0;
stg->next.index = idx.index;
/*
* Acquire CALLOUT_LOCK to avoid races with expiration code.
* Thus states, hosts and PGs will not expire while we hold it.
*/
CALLOUT_LOCK(cfg);
ret = 0;
do {
pg = nat64lsn_get_pg_byidx(cfg, &idx);
if (pg != NULL) {
count = 0;
ret = nat64lsn_export_states_v1(cfg, &idx, pg,
sd, &count);
if (ret != 0)
break;
stg = (ipfw_nat64lsn_stg *)ipfw_get_sopt_space(sd,
sizeof(ipfw_nat64lsn_stg));
if (count > 0) {
stg->count += count;
total += count;
/* Update total size of reply */
od->head.length +=
count * sizeof(ipfw_nat64lsn_state_v1);
sz += count * sizeof(ipfw_nat64lsn_state_v1);
}
stg->alias4.s_addr = htonl(idx.addr);
}
pg = pg_next;
}
/* Determine new index */
switch (nat64lsn_next_pgidx(cfg, pg, &idx)) {
case -1:
ret = ENOENT; /* End of search */
break;
case 1: /*
* Next alias address, new group may be needed.
* If states count is zero, use this group.
*/
if (stg->count == 0)
continue;
/* Otherwise try to create new group */
sz += sizeof(ipfw_nat64lsn_stg_v1);
if (sd->valsize < sz) {
ret = ENOMEM;
break;
}
/* Save next index in current group */
stg->next.index = idx.index;
stg = (ipfw_nat64lsn_stg_v1 *)ipfw_get_sopt_space(sd,
sizeof(ipfw_nat64lsn_stg_v1));
od->head.length += sizeof(ipfw_nat64lsn_stg_v1);
stg->count = 0;
break;
}
stg->next.index = idx.index;
} while (ret == 0);
CALLOUT_UNLOCK(cfg);
IPFW_UH_RUNLOCK(ch);
return (0);
return ((total > 0 || idx.index == LAST_IDX) ? 0: ret);
}
static struct ipfw_sopt_handler scodes[] = {
@ -789,7 +818,8 @@ static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_NAT64LSN_LIST, 0, HDIR_GET, nat64lsn_list },
{ IP_FW_NAT64LSN_STATS, 0, HDIR_GET, nat64lsn_stats },
{ IP_FW_NAT64LSN_RESET_STATS,0, HDIR_SET, nat64lsn_reset_stats },
{ IP_FW_NAT64LSN_LIST_STATES,0, HDIR_GET, nat64lsn_states },
{ IP_FW_NAT64LSN_LIST_STATES,0, HDIR_GET, nat64lsn_states_v0 },
{ IP_FW_NAT64LSN_LIST_STATES,1, HDIR_GET, nat64lsn_states_v1 },
};
static int

View file

@ -60,7 +60,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1300016 /* Master, propagated to newvers */
#define __FreeBSD_version 1300017 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

View file

@ -224,6 +224,10 @@ struct nstat {
#define st_ctime st_ctim.tv_sec
#if __BSD_VISIBLE
#define st_birthtime st_birthtim.tv_sec
#define st_atimensec st_atim.tv_nsec
#define st_mtimensec st_mtim.tv_nsec
#define st_ctimensec st_ctim.tv_nsec
#define st_birthtimensec st_birthtim.tv_nsec
#endif
/* For compatibility. */

View file

@ -354,6 +354,25 @@ TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry);
NULL); \
})
/* Oid for a constant '\0' terminated string. */
#define SYSCTL_CONST_STRING(parent, nbr, name, access, arg, descr) \
SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING|(access), \
__DECONST(char *, arg), 0, sysctl_handle_string, "A", descr); \
CTASSERT(!(access & CTLFLAG_WR)); \
CTASSERT(((access) & CTLTYPE) == 0 || \
((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING)
#define SYSCTL_ADD_CONST_STRING(ctx, parent, nbr, name, access, arg, descr) \
({ \
char *__arg = __DECONST(char *, arg); \
CTASSERT(!(access & CTLFLAG_WR)); \
CTASSERT(((access) & CTLTYPE) == 0 || \
((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING); \
sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_STRING|(access), \
__arg, 0, sysctl_handle_string, "A", __DESCR(descr), \
NULL); \
})
/* Oid for a bool. If ptr is NULL, val is returned. */
#define SYSCTL_NULL_BOOL_PTR ((bool *)NULL)
#define SYSCTL_BOOL(parent, nbr, name, access, ptr, val, descr) \

View file

@ -13970,6 +13970,8 @@ softdep_bp_to_mp(bp)
if (LIST_EMPTY(&bp->b_dep))
return (NULL);
vp = bp->b_vp;
KASSERT(vp != NULL,
("%s, buffer with dependencies lacks vnode", __func__));
/*
* The ump mount point is stable after we get a correct
@ -13979,17 +13981,33 @@ softdep_bp_to_mp(bp)
* workitem might be freed while dereferenced.
*/
retry:
if (vp->v_type == VCHR) {
switch (vp->v_type) {
case VCHR:
VI_LOCK(vp);
mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL;
VI_UNLOCK(vp);
if (mp == NULL)
goto retry;
} else if (vp->v_type == VREG || vp->v_type == VDIR ||
vp->v_type == VLNK || vp->v_type == VFIFO) {
break;
case VREG:
case VDIR:
case VLNK:
case VFIFO:
case VSOCK:
mp = vp->v_mount;
} else {
return (NULL);
break;
case VBLK:
vn_printf(vp, "softdep_bp_to_mp: unexpected block device\n");
/* FALLTHROUGH */
case VNON:
case VBAD:
case VMARKER:
mp = NULL;
break;
default:
vn_printf(vp, "unknown vnode type");
mp = NULL;
break;
}
return (VFSTOUFS(mp));
}

View file

@ -1757,8 +1757,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
}
pmap_copy_page(src_m, dst_m);
VM_OBJECT_RUNLOCK(object);
dst_m->valid = VM_PAGE_BITS_ALL;
dst_m->dirty = VM_PAGE_BITS_ALL;
dst_m->dirty = dst_m->valid = src_m->valid;
} else {
dst_m = src_m;
if (vm_page_sleep_if_busy(dst_m, "fltupg"))
@ -1771,8 +1770,6 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
*/
break;
vm_page_xbusy(dst_m);
KASSERT(dst_m->valid == VM_PAGE_BITS_ALL,
("invalid dst page %p", dst_m));
}
VM_OBJECT_WUNLOCK(dst_object);
@ -1780,9 +1777,18 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* Enter it in the pmap. If a wired, copy-on-write
* mapping is being replaced by a write-enabled
* mapping, then wire that new mapping.
*
* The page can be invalid if the user called
* msync(MS_INVALIDATE) or truncated the backing vnode
* or shared memory object. In this case, do not
* insert it into pmap, but still do the copy so that
* all copies of the wired map entry have similar
* backing pages.
*/
pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
if (dst_m->valid == VM_PAGE_BITS_ALL) {
pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
}
/*
* Mark it no longer busy, and put it on the active list.

View file

@ -146,12 +146,12 @@ struct TestContext {
uint32_t nr_hdr_len; /* for PORT_HDR_SET and PORT_HDR_GET */
uint32_t nr_first_cpu_id; /* vale polling */
uint32_t nr_num_polling_cpus; /* vale polling */
uint32_t sync_kloop_mode; /* sync-kloop */
int fd; /* netmap file descriptor */
void *csb; /* CSB entries (atok and ktoa) */
struct nmreq_option *nr_opt; /* list of options */
sem_t *sem; /* for thread synchronization */
struct nmport_d *nmport; /* nmport descriptor from libnetmap */
};
static struct TestContext ctx_;
@ -352,8 +352,11 @@ niocregif(struct TestContext *ctx, int netmap_api)
/* The 11 ABI is the one right before the introduction of the new NIOCCTRL
* ABI. The 11 ABI is useful to perform tests with legacy applications
* (which use the 11 ABI) and new kernel (which uses 12, or higher). */
#define NETMAP_API_NIOCREGIF 11
* (which use the 11 ABI) and new kernel (which uses 12, or higher).
* However, version 14 introduced a change in the layout of struct netmap_if,
* so that binary backward compatibility to 11 is not supported anymore.
*/
#define NETMAP_API_NIOCREGIF 14
static int
legacy_regif_default(struct TestContext *ctx)
@ -1113,7 +1116,7 @@ bad_extmem_option(struct TestContext *ctx)
pools_info_fill(&pools_info);
/* Request a large ring size, to make sure that the kernel
* rejects our request. */
pools_info.nr_ring_pool_objsize = (1 << 16);
pools_info.nr_ring_pool_objsize = (1 << 20);
return _extmem_option(ctx, &pools_info) < 0 ? 0 : -1;
}
@ -1140,6 +1143,10 @@ duplicate_extmem_options(struct TestContext *ctx)
save1 = e1;
save2 = e2;
strncpy(ctx->ifname_ext, "vale0:0", sizeof(ctx->ifname_ext));
ctx->nr_tx_slots = 16;
ctx->nr_rx_slots = 16;
ret = port_register_hwall(ctx);
if (ret >= 0) {
printf("duplicate option not detected\n");
@ -1322,51 +1329,58 @@ sync_kloop(struct TestContext *ctx)
static int
sync_kloop_eventfds(struct TestContext *ctx)
{
struct nmreq_opt_sync_kloop_eventfds *opt = NULL;
struct nmreq_option save;
struct nmreq_opt_sync_kloop_eventfds *evopt = NULL;
struct nmreq_opt_sync_kloop_mode modeopt;
struct nmreq_option evsave;
int num_entries;
size_t opt_size;
int ret, i;
memset(&modeopt, 0, sizeof(modeopt));
modeopt.nro_opt.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_MODE;
modeopt.mode = ctx->sync_kloop_mode;
push_option(&modeopt.nro_opt, ctx);
num_entries = num_registered_rings(ctx);
opt_size = sizeof(*opt) + num_entries * sizeof(opt->eventfds[0]);
opt = calloc(1, opt_size);
opt->nro_opt.nro_next = 0;
opt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS;
opt->nro_opt.nro_status = 0;
opt->nro_opt.nro_size = opt_size;
opt_size = sizeof(*evopt) + num_entries * sizeof(evopt->eventfds[0]);
evopt = calloc(1, opt_size);
evopt->nro_opt.nro_next = 0;
evopt->nro_opt.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS;
evopt->nro_opt.nro_status = 0;
evopt->nro_opt.nro_size = opt_size;
for (i = 0; i < num_entries; i++) {
int efd = eventfd(0, 0);
opt->eventfds[i].ioeventfd = efd;
evopt->eventfds[i].ioeventfd = efd;
efd = eventfd(0, 0);
opt->eventfds[i].irqfd = efd;
evopt->eventfds[i].irqfd = efd;
}
push_option(&opt->nro_opt, ctx);
save = opt->nro_opt;
push_option(&evopt->nro_opt, ctx);
evsave = evopt->nro_opt;
ret = sync_kloop_start_stop(ctx);
if (ret != 0) {
free(opt);
free(evopt);
clear_options(ctx);
return ret;
}
#ifdef __linux__
save.nro_status = 0;
evsave.nro_status = 0;
#else /* !__linux__ */
save.nro_status = EOPNOTSUPP;
evsave.nro_status = EOPNOTSUPP;
#endif /* !__linux__ */
ret = checkoption(&opt->nro_opt, &save);
free(opt);
ret = checkoption(&evopt->nro_opt, &evsave);
free(evopt);
clear_options(ctx);
return ret;
}
static int
sync_kloop_eventfds_all(struct TestContext *ctx)
sync_kloop_eventfds_all_mode(struct TestContext *ctx,
uint32_t sync_kloop_mode)
{
int ret;
@ -1375,9 +1389,17 @@ sync_kloop_eventfds_all(struct TestContext *ctx)
return ret;
}
ctx->sync_kloop_mode = sync_kloop_mode;
return sync_kloop_eventfds(ctx);
}
static int
sync_kloop_eventfds_all(struct TestContext *ctx)
{
return sync_kloop_eventfds_all_mode(ctx, 0);
}
static int
sync_kloop_eventfds_all_tx(struct TestContext *ctx)
{
@ -1398,6 +1420,27 @@ sync_kloop_eventfds_all_tx(struct TestContext *ctx)
return sync_kloop_eventfds(ctx);
}
static int
sync_kloop_eventfds_all_direct(struct TestContext *ctx)
{
return sync_kloop_eventfds_all_mode(ctx,
NM_OPT_SYNC_KLOOP_DIRECT_TX | NM_OPT_SYNC_KLOOP_DIRECT_RX);
}
static int
sync_kloop_eventfds_all_direct_tx(struct TestContext *ctx)
{
return sync_kloop_eventfds_all_mode(ctx,
NM_OPT_SYNC_KLOOP_DIRECT_TX);
}
static int
sync_kloop_eventfds_all_direct_rx(struct TestContext *ctx)
{
return sync_kloop_eventfds_all_mode(ctx,
NM_OPT_SYNC_KLOOP_DIRECT_RX);
}
static int
sync_kloop_nocsb(struct TestContext *ctx)
{
@ -1677,6 +1720,9 @@ static struct mytest tests[] = {
decltest(sync_kloop),
decltest(sync_kloop_eventfds_all),
decltest(sync_kloop_eventfds_all_tx),
decltest(sync_kloop_eventfds_all_direct),
decltest(sync_kloop_eventfds_all_direct_tx),
decltest(sync_kloop_eventfds_all_direct_rx),
decltest(sync_kloop_nocsb),
decltest(sync_kloop_csb_enable),
decltest(sync_kloop_conflict),

View file

@ -174,6 +174,8 @@ acquire_lock(const char *name, int flags)
if ((fd = open(name, O_RDONLY|O_EXLOCK|flags, 0666)) == -1) {
if (errno == EAGAIN || errno == EINTR)
return (-1);
else if (errno == ENOENT && (flags & O_CREAT) == 0)
err(EX_UNAVAILABLE, "%s", name);
err(EX_CANTCREAT, "cannot open %s", name);
}
return (fd);

View file

@ -644,14 +644,17 @@ read_mtree_keywords(FILE *fp, fsnode *node)
st->st_atime = num;
st->st_ctime = num;
st->st_mtime = num;
#if HAVE_STRUCT_STAT_ST_MTIMENSEC
if (p == NULL)
break;
error = read_number(p, 10, &num, 0,
INTMAX_MAX);
if (error)
break;
if (num != 0)
error = EINVAL;
st->st_atimensec = num;
st->st_ctimensec = num;
st->st_mtimensec = num;
#endif
} else if (strcmp(keyword, "type") == 0) {
if (value == NULL) {
error = ENOATTR;

View file

@ -29,13 +29,7 @@
KB=1024
: ${TMPDIR=/tmp}
# TODO: add mtree `time` support; get a lot of errors like this right now when
# passing generating disk images with keyword mtree support, like:
#
# `[...]/mtree.spec:8: error: time: invalid value '1446458503'`
#
#DEFAULT_MTREE_KEYWORDS="type,mode,gid,uid,size,link,time"
DEFAULT_MTREE_KEYWORDS="type,mode,gid,uid,size,link"
DEFAULT_MTREE_KEYWORDS="type,mode,gid,uid,size,link,time"
TEST_IMAGE="$TMPDIR/test.img"
TEST_INPUTS_DIR="$TMPDIR/inputs"
TEST_MD_DEVICE_FILE="$TMPDIR/md.output"