Merge ^/head r358000 through r358048.

Dimitry Andric 2020-02-17 20:27:05 +00:00
commit 3c4ad300a1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang1000-import/; revision=358049
41 changed files with 387 additions and 221 deletions


@ -32,6 +32,11 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 13.x IS SLOW:
information about prerequisites and upgrading, if you are not already
using clang 3.5.0 or higher.
20200217:
The size of struct vnet and the magic cookie have changed.
Users need to recompile libkvm and all modules using VIMAGE
together with their new kernel.
20200212:
Defining the long deprecated NO_CTF, NO_DEBUG_FILES, NO_INSTALLLIB,
NO_MAN, NO_PROFILE, and NO_WARNS variables is now an error. Update


@ -248,6 +248,7 @@ void TestSkipped(const char *testcase, const char *test, const std::string& reas
const ::testing::TestInfo* const info = ::testing::UnitTest::GetInstance()->current_test_info(); \
std::cerr << "Skipping " << info->test_case_name() << "::" << info->name() << " because: " << reason << std::endl; \
TestSkipped(info->test_case_name(), info->name(), reason); \
GTEST_SKIP(); \
} while (0)
// Mark a test that can only be run as root.


@ -763,6 +763,7 @@ TEST_F(PipePdfork, ModeBits) {
#endif
TEST_F(PipePdfork, WildcardWait) {
TEST_SKIPPED("https://bugs.freebsd.org/244165");
// TODO(FreeBSD): make wildcard wait ignore pdfork()ed children
// https://bugs.freebsd.org/201054
TerminateChild();


@ -49,6 +49,7 @@ __SCCSID("@(#)kvm.c 8.2 (Berkeley) 2/13/94");
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <net/vnet.h>
#include <fcntl.h>


@ -115,8 +115,7 @@ int
kvm_getswapinfo_kvm(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max,
int flags)
{
int i;
swblk_t ttl;
int i, ttl;
TAILQ_HEAD(, swdevt) swtailq;
struct swdevt *sp, swinfo;
struct kvm_swap tot;
@ -167,8 +166,7 @@ int
kvm_getswapinfo_sysctl(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max,
int flags)
{
int ti;
swblk_t ttl;
int ti, ttl;
size_t mibi, len;
int soid[SWI_MAXMIB];
struct xswdev xsd;


@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/stat.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <net/vnet.h>
#include <assert.h>


@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/types.h>
#include <stdbool.h>
#include <net/vnet.h>
#include <kvm.h>


@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@ -878,6 +879,7 @@ static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
static int ada_enable_biospeedup = 1;
static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
@ -895,6 +897,8 @@ SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
&ada_read_ahead, 0, "Enable disk read-ahead");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
&ada_write_cache, 0, "Enable disk write cache");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
&ada_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
/*
* ADA_ORDEREDTAG_INTERVAL determines how often, relative
@ -1566,6 +1570,9 @@ adagetattr(struct bio *bp)
int ret;
struct cam_periph *periph;
if (g_handleattr_int(bp, "GEOM::canspeedup", ada_enable_biospeedup))
return (EJUSTRETURN);
periph = (struct cam_periph *)bp->bio_disk->d_drv1;
cam_periph_lock(periph);
ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,


@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */
@ -174,9 +175,12 @@ static SYSCTL_NODE(_kern_cam, OID_AUTO, nda, CTLFLAG_RD, 0,
static int nda_send_ordered = NDA_DEFAULT_SEND_ORDERED;
static int nda_default_timeout = NDA_DEFAULT_TIMEOUT;
static int nda_max_trim_entries = NDA_MAX_TRIM_ENTRIES;
static int nda_enable_biospeedup = 1;
SYSCTL_INT(_kern_cam_nda, OID_AUTO, max_trim, CTLFLAG_RDTUN,
&nda_max_trim_entries, NDA_MAX_TRIM_ENTRIES,
"Maximum number of BIO_DELETE to send down as a DSM TRIM.");
SYSCTL_INT(_kern_cam_nda, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
&nda_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
/*
* All NVMe media is non-rotational, so all nvme device instances
@ -700,6 +704,9 @@ ndagetattr(struct bio *bp)
int ret;
struct cam_periph *periph;
if (g_handleattr_int(bp, "GEOM::canspeedup", nda_enable_biospeedup))
return (EJUSTRETURN);
periph = (struct cam_periph *)bp->bio_disk->d_drv1;
cam_periph_lock(periph);
ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,


@ -1547,6 +1547,7 @@ static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static int da_disable_wp_detection = 0;
static int da_enable_biospeedup = 1;
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
@ -1561,6 +1562,8 @@ SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
&da_disable_wp_detection, 0,
"Disable detection of write-protected disks");
SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
&da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
@ -1967,6 +1970,9 @@ dagetattr(struct bio *bp)
int ret;
struct cam_periph *periph;
if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup))
return (EJUSTRETURN);
periph = (struct cam_periph *)bp->bio_disk->d_drv1;
cam_periph_lock(periph);
ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
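The ada, nda and da hunks above all make the same change: a new read-only tunable (kern.cam.<periph>.enable_biospeedup, default on) plus an early answer to the GEOM::canspeedup attribute in the periph's getattr handler, so GEOM consumers can discover whether BIO_SPEEDUP will be honored. A minimal sketch of that pattern, using a hypothetical "foo" periph in place of ada/nda/da (the sysctl parent node and the fall-through return value are illustrative, not from this commit):

static int foo_enable_biospeedup = 1;
SYSCTL_INT(_kern_cam_foo, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
    &foo_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");

static int
foogetattr(struct bio *bp)
{

        /*
         * Answer GEOM::canspeedup locally; g_handleattr_int() fills in
         * the reply when the attribute matches, and EJUSTRETURN tells
         * the disk layer not to process the bio any further.
         */
        if (g_handleattr_int(bp, "GEOM::canspeedup", foo_enable_biospeedup))
                return (EJUSTRETURN);
        /* Everything else still goes to the transport (xpt_getattr()). */
        return (ENOIOCTL);
}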


@ -237,7 +237,8 @@ static driver_t acpi_driver = {
};
static devclass_t acpi_devclass;
DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0,
BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
MODULE_VERSION(acpi, 1);
ACPI_SERIAL_DECL(acpi, "ACPI root bus");


@ -293,27 +293,31 @@ altera_sdcard_write_rxtx_buffer(struct altera_sdcard_softc *sc, void *data,
}
static void
altera_sdcard_io_start_internal(struct altera_sdcard_softc *sc, struct bio **bp)
altera_sdcard_io_start_internal(struct altera_sdcard_softc *sc,
struct bio **bpp)
{
struct bio *bp;
switch (*bp->bio_cmd) {
bp = *bpp;
switch (bp->bio_cmd) {
case BIO_READ:
altera_sdcard_write_cmd_arg(sc, *bp->bio_pblkno *
altera_sdcard_write_cmd_arg(sc, bp->bio_pblkno *
ALTERA_SDCARD_SECTORSIZE);
altera_sdcard_write_cmd(sc, ALTERA_SDCARD_CMD_READ_BLOCK);
break;
case BIO_WRITE:
altera_sdcard_write_rxtx_buffer(sc, *bp->bio_data,
*bp->bio_bcount);
altera_sdcard_write_cmd_arg(sc, *bp->bio_pblkno *
altera_sdcard_write_rxtx_buffer(sc, bp->bio_data,
bp->bio_bcount);
altera_sdcard_write_cmd_arg(sc, bp->bio_pblkno *
ALTERA_SDCARD_SECTORSIZE);
altera_sdcard_write_cmd(sc, ALTERA_SDCARD_CMD_WRITE_BLOCK);
break;
default:
biofinish(*bp, NULL, EOPNOTSUPP);
*bp = NULL;
biofinish(bp, NULL, EOPNOTSUPP);
*bpp = NULL;
}
}
@ -333,7 +337,7 @@ altera_sdcard_io_start(struct altera_sdcard_softc *sc, struct bio *bp)
KASSERT(bp->bio_bcount == ALTERA_SDCARD_SECTORSIZE,
("%s: I/O size not %d", __func__, ALTERA_SDCARD_SECTORSIZE));
altera_sdcard_io_start_internal(sc, &bp);
sc->as_currentbio = *bp;
sc->as_currentbio = bp;
sc->as_retriesleft = ALTERA_SDCARD_RETRY_LIMIT;
}


@ -42,7 +42,8 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
static int drm_msi = 1; /* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
"DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices");


@ -69,7 +69,7 @@ int drm_sysctl_init(struct drm_device *dev)
/* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO,
"dri", CTLFLAG_RW, NULL, "DRI Graphics");
"dri", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "DRI Graphics");
if (!drioid) {
free(dev->sysctl, DRM_MEM_DRIVER);
dev->sysctl = NULL;
@ -92,23 +92,17 @@ int drm_sysctl_init(struct drm_device *dev)
info->name[0] = '0' + i;
info->name[1] = 0;
top = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(drioid),
OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
OID_AUTO, info->name, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL);
if (!top) {
drm_sysctl_cleanup(dev);
return (-ENOMEM);
}
for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
oid = SYSCTL_ADD_OID(&info->ctx,
SYSCTL_CHILDREN(top),
OID_AUTO,
drm_sysctl_list[i].name,
CTLTYPE_STRING | CTLFLAG_RD,
dev,
0,
drm_sysctl_list[i].f,
"A",
NULL);
oid = SYSCTL_ADD_OID(&info->ctx, SYSCTL_CHILDREN(top),
OID_AUTO, drm_sysctl_list[i].name,
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
dev, 0, drm_sysctl_list[i].f, "A", NULL);
if (!oid) {
drm_sysctl_cleanup(dev);
return (-ENOMEM);


@ -332,7 +332,8 @@ uint32_t pci_numdevs = 0;
static int pcie_chipset, pcix_chipset;
/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"PCI bus tuning parameters");
static int pci_enable_io_modes = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,


@ -192,7 +192,7 @@
#define PCIZ_PMUX 0x001a /* Protocol Multiplexing */
#define PCIZ_PASID 0x001b /* Process Address Space ID */
#define PCIZ_LN_REQ 0x001c /* LN Requester */
#define PCIZ_DPC 0x001d /* Downstream Porto Containment */
#define PCIZ_DPC 0x001d /* Downstream Port Containment */
#define PCIZ_L1PM 0x001e /* L1 PM Substates */
/* config registers for header type 0 devices */


@ -368,8 +368,8 @@ refcount_release_last(volatile u_int *count, u_int n, u_int old)
/*
* Last reference. Signal the user to call the destructor.
*
* Ensure that the destructor sees all updates. The fence_rel
* at the start of refcount_releasen synchronizes with this fence.
* Ensure that the destructor sees all updates. This synchronizes
* with release fences from all routines which drop the count.
*/
atomic_thread_fence_acq();
return (true);


@ -8,6 +8,8 @@ SUBDIR_PARALLEL=
# Modules that include binary-only blobs of microcode should be selectable by
# MK_SOURCELESS_UCODE option (see below).
.include "${SYSDIR}/conf/config.mk"
.if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES)
SUBDIR=${MODULES_OVERRIDE}
.else
@ -396,8 +398,10 @@ _autofs= autofs
.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
.if (${MACHINE_CPUARCH} != "arm" || ${MACHINE_ARCH:Marmv[67]*} != "") && \
${MACHINE_CPUARCH} != "mips"
.if ${KERN_OPTS:MKDTRACE_HOOKS}
SUBDIR+= dtrace
.endif
.endif
SUBDIR+= opensolaris
.endif
@ -712,9 +716,11 @@ _sgx_linux= sgx_linux
_smartpqi= smartpqi
.if ${MK_BHYVE} != "no" || defined(ALL_MODULES)
.if ${KERN_OPTS:MSMP}
_vmm= vmm
.endif
.endif
.endif
.if ${MACHINE_CPUARCH} == "i386"
# XXX some of these can move to the general case when de-i386'ed
@ -799,8 +805,6 @@ afterinstall: .PHONY
fi
.endif
.include "${SYSDIR}/conf/config.mk"
SUBDIR:= ${SUBDIR:u:O}
.include <bsd.subdir.mk>


@ -322,6 +322,11 @@ SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE);
*/
#define IFNET_HOLD (void *)(uintptr_t)(-1)
#ifdef VIMAGE
#define VNET_IS_SHUTTING_DOWN(_vnet) \
((_vnet)->vnet_shutdown && (_vnet)->vnet_state < SI_SUB_VNET_DONE)
#endif
static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];
@ -1080,7 +1085,7 @@ if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp)
#ifdef VIMAGE
bool shutdown;
shutdown = ifp->if_vnet->vnet_shutdown;
shutdown = VNET_IS_SHUTTING_DOWN(ifp->if_vnet);
#endif
IFNET_WLOCK();
CK_STAILQ_FOREACH(iter, &V_ifnet, if_link)
@ -1339,6 +1344,7 @@ if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
struct prison *pr;
struct ifnet *difp;
int error;
bool shutdown;
/* Try to find the prison within our visibility. */
sx_slock(&allprison_lock);
@ -1366,7 +1372,8 @@ if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
}
/* Make sure the VNET is stable. */
if (ifp->if_vnet->vnet_shutdown) {
shutdown = VNET_IS_SHUTTING_DOWN(ifp->if_vnet);
if (shutdown) {
CURVNET_RESTORE();
prison_free(pr);
return (EBUSY);
@ -1391,6 +1398,7 @@ if_vmove_reclaim(struct thread *td, char *ifname, int jid)
struct vnet *vnet_dst;
struct ifnet *ifp;
int error;
bool shutdown;
/* Try to find the prison within our visibility. */
sx_slock(&allprison_lock);
@ -1419,7 +1427,8 @@ if_vmove_reclaim(struct thread *td, char *ifname, int jid)
}
/* Make sure the VNET is stable. */
if (ifp->if_vnet->vnet_shutdown) {
shutdown = VNET_IS_SHUTTING_DOWN(ifp->if_vnet);
if (shutdown) {
CURVNET_RESTORE();
prison_free(pr);
return (EBUSY);
@ -2950,11 +2959,15 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
struct ifreq *ifr;
int error;
int oif_flags;
#ifdef VIMAGE
bool shutdown;
#endif
CURVNET_SET(so->so_vnet);
#ifdef VIMAGE
/* Make sure the VNET is stable. */
if (so->so_vnet->vnet_shutdown) {
shutdown = VNET_IS_SHUTTING_DOWN(so->so_vnet);
if (shutdown) {
CURVNET_RESTORE();
return (EBUSY);
}
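The if.c changes above stop testing ifp->if_vnet->vnet_shutdown directly and use the new VNET_IS_SHUTTING_DOWN() macro, which additionally requires the vnet to still be below SI_SUB_VNET_DONE; combined with vnet_destroy() now setting vnet_shutdown before the destructors run (see the vnet.c hunk further down), this closes the window where a vnet looked usable during teardown. A condensed sketch of the guard, with a hypothetical wrapper standing in for the ifioctl()/if_vmove paths shown above:

static int
example_vnet_guard(struct socket *so)
{
#ifdef VIMAGE
        bool shutdown;

        /* Refuse new work while the owning vnet is being torn down. */
        shutdown = VNET_IS_SHUTTING_DOWN(so->so_vnet);
        if (shutdown)
                return (EBUSY);
#endif
        return (0);
}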


@ -1056,6 +1056,8 @@ netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
if (m != NULL) {
KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
cpuid));
VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
error = netisr_queue_internal(proto, m, cpuid);
} else
error = ENOBUFS;


@ -279,6 +279,9 @@ vnet_destroy(struct vnet *vnet)
LIST_REMOVE(vnet, vnet_le);
VNET_LIST_WUNLOCK();
/* Signal that VNET is being shutdown. */
vnet->vnet_shutdown = true;
CURVNET_SET_QUIET(vnet);
vnet_sysuninit();
CURVNET_RESTORE();
@ -350,15 +353,15 @@ vnet_data_startup(void *dummy __unused)
}
SYSINIT(vnet_data, SI_SUB_KLD, SI_ORDER_FIRST, vnet_data_startup, NULL);
/* Dummy VNET_SYSINIT to make sure we always reach the final end state. */
static void
vnet_sysuninit_shutdown(void *unused __unused)
vnet_sysinit_done(void *unused __unused)
{
/* Signal that VNET is being shutdown. */
curvnet->vnet_shutdown = 1;
return;
}
VNET_SYSUNINIT(vnet_sysuninit_shutdown, SI_SUB_VNET_DONE, SI_ORDER_FIRST,
vnet_sysuninit_shutdown, NULL);
VNET_SYSINIT(vnet_sysinit_done, SI_SUB_VNET_DONE, SI_ORDER_ANY,
vnet_sysinit_done, NULL);
/*
* When a module is loaded and requires storage for a virtualized global
@ -572,8 +575,10 @@ vnet_sysinit(void)
struct vnet_sysinit *vs;
VNET_SYSINIT_RLOCK();
TAILQ_FOREACH(vs, &vnet_constructors, link)
TAILQ_FOREACH(vs, &vnet_constructors, link) {
curvnet->vnet_state = vs->subsystem;
vs->func(vs->arg);
}
VNET_SYSINIT_RUNLOCK();
}
@ -589,8 +594,10 @@ vnet_sysuninit(void)
VNET_SYSINIT_RLOCK();
TAILQ_FOREACH_REVERSE(vs, &vnet_destructors, vnet_sysuninit_head,
link)
link) {
curvnet->vnet_state = vs->subsystem;
vs->func(vs->arg);
}
VNET_SYSINIT_RUNLOCK();
}
@ -704,7 +711,8 @@ db_vnet_print(struct vnet *vnet)
db_printf(" vnet_data_mem = %p\n", vnet->vnet_data_mem);
db_printf(" vnet_data_base = %#jx\n",
(uintmax_t)vnet->vnet_data_base);
db_printf(" vnet_shutdown = %#08x\n", vnet->vnet_shutdown);
db_printf(" vnet_state = %#08x\n", vnet->vnet_state);
db_printf(" vnet_shutdown = %#03x\n", vnet->vnet_shutdown);
db_printf("\n");
}


@ -72,11 +72,12 @@ struct vnet {
u_int vnet_magic_n;
u_int vnet_ifcnt;
u_int vnet_sockcnt;
u_int vnet_shutdown; /* Shutdown in progress. */
u_int vnet_state; /* SI_SUB_* */
void *vnet_data_mem;
uintptr_t vnet_data_base;
};
#define VNET_MAGIC_N 0x3e0d8f29
bool vnet_shutdown; /* Shutdown in progress. */
} __aligned(CACHE_LINE_SIZE);
#define VNET_MAGIC_N 0x5e4a6f28
/*
* These two virtual network stack allocator definitions are also required


@ -303,6 +303,7 @@ igmp_save_context(struct mbuf *m, struct ifnet *ifp)
#ifdef VIMAGE
m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.flowid = ifp->if_index;
}


@ -517,6 +517,9 @@ sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
{
struct sctp_stream_out *strq, *strqt, *strqn;
if (asoc->ss_data.locked_on_sending) {
return (asoc->ss_data.locked_on_sending);
}
strqt = asoc->ss_data.last_out_stream;
prio_again:
/* Find the next stream to use */
@ -694,6 +697,9 @@ sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
{
struct sctp_stream_out *strq = NULL, *strqt;
if (asoc->ss_data.locked_on_sending) {
return (asoc->ss_data.locked_on_sending);
}
if (asoc->ss_data.last_out_stream == NULL ||
TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
@ -900,6 +906,9 @@ sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
struct sctp_stream_out *strq;
struct sctp_stream_queue_pending *sp;
if (asoc->ss_data.locked_on_sending) {
return (asoc->ss_data.locked_on_sending);
}
sp = TAILQ_FIRST(&asoc->ss_data.out.list);
default_again:
if (sp != NULL) {


@ -437,8 +437,10 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
struct hc_metrics *hc_entry;
if (!V_tcp_use_hostcache)
if (!V_tcp_use_hostcache) {
bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
return;
}
/*
* Find the right bucket.
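This hostcache fix matters because tcp_hc_get() previously returned without touching the caller-supplied structure when net.inet.tcp.hostcache.enable was 0, so callers could read stack garbage; it now zeroes the structure in that case. A hedged sketch of the calling convention this guarantees (the wrapper function is illustrative and not part of this commit; rmx_mtu is one of the struct hc_metrics_lite fields):

static u_long
example_cached_mtu(struct in_conninfo *inc)
{
        struct hc_metrics_lite metrics;

        tcp_hc_get(inc, &metrics);
        /*
         * With the hostcache disabled every field now reads as zero,
         * so the caller falls back to its defaults instead of acting
         * on uninitialized stack memory.
         */
        if (metrics.rmx_mtu != 0)
                return (metrics.rmx_mtu);
        return (0);             /* no cached value; caller uses its default */
}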


@ -283,6 +283,7 @@ mld_save_context(struct mbuf *m, struct ifnet *ifp)
#ifdef VIMAGE
m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.flowid = ifp->if_index;
}


@ -60,7 +60,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1300077 /* Master, propagated to newvers */
#define __FreeBSD_version 1300078 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,


@ -119,6 +119,9 @@ refcount_releasen(volatile u_int *count, u_int n)
KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
("refcount_releasen: n=%u too large", n));
/*
* Paired with acquire fence in refcount_release_last.
*/
atomic_thread_fence_rel();
old = atomic_fetchadd_int(count, -n);
if (__predict_false(n >= REFCOUNT_COUNT(old) ||
@ -198,6 +201,9 @@ refcount_release_if_gt(volatile u_int *count, u_int n)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
/*
* Paired with acquire fence in refcount_release_last.
*/
if (atomic_fcmpset_rel_int(count, &old, old - 1))
return (true);
}
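The two refcount(9) hunks (the comment rewrite earlier and the new comments here) describe a single release/acquire pairing: every routine that can drop the count issues a release fence (or uses a release atomic) before the decrement, and refcount_release_last() issues an acquire fence before reporting the final release, so the destructor observes all stores made while references were held. A stripped-down sketch of that pairing, not the full refcount(9) implementation (saturation and underflow handling omitted):

#include <sys/types.h>
#include <machine/atomic.h>

static inline bool
example_refcount_release(volatile u_int *count)
{
        u_int old;

        /* Paired with the acquire fence below. */
        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -1);
        if (old > 1)
                return (false);         /* other references remain */

        /*
         * Last reference: make every store done under previously held
         * references visible before the caller runs the destructor.
         */
        atomic_thread_fence_acq();
        return (true);
}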


@ -1464,6 +1464,9 @@ softdep_send_speedup(struct ufsmount *ump, size_t shortage, u_int flags)
{
struct buf *bp;
if ((ump->um_flags & UM_CANSPEEDUP) == 0)
return;
bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
bp->b_iocmd = BIO_SPEEDUP;
bp->b_ioflags = flags;


@ -794,7 +794,7 @@ ffs_mountfs(devvp, mp, td)
struct ucred *cred;
struct g_consumer *cp;
struct mount *nmp;
int candelete;
int candelete, canspeedup;
off_t loc;
fs = NULL;
@ -1011,6 +1011,12 @@ ffs_mountfs(devvp, mp, td)
}
}
len = sizeof(int);
if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
if (canspeedup)
ump->um_flags |= UM_CANSPEEDUP;
}
ump->um_mountp = mp;
ump->um_dev = dev;
ump->um_devvp = devvp;


@ -131,6 +131,7 @@ struct ufsmount {
*/
#define UM_CANDELETE 0x00000001 /* devvp supports TRIM */
#define UM_WRITESUSPENDED 0x00000002 /* suspension in progress */
#define UM_CANSPEEDUP 0x00000004 /* devvp supports SPEEDUP */
/*
* function prototypes
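Taken together, the three UFS hunks above wire the new GEOM attribute into soft updates: ffs_mountfs() queries GEOM::canspeedup once at mount time and records a positive answer as UM_CANSPEEDUP, and softdep_send_speedup() now returns early when the flag is clear, so BIO_SPEEDUP requests are only built for providers that advertised support. A small sketch condensed from those hunks (the example_* function names are hypothetical):

static void
example_speedup_setup(struct ufsmount *ump, struct g_consumer *cp)
{
        int canspeedup, len;

        /* Mount time: remember whether the provider honors BIO_SPEEDUP. */
        len = sizeof(int);
        if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0 &&
            canspeedup != 0)
                ump->um_flags |= UM_CANSPEEDUP;
}

static bool
example_can_speedup(struct ufsmount *ump)
{

        /* softdep_send_speedup() bails out early when this is false. */
        return ((ump->um_flags & UM_CANSPEEDUP) != 0);
}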


@ -1188,8 +1188,8 @@ swap_pager_unswapped(vm_page_t m)
* The pages in "ma" must be busied and will remain busied upon return.
*/
static int
swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
int *rahead)
swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
int *rbehind, int *rahead)
{
struct buf *bp;
vm_page_t bm, mpred, msucc, p;
@ -1197,7 +1197,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
daddr_t blk;
int i, maxahead, maxbehind, reqcount;
VM_OBJECT_WLOCK(object);
VM_OBJECT_ASSERT_WLOCKED(object);
reqcount = count;
KASSERT(object->type == OBJT_SWAP,
@ -1352,6 +1352,15 @@ swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
*/
}
static int
swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count,
int *rbehind, int *rahead)
{
VM_OBJECT_WLOCK(object);
return (swap_pager_getpages_locked(object, ma, count, rbehind, rahead));
}
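swap_pager_getpages() is split here so that a caller which already holds the object write lock (the reworked swapoff scan below) can use swap_pager_getpages_locked() directly, while the pager-ops entry point keeps its old contract by taking the lock and delegating; the locked variant drops the lock itself before returning. A generic sketch of that locked/unlocked split, with hypothetical names:

static int
example_op_locked(vm_object_t object)
{

        VM_OBJECT_ASSERT_WLOCKED(object);
        /* ... do the work; this path releases the object lock itself ... */
        VM_OBJECT_WUNLOCK(object);
        return (0);
}

static int
example_op(vm_object_t object)
{

        /* Unlocked entry point: acquire the lock and delegate. */
        VM_OBJECT_WLOCK(object);
        return (example_op_locked(object));
}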
/*
* swap_pager_getpages_async():
*
@ -1444,18 +1453,6 @@ swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
/* Maximum I/O size is limited by maximum swap block size. */
n = min(count - i, nsw_cluster_max);
/* Get a block of swap of size up to size n. */
blk = swp_pager_getswapspace(&n, 4);
if (blk == SWAPBLK_NONE) {
for (j = 0; j < n; ++j)
rtvals[i + j] = VM_PAGER_FAIL;
continue;
}
/*
* All I/O parameters have been satisfied. Build the I/O
* request and assign the swap space.
*/
if (async) {
mtx_lock(&swbuf_mtx);
while (nsw_wcount_async == 0)
@ -1464,6 +1461,33 @@ swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
nsw_wcount_async--;
mtx_unlock(&swbuf_mtx);
}
/* Get a block of swap of size up to size n. */
VM_OBJECT_WLOCK(object);
blk = swp_pager_getswapspace(&n, 4);
if (blk == SWAPBLK_NONE) {
VM_OBJECT_WUNLOCK(object);
mtx_lock(&swbuf_mtx);
if (++nsw_wcount_async == 1)
wakeup(&nsw_wcount_async);
mtx_unlock(&swbuf_mtx);
for (j = 0; j < n; ++j)
rtvals[i + j] = VM_PAGER_FAIL;
continue;
}
for (j = 0; j < n; ++j) {
mreq = ma[i + j];
vm_page_aflag_clear(mreq, PGA_SWAP_FREE);
addr = swp_pager_meta_build(mreq->object, mreq->pindex,
blk + j);
if (addr != SWAPBLK_NONE)
swp_pager_update_freerange(&s_free, &n_free,
addr);
MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
mreq->oflags |= VPO_SWAPINPROG;
}
VM_OBJECT_WUNLOCK(object);
bp = uma_zalloc(swwbuf_zone, M_WAITOK);
if (async)
bp->b_flags = B_ASYNC;
@ -1475,22 +1499,10 @@ swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
bp->b_bcount = PAGE_SIZE * n;
bp->b_bufsize = PAGE_SIZE * n;
bp->b_blkno = blk;
VM_OBJECT_WLOCK(object);
for (j = 0; j < n; ++j) {
mreq = ma[i + j];
vm_page_aflag_clear(mreq, PGA_SWAP_FREE);
addr = swp_pager_meta_build(mreq->object, mreq->pindex,
blk + j);
if (addr != SWAPBLK_NONE)
swp_pager_update_freerange(&s_free, &n_free,
addr);
MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
VM_OBJECT_WUNLOCK(object);
for (j = 0; j < n; j++)
bp->b_pages[j] = ma[i + j];
bp->b_npages = n;
/*
* Must set dirty range for NFS to work.
*/
@ -1712,69 +1724,8 @@ swp_pager_force_dirty(vm_page_t m)
{
vm_page_dirty(m);
#ifdef INVARIANTS
if (!vm_page_wired(m) && m->a.queue == PQ_NONE)
panic("page %p is neither wired nor queued", m);
#endif
vm_page_xunbusy(m);
swap_pager_unswapped(m);
}
static void
swp_pager_force_launder(vm_page_t m)
{
vm_page_dirty(m);
vm_page_launder(m);
vm_page_xunbusy(m);
swap_pager_unswapped(m);
}
/*
* SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
*
* This routine dissociates pages starting at the given index within an
* object from their backing store, paging them in if they do not reside
* in memory. Pages that are paged in are marked dirty and placed in the
* laundry queue. Pages are marked dirty because they no longer have
* backing store. They are placed in the laundry queue because they have
* not been accessed recently. Otherwise, they would already reside in
* memory.
*/
static void
swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
{
vm_page_t ma[npages];
int i, j;
KASSERT(npages > 0, ("%s: No pages", __func__));
KASSERT(npages <= MAXPHYS / PAGE_SIZE,
("%s: Too many pages: %d", __func__, npages));
KASSERT(object->type == OBJT_SWAP,
("%s: Object not swappable", __func__));
vm_object_pip_add(object, npages);
vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
for (i = j = 0;; i++) {
/* Count nonresident pages, to page-in all at once. */
if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
continue;
if (j < i) {
VM_OBJECT_WUNLOCK(object);
/* Page-in nonresident pages. Mark for laundering. */
if (swap_pager_getpages(object, &ma[j], i - j, NULL,
NULL) != VM_PAGER_OK)
panic("%s: read from swap failed", __func__);
VM_OBJECT_WLOCK(object);
do {
swp_pager_force_launder(ma[j]);
} while (++j < i);
}
if (i == npages)
break;
/* Mark dirty a resident page. */
swp_pager_force_dirty(ma[j++]);
}
vm_object_pip_wakeupn(object, npages);
}
/*
@ -1787,62 +1738,95 @@ static void
swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
{
struct swblk *sb;
vm_pindex_t pi, s_pindex;
daddr_t blk, n_blks, s_blk;
int i;
vm_page_t m;
vm_pindex_t pi;
daddr_t blk;
int i, nv, rahead, rv;
KASSERT(object->type == OBJT_SWAP,
("%s: Object not swappable", __func__));
n_blks = 0;
for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
&object->un_pager.swp.swp_blks, pi)) != NULL; ) {
if ((object->flags & OBJ_DEAD) != 0) {
/*
* Make sure that pending writes finish before
* returning.
*/
vm_object_pip_wait(object, "swpoff");
swp_pager_meta_free_all(object);
break;
}
for (i = 0; i < SWAP_META_PAGES; i++) {
blk = sb->d[i];
if (!swp_pager_isondev(blk, sp))
blk = SWAPBLK_NONE;
/*
* Count the number of contiguous valid blocks.
*/
for (nv = 0; nv < SWAP_META_PAGES - i; nv++) {
blk = sb->d[i + nv];
if (!swp_pager_isondev(blk, sp) ||
blk == SWAPBLK_NONE)
break;
}
if (nv == 0)
continue;
/*
* If there are no blocks/pages accumulated, start a new
* accumulation here.
* Look for a page corresponding to the first
* valid block and ensure that any pending paging
* operations on it are complete. If the page is valid,
* mark it dirty and free the swap block. Try to batch
* this operation since it may cause sp to be freed,
* meaning that we must restart the scan. Avoid busying
* valid pages since we may block forever on kernel
* stack pages.
*/
if (n_blks == 0) {
if (blk != SWAPBLK_NONE) {
s_blk = blk;
s_pindex = sb->p + i;
n_blks = 1;
m = vm_page_lookup(object, sb->p + i);
if (m == NULL) {
m = vm_page_alloc(object, sb->p + i,
VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
if (m == NULL)
break;
} else {
if ((m->oflags & VPO_SWAPINPROG) != 0) {
m->oflags |= VPO_SWAPSLEEP;
VM_OBJECT_SLEEP(object, &object->handle,
PSWP, "swpoff", 0);
break;
}
continue;
if (vm_page_all_valid(m)) {
do {
swp_pager_force_dirty(m);
} while (--nv > 0 &&
(m = vm_page_next(m)) != NULL &&
vm_page_all_valid(m) &&
(m->oflags & VPO_SWAPINPROG) == 0);
break;
}
if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
break;
}
/*
* If the accumulation can be extended without breaking
* the sequence of consecutive blocks and pages that
* swp_pager_force_pagein() depends on, do so.
*/
if (n_blks < MAXPHYS / PAGE_SIZE &&
s_blk + n_blks == blk &&
s_pindex + n_blks == sb->p + i) {
++n_blks;
continue;
}
vm_object_pip_add(object, 1);
rahead = SWAP_META_PAGES;
rv = swap_pager_getpages_locked(object, &m, 1, NULL,
&rahead);
if (rv != VM_PAGER_OK)
panic("%s: read from swap failed: %d",
__func__, rv);
vm_object_pip_wakeupn(object, 1);
VM_OBJECT_WLOCK(object);
vm_page_xunbusy(m);
/*
* The sequence of consecutive blocks and pages cannot
* be extended, so page them all in here. Then,
* because doing so involves releasing and reacquiring
* a lock that protects the swap block pctrie, do not
* rely on the current swap block. Break this loop and
* re-fetch the same pindex from the pctrie again.
* The object lock was dropped so we must restart the
* scan of this swap block. Pages paged in during this
* iteration will be marked dirty in a future iteration.
*/
swp_pager_force_pagein(object, s_pindex, n_blks);
n_blks = 0;
break;
}
if (i == SWAP_META_PAGES)
pi = sb->p + SWAP_META_PAGES;
}
if (n_blks > 0)
swp_pager_force_pagein(object, s_pindex, n_blks);
}
/*
@ -2078,7 +2062,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
* Free the swblk if we end up with the empty page run.
*/
if (swapblk == SWAPBLK_NONE)
swp_pager_free_empty_swblk(object, sb);
swp_pager_free_empty_swblk(object, sb);
return (prev_swapblk);
}
@ -2350,7 +2334,7 @@ swaponsomething(struct vnode *vp, void *id, u_long nblks,
sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
{
struct swdevt *sp, *tsp;
swblk_t dvbase;
daddr_t dvbase;
u_long mblocks;
/*


@ -38,14 +38,9 @@
*/
#ifndef _VM_SWAP_PAGER_H_
#define _VM_SWAP_PAGER_H_ 1
#define _VM_SWAP_PAGER_H_
typedef int32_t swblk_t; /*
* swap offset. This is the type used to
* address the "virtual swap device" and
* therefore the maximum swap space is
* 2^32 pages.
*/
#include <sys/_types.h>
struct buf;
struct swdevt;
@ -62,8 +57,8 @@ struct swdevt {
dev_t sw_dev;
struct vnode *sw_vp;
void *sw_id;
swblk_t sw_first;
swblk_t sw_end;
__daddr_t sw_first;
__daddr_t sw_end;
struct blist *sw_blist;
TAILQ_ENTRY(swdevt) sw_list;
sw_strategy_t *sw_strategy;


@ -670,6 +670,11 @@ void uma_prealloc(uma_zone_t zone, int itemcnt);
*/
int uma_zone_exhausted(uma_zone_t zone);
/*
* Returns the bytes of memory consumed by the zone.
*/
size_t uma_zone_memory(uma_zone_t zone);
/*
* Common UMA_ZONE_PCPU zones.
*/


@ -2944,10 +2944,13 @@ uma_zdestroy(uma_zone_t zone)
void
uma_zwait(uma_zone_t zone)
{
void *item;
item = uma_zalloc_arg(zone, NULL, M_WAITOK);
uma_zfree(zone, item);
if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK));
else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0)
uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK));
else
uma_zfree(zone, uma_zalloc(zone, M_WAITOK));
}
void *
@ -4678,6 +4681,27 @@ uma_prealloc(uma_zone_t zone, int items)
}
}
/*
* Returns a snapshot of memory consumption in bytes.
*/
size_t
uma_zone_memory(uma_zone_t zone)
{
size_t sz;
int i;
sz = 0;
if (zone->uz_flags & UMA_ZFLAG_CACHE) {
for (i = 0; i < vm_ndomains; i++)
sz += zone->uz_domain[i].uzd_nitems;
return (sz * zone->uz_size);
}
for (i = 0; i < vm_ndomains; i++)
sz += zone->uz_keg->uk_domain[i].ud_pages;
return (sz * PAGE_SIZE);
}
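uma_zone_memory() is deliberately a cheap, unlocked estimate: for cache zones it multiplies the per-domain item counts by the item size, and for keg-backed zones it multiplies the keg's per-domain page counts by PAGE_SIZE. A hedged example of how a caller might report it (the surrounding function is illustrative, not part of this commit):

static void
example_report_zone(uma_zone_t zone)
{
        size_t bytes;

        /* Snapshot only; the counts may change while we read them. */
        bytes = uma_zone_memory(zone);
        printf("zone footprint: roughly %zu KB\n", bytes / 1024);
}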
/* See uma.h */
void
uma_reclaim(int req)


@ -174,7 +174,7 @@ static uma_zone_t fakepg_zone;
static void vm_page_alloc_check(vm_page_t m);
static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
const char *wmesg, bool nonshared, bool locked);
vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
static bool vm_page_free_prep(vm_page_t m);
@ -846,7 +846,8 @@ vm_page_acquire_flags(vm_page_t m, int allocflags)
/*
* vm_page_busy_sleep_flags
*
* Sleep for busy according to VM_ALLOC_ parameters.
* Sleep for busy according to VM_ALLOC_ parameters. Returns true
* if the caller should retry and false otherwise.
*/
static bool
vm_page_busy_sleep_flags(vm_object_t object, vm_page_t m, const char *wmesg,
@ -855,18 +856,19 @@ vm_page_busy_sleep_flags(vm_object_t object, vm_page_t m, const char *wmesg,
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
return (false);
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
* Reference the page before unlocking and sleeping so that
* the page daemon is less likely to reclaim it.
*/
if ((allocflags & VM_ALLOC_NOCREAT) == 0)
vm_page_aflag_set(m, PGA_REFERENCED);
if (_vm_page_busy_sleep(object, m, wmesg, (allocflags &
VM_ALLOC_IGN_SBUSY) != 0, true))
vm_page_reference(m);
if (_vm_page_busy_sleep(object, m, m->pindex, wmesg, allocflags, true))
VM_OBJECT_WLOCK(object);
if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
return (false);
return (true);
}
@ -900,8 +902,8 @@ vm_page_busy_acquire(vm_page_t m, int allocflags)
else
locked = false;
MPASS(locked || vm_page_wired(m));
if (_vm_page_busy_sleep(obj, m, "vmpba",
(allocflags & VM_ALLOC_SBUSY) != 0, locked))
if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
locked) && locked)
VM_OBJECT_WLOCK(obj);
if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
return (false);
@ -1026,19 +1028,49 @@ vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
VM_OBJECT_ASSERT_LOCKED(obj);
vm_page_lock_assert(m, MA_NOTOWNED);
if (!_vm_page_busy_sleep(obj, m, wmesg, nonshared, true))
if (!_vm_page_busy_sleep(obj, m, m->pindex, wmesg,
nonshared ? VM_ALLOC_SBUSY : 0 , true))
VM_OBJECT_DROP(obj);
}
/*
* vm_page_busy_sleep_unlocked:
*
* Sleep if the page is busy, using the page pointer as wchan.
* This is used to implement the hard-path of busying mechanism.
*
* If nonshared is true, sleep only if the page is xbusy.
*
* The object lock must not be held on entry. The operation will
* return if the page changes identity.
*/
void
vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
const char *wmesg, bool nonshared)
{
VM_OBJECT_ASSERT_UNLOCKED(obj);
vm_page_lock_assert(m, MA_NOTOWNED);
_vm_page_busy_sleep(obj, m, pindex, wmesg,
nonshared ? VM_ALLOC_SBUSY : 0, false);
}
/*
* _vm_page_busy_sleep:
*
* Internal busy sleep function.
* Internal busy sleep function. Verifies the page identity and
* lockstate against parameters. Returns true if it sleeps and
* false otherwise.
*
* If locked is true the lock will be dropped for any true returns
* and held for any false returns.
*/
static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, const char *wmesg,
bool nonshared, bool locked)
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
const char *wmesg, int allocflags, bool locked)
{
bool xsleep;
u_int x;
/*
@ -1049,23 +1081,36 @@ _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, const char *wmesg,
if (locked)
VM_OBJECT_DROP(obj);
vm_object_busy_wait(obj, wmesg);
return (locked);
return (true);
}
sleepq_lock(m);
x = m->busy_lock;
if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
((x & VPB_BIT_WAITERS) == 0 &&
!atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
sleepq_release(m);
if (!vm_page_busied(m))
return (false);
}
xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
sleepq_lock(m);
x = atomic_load_int(&m->busy_lock);
do {
/*
* If the page changes objects or becomes unlocked we can
* simply return.
*/
if (x == VPB_UNBUSIED ||
(xsleep && (x & VPB_BIT_SHARED) != 0) ||
m->object != obj || m->pindex != pindex) {
sleepq_release(m);
return (false);
}
if ((x & VPB_BIT_WAITERS) != 0)
break;
} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
if (locked)
VM_OBJECT_DROP(obj);
DROP_GIANT();
sleepq_add(m, NULL, wmesg, 0, 0);
sleepq_wait(m, PVM);
PICKUP_GIANT();
return (locked);
return (true);
}
/*
@ -1343,7 +1388,7 @@ vm_page_readahead_finish(vm_page_t m)
* be locked.
*/
int
vm_page_sleep_if_busy(vm_page_t m, const char *msg)
vm_page_sleep_if_busy(vm_page_t m, const char *wmesg)
{
vm_object_t obj;
@ -1358,8 +1403,7 @@ vm_page_sleep_if_busy(vm_page_t m, const char *msg)
* held by the callers.
*/
obj = m->object;
if (vm_page_busied(m) || (obj != NULL && obj->busy)) {
vm_page_busy_sleep(m, msg, false);
if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, 0, true)) {
VM_OBJECT_WLOCK(obj);
return (TRUE);
}
@ -1376,7 +1420,7 @@ vm_page_sleep_if_busy(vm_page_t m, const char *msg)
* be locked.
*/
int
vm_page_sleep_if_xbusy(vm_page_t m, const char *msg)
vm_page_sleep_if_xbusy(vm_page_t m, const char *wmesg)
{
vm_object_t obj;
@ -1391,8 +1435,8 @@ vm_page_sleep_if_xbusy(vm_page_t m, const char *msg)
* held by the callers.
*/
obj = m->object;
if (vm_page_xbusied(m) || (obj != NULL && obj->busy)) {
vm_page_busy_sleep(m, msg, true);
if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, VM_ALLOC_SBUSY,
true)) {
VM_OBJECT_WLOCK(obj);
return (TRUE);
}


@ -591,6 +591,8 @@ bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
vm_pindex_t pindex, const char *wmesg, bool nonshared);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);


@ -193,6 +193,11 @@ lacp_linkstate_destroy_stress_head()
}
lacp_linkstate_destroy_stress_body()
{
if [ "$(atf_config_get ci false)" = "true" ] && \
[ "$(uname -p)" = "i386" ]; then
atf_skip "https://bugs.freebsd.org/244168"
fi
local TAP0 TAP1 LAGG MAC SRCDIR
# Configure the lagg interface to use an RFC5737 nonrouteable addresses


@ -626,6 +626,9 @@ udp_dontroute_head()
udp_dontroute_body()
{
if [ "$(atf_config_get ci false)" = "true" ]; then
atf_skip "https://bugs.freebsd.org/244172"
fi
# Configure the TAP interface to use an RFC5737 nonrouteable address
# and a non-default fib
ADDR0="192.0.2.2"


@ -219,6 +219,9 @@ frag6_07_head() {
}
frag6_07_body() {
if [ "$(atf_config_get ci false)" = "true" ]; then
atf_skip "https://bugs.freebsd.org/244170"
fi
frag6_body 7 frag6_07_check_stats
}


@ -986,20 +986,35 @@ static struct {
uint16_t id;
const char *name;
} ecap_names[] = {
{ PCIZ_AER, "AER" },
{ PCIZ_VC, "Virtual Channel" },
{ PCIZ_SERNUM, "Device Serial Number" },
{ PCIZ_PWRBDGT, "Power Budgeting" },
{ PCIZ_RCLINK_DCL, "Root Complex Link Declaration" },
{ PCIZ_RCLINK_CTL, "Root Complex Internal Link Control" },
{ PCIZ_RCEC_ASSOC, "Root Complex Event Collector ASsociation" },
{ PCIZ_MFVC, "MFVC" },
{ PCIZ_VC2, "Virtual Channel 2" },
{ PCIZ_RCRB, "RCRB" },
{ PCIZ_CAC, "Configuration Access Correction" },
{ PCIZ_ACS, "ACS" },
{ PCIZ_ARI, "ARI" },
{ PCIZ_ATS, "ATS" },
{ PCIZ_SRIOV, "SRIOV" },
{ PCIZ_MRIOV, "MRIOV" },
{ PCIZ_MULTICAST, "Multicast" },
{ PCIZ_PAGE_REQ, "Page Page Request" },
{ PCIZ_AMD, "AMD proprietary "},
{ PCIZ_RESIZE_BAR, "Resizable BAR" },
{ PCIZ_DPA, "DPA" },
{ PCIZ_TPH_REQ, "TPH Requester" },
{ PCIZ_LTR, "LTR" },
{ PCIZ_SEC_PCIE, "Secondary PCI Express" },
{ PCIZ_PMUX, "Protocol Multiplexing" },
{ PCIZ_PASID, "Process Address Space ID" },
{ PCIZ_LN_REQ, "LN Requester" },
{ PCIZ_DPC, "Downstream Port Containment" },
{ PCIZ_L1PM, "L1 PM Substates" },
{ 0, NULL }
};
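The expanded ecap_names[] table above is a plain id-to-string map terminated by the { 0, NULL } sentinel, so printing code only needs a linear scan. A sketch of such a lookup (the helper name is hypothetical; pciconf's actual printing logic may differ):

static const char *
ecap_name(uint16_t id)
{
        int i;

        for (i = 0; ecap_names[i].name != NULL; i++)
                if (ecap_names[i].id == id)
                        return (ecap_names[i].name);
        return (NULL);          /* unknown extended capability */
}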