Add malloc_domainset(9) and _domainset variants to other allocator KPIs.

Remove malloc_domain(9) and most other _domain KPIs added in r327900.
The new functions allow the caller to specify a general NUMA domain
selection policy, rather than specifically requesting an allocation from
a specific domain.  The latter policy tends to interact poorly with
M_WAITOK, resulting in situations where a caller is blocked indefinitely
because the specified domain is depleted.  Most existing consumers of
the _domain KPIs are converted to instead use a DOMAINSET_PREF() policy,
in which we fall back to other domains to satisfy the allocation
request.

This change also defines a set of DOMAINSET_FIXED() policies, which
only permit allocations from the specified domain.

Discussed with:	gallatin, jeff
Reported and tested by:	pho (previous version)
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D17418
This commit is contained in:
Mark Johnston 2018-10-30 18:26:34 +00:00
parent 58b6812de1
commit 9978bd996b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=339927
20 changed files with 231 additions and 144 deletions

View file

@ -38,6 +38,8 @@
# xargs -n1 | sort | uniq -d;
# done
# 20181030: malloc_domain(9) KPI change
OLD_FILES+=share/man/man9/malloc_domain.9.gz
# 20181026: joy(4) removal
OLD_FILES+=usr/share/man/man4/joy.4.gz
# 20181025: OpenSSL libraries version bump to avoid conflict with ports

View file

@ -812,7 +812,8 @@ MLINKS+=condvar.9 cv_broadcast.9 \
MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \
config_intrhook.9 config_intrhook_establish.9 \
config_intrhook.9 config_intrhook_oneshot.9
MLINKS+=contigmalloc.9 contigfree.9
MLINKS+=contigmalloc.9 contigmalloc_domainset.9 \
contigmalloc.9 contigfree.9
MLINKS+=casuword.9 casueword.9 \
casuword.9 casueword32.9 \
casuword.9 casuword32.9
@ -1289,7 +1290,7 @@ MLINKS+=make_dev.9 destroy_dev.9 \
make_dev.9 make_dev_p.9 \
make_dev.9 make_dev_s.9
MLINKS+=malloc.9 free.9 \
malloc.9 malloc_domain.9 \
malloc.9 malloc_domainset.9 \
malloc.9 free_domain.9 \
malloc.9 mallocarray.9 \
malloc.9 MALLOC_DECLARE.9 \

View file

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 29, 2015
.Dd October 30, 2018
.Dt CONTIGMALLOC 9
.Os
.Sh NAME
@ -50,6 +50,19 @@
.Fa "unsigned long size"
.Fa "struct malloc_type *type"
.Fc
.In sys/param.h
.In sys/domainset.h
.Ft "void *"
.Fo contigmalloc_domainset
.Fa "unsigned long size"
.Fa "struct malloc_type *type"
.Fa "struct domainset *ds"
.Fa "int flags"
.Fa "vm_paddr_t low"
.Fa "vm_paddr_t high"
.Fa "unsigned long alignment"
.Fa "vm_paddr_t boundary"
.Fc
.Sh DESCRIPTION
The
.Fn contigmalloc
@ -70,6 +83,15 @@ address range of
bytes allocated from the kernel virtual address (KVA) map.
.Pp
The
.Fn contigmalloc_domainset
variant allows the caller to additionally specify a
.Xr numa 4
domain selection policy.
See
.Xr domainset 9
for some example policies.
.Pp
The
.Fa flags
parameter modifies
.Fn contigmalloc Ns 's
@ -90,7 +112,9 @@ Other flags (if present) are ignored.
The
.Fn contigfree
function deallocates memory allocated by a previous call to
.Fn contigmalloc .
.Fn contigmalloc
or
.Fn contigmalloc_domainset .
.Sh IMPLEMENTATION NOTES
The
.Fn contigmalloc

View file

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd October 20, 2018
.Dd October 30, 2018
.Dt DOMAINSET 9
.Os
.Sh NAME
@ -43,7 +43,11 @@ struct domainset {
};
.Ed
.Pp
.Ft struct domainset *
.Fn DOMAINSET_FIXED domain
.Ft struct domainset *
.Fn DOMAINSET_RR
.Ft struct domainset *
.Fn DOMAINSET_PREF domain
.Ft struct domainset *
.Fn domainset_create "const struct domainset *key"
@ -98,11 +102,26 @@ efficiency higher and is preferential to round-robin for general use.
.El
.Pp
The
.Fn DOMAINSET_FIXED ,
.Fn DOMAINSET_RR
and
.Fn DOMAINSET_PREF
provide pointers to global pre-defined policies for use when the
macros provide pointers to global pre-defined policies for use when the
desired policy is known at compile time.
.Fn DOMAINSET_FIXED
is a policy which only permits allocations from the specified domain.
.Fn DOMAINSET_RR
provides round-robin selection among all domains in the system.
The
.Fn DOMAINSET_PREF
policies attempt allocation from the specified domain, but unlike
.Fn DOMAINSET_FIXED
will fall back to other domains to satisfy the request.
These policies should be used in preference to
.Fn DOMAINSET_FIXED
to avoid blocking indefinitely on a
.Dv M_WAITOK
request.
The
.Fn domainset_create
function takes a partially filled in domainset as a key and returns a

View file

@ -29,7 +29,7 @@
.\" $NetBSD: malloc.9,v 1.3 1996/11/11 00:05:11 lukem Exp $
.\" $FreeBSD$
.\"
.Dd June 13, 2018
.Dd October 30, 2018
.Dt MALLOC 9
.Os
.Sh NAME
@ -46,13 +46,9 @@
.Ft void *
.Fn malloc "size_t size" "struct malloc_type *type" "int flags"
.Ft void *
.Fn malloc_domain "size_t size" "struct malloc_type *type" "int domain" "int flags"
.Ft void *
.Fn mallocarray "size_t nmemb" "size_t size" "struct malloc_type *type" "int flags"
.Ft void
.Fn free "void *addr" "struct malloc_type *type"
.Ft void
.Fn free_domain "void *addr" "struct malloc_type *type"
.Ft void *
.Fn realloc "void *addr" "size_t size" "struct malloc_type *type" "int flags"
.Ft void *
@ -62,6 +58,12 @@
.In sys/malloc.h
.In sys/kernel.h
.Fn MALLOC_DEFINE type shortdesc longdesc
.In sys/param.h
.In sys/domainset.h
.Ft void *
.Fn malloc_domainset "size_t size" "struct malloc_type *type" "struct domainset *ds" "int flags"
.Ft void
.Fn free_domain "void *addr" "struct malloc_type *type"
.Sh DESCRIPTION
The
.Fn malloc
@ -70,12 +72,15 @@ object whose size is specified by
.Fa size .
.Pp
The
.Fn malloc_domain
variant allocates the object from the specified memory domain. Memory allocated
with this function should be returned with
.Fn free_domain .
.Fn malloc_domainset
variant allocates memory from a specific
.Xr numa 4
domain using the specified domain selection policy.
See
.Xr numa 9 for more details.
.Xr domainset 9
for some example policies.
Memory allocated with this function should be returned with
.Fn free_domain .
.Pp
The
.Fn mallocarray
@ -310,7 +315,9 @@ functions.
Failing consistency checks will cause a panic or a system console
message.
.Sh SEE ALSO
.Xr numa 4 ,
.Xr vmstat 8 ,
.Xr contigmalloc 9 ,
.Xr domainset 9 ,
.Xr memguard 9 ,
.Xr vnode 9

View file

@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/domainset.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
@ -1231,8 +1232,8 @@ pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
void
pmclog_initialize()
{
int domain;
struct pmclog_buffer *plb;
int domain, ncpus, total;
if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) {
(void) printf("hwpmc: tunable logbuffersize=%d must be "
@ -1253,16 +1254,17 @@ pmclog_initialize()
pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
}
for (domain = 0; domain < vm_ndomains; domain++) {
int ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus;
int total = ncpus*pmc_nlogbuffers_pcpu;
ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus;
total = ncpus * pmc_nlogbuffers_pcpu;
plb = malloc_domain(sizeof(struct pmclog_buffer)*total, M_PMC, domain, M_WAITOK|M_ZERO);
plb = malloc_domainset(sizeof(struct pmclog_buffer) * total,
M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
pmc_dom_hdrs[domain]->pdbh_plbs = plb;
for (int i = 0; i < total; i++, plb++) {
for (; total > 0; total--, plb++) {
void *buf;
buf = malloc_domain(1024 * pmclog_buffer_size, M_PMC, domain,
M_WAITOK|M_ZERO);
buf = malloc_domainset(1024 * pmclog_buffer_size, M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
PMCLOG_INIT_BUFFER_DESCRIPTOR(plb, buf, domain);
pmc_plb_rele_unlocked(plb);
}

View file

@ -36,6 +36,7 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/jail.h>
@ -78,14 +79,6 @@ __FBSDID("$FreeBSD$");
#include "hwpmc_soft.h"
#ifdef NUMA
#define NDOMAINS vm_ndomains
#else
#define NDOMAINS 1
#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define free_domain(addr, type) free(addr, type)
#endif
#define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et)
#define PMC_EPOCH_EXIT() epoch_exit_preempt(global_epoch_preempt, &pmc_et)
@ -5643,15 +5636,16 @@ pmc_initialize(void)
continue;
pc = pcpu_find(cpu);
domain = pc->pc_domain;
sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
M_WAITOK|M_ZERO);
sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
KASSERT(pmc_pcpu[cpu] != NULL,
("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
pmc_nsamples * sizeof(uintptr_t), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
ps->ps_pc = sb->ps_callchains +
@ -5659,35 +5653,27 @@ pmc_initialize(void)
pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
M_WAITOK|M_ZERO);
KASSERT(pmc_pcpu[cpu] != NULL,
("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
pmc_nsamples * sizeof(uintptr_t), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
ps->ps_pc = sb->ps_callchains +
(n * pmc_callchaindepth);
pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
sb = malloc_domain(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain,
M_WAITOK|M_ZERO);
KASSERT(pmc_pcpu[cpu] != NULL,
("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples *
sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO);
sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
pmc_nsamples * sizeof(uintptr_t), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
ps->ps_pc = sb->ps_callchains +
(n * pmc_callchaindepth);
ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth;
pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
}

View file

@ -864,8 +864,8 @@ pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif

View file

@ -119,6 +119,7 @@ __FBSDID("$FreeBSD$");
*/
LIST_HEAD(domainlist, domainset);
struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
struct domainset __read_mostly domainset_roundrobin;
@ -1402,6 +1403,12 @@ domainset_init(void)
_domainset_create(dset, NULL);
for (i = 0; i < vm_ndomains; i++) {
dset = &domainset_fixed[i];
DOMAINSET_ZERO(&dset->ds_mask);
DOMAINSET_SET(i, &dset->ds_mask);
dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
_domainset_create(dset, NULL);
dset = &domainset_prefer[i];
DOMAINSET_COPY(&all_domains, &dset->ds_mask);
dset->ds_policy = DOMAINSET_POLICY_PREFER;

View file

@ -68,6 +68,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
@ -453,13 +454,13 @@ contigmalloc(unsigned long size, struct malloc_type *type, int flags,
}
void *
contigmalloc_domain(unsigned long size, struct malloc_type *type,
int domain, int flags, vm_paddr_t low, vm_paddr_t high,
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, vm_paddr_t boundary)
{
void *ret;
ret = (void *)kmem_alloc_contig_domain(domain, size, flags, low, high,
ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
alignment, boundary, VM_MEMATTR_DEFAULT);
if (ret != NULL)
malloc_type_allocated(type, round_page(size));
@ -595,9 +596,8 @@ void *
return ((void *) va);
}
void *
malloc_domain(size_t size, struct malloc_type *mtp, int domain,
int flags)
static void *
malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags)
{
int indx;
caddr_t va;
@ -640,6 +640,24 @@ malloc_domain(size_t size, struct malloc_type *mtp, int domain,
return ((void *) va);
}
void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
int flags)
{
/*
 * Allocate "size" bytes of type "mtp" using the NUMA domain selection
 * policy "ds" (e.g. DOMAINSET_PREF(), DOMAINSET_RR(), DOMAINSET_FIXED()).
 * Iterates over the candidate domains permitted by the policy, trying
 * malloc_domain() on each until an allocation succeeds.  Returns NULL
 * only when every candidate domain fails (the iterator signals
 * exhaustion by returning non-zero).
 */
struct vm_domainset_iter di;
void *ret;
int domain;
/*
 * NOTE(review): "flags" is passed by reference, so the iterator init
 * presumably may adjust wait semantics per the policy — confirm against
 * vm_domainset_iter_policy_init().
 */
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
ret = malloc_domain(size, mtp, domain, flags);
if (ret != NULL)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
return (ret);
}
void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

View file

@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
@ -590,8 +591,9 @@ mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig_domain(domain, bytes, wait,
(vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
VM_MEMATTR_DEFAULT));
}
/*

View file

@ -35,8 +35,9 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/domainset.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
@ -330,22 +331,6 @@ pmc_soft_ev_release(struct pmc_soft *ps)
mtx_unlock_spin(&pmc_softs_mtx);
}
#ifdef NUMA
#define NDOMAINS vm_ndomains
static int
getdomain(int cpu)
{
struct pcpu *pc;
pc = pcpu_find(cpu);
return (pc->pc_domain);
}
#else
#define NDOMAINS 1
#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define getdomain(cpu) 0
#endif
/*
* Initialise hwpmc.
*/
@ -362,14 +347,16 @@ init_hwpmc(void *dummy __unused)
}
pmc_softs = malloc(pmc_softevents * sizeof(*pmc_softs), M_PMCHOOKS,
M_WAITOK | M_ZERO);
for (domain = 0; domain < NDOMAINS; domain++) {
pmc_dom_hdrs[domain] = malloc_domain(sizeof(struct pmc_domain_buffer_header), M_PMC, domain,
M_WAITOK|M_ZERO);
for (domain = 0; domain < vm_ndomains; domain++) {
pmc_dom_hdrs[domain] = malloc_domainset(
sizeof(struct pmc_domain_buffer_header), M_PMC,
DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
mtx_init(&pmc_dom_hdrs[domain]->pdbh_mtx, "pmc_bufferlist_mtx", "pmc-leaf", MTX_SPIN);
TAILQ_INIT(&pmc_dom_hdrs[domain]->pdbh_head);
}
CPU_FOREACH(cpu) {
domain = getdomain(cpu);
domain = pcpu_find(cpu)->pc_domain;
KASSERT(pmc_dom_hdrs[domain] != NULL, ("no mem allocated for domain: %d", domain));
pmc_dom_hdrs[domain]->pdbh_ncpus++;
}

View file

@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <vm/vm.h>
@ -152,18 +153,15 @@ void *
busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size, int domain,
uint8_t *pflag, int wait)
{
#ifdef VM_MEMATTR_UNCACHEABLE
#ifdef VM_MEMATTR_UNCACHEABLE
/* Inform UMA that this allocator uses kernel_arena/object. */
*pflag = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_attr_domain(domain, size, wait, 0,
BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
return ((void *)kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
wait, 0, BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
#else
panic("VM_MEMATTR_UNCACHEABLE unavailable");
#endif /* VM_MEMATTR_UNCACHEABLE */
}

View file

@ -96,7 +96,8 @@ struct domainset {
domainid_t ds_order[MAXMEMDOM]; /* nth domain table. */
};
extern struct domainset domainset_prefer[MAXMEMDOM];
extern struct domainset domainset_fixed[MAXMEMDOM], domainset_prefer[MAXMEMDOM];
#define DOMAINSET_FIXED(domain) (&domainset_fixed[(domain)])
#define DOMAINSET_PREF(domain) (&domainset_prefer[(domain)])
extern struct domainset domainset_roundrobin;
#define DOMAINSET_RR() (&domainset_roundrobin)

View file

@ -160,6 +160,7 @@ MALLOC_DECLARE(M_TEMP);
*/
MALLOC_DECLARE(M_IOV);
struct domainset;
extern struct mtx malloc_mtx;
/*
@ -172,8 +173,8 @@ void *contigmalloc(unsigned long size, struct malloc_type *type, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
vm_paddr_t boundary) __malloc_like __result_use_check
__alloc_size(1) __alloc_align(6);
void *contigmalloc_domain(unsigned long size, struct malloc_type *type,
int domain, int flags, vm_paddr_t low, vm_paddr_t high,
void *contigmalloc_domainset(unsigned long size, struct malloc_type *type,
struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, vm_paddr_t boundary)
__malloc_like __result_use_check __alloc_size(1) __alloc_align(6);
void free(void *addr, struct malloc_type *type);
@ -230,8 +231,9 @@ void *malloc(size_t size, struct malloc_type *type, int flags) __malloc_like
_malloc_item; \
})
void *malloc_domain(size_t size, struct malloc_type *type, int domain,
int flags) __malloc_like __result_use_check __alloc_size(1);
void *malloc_domainset(size_t size, struct malloc_type *type,
struct domainset *ds, int flags) __malloc_like __result_use_check
__alloc_size(1);
void *mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
int flags) __malloc_like __result_use_check
__alloc_size2(1, 2);

View file

@ -1172,7 +1172,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
void *p; /* Returned page */
*pflag = UMA_SLAB_KERNEL;
p = (void *) kmem_malloc_domain(domain, bytes, wait);
p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
return (p);
}
@ -3718,6 +3718,7 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
void *
uma_large_malloc_domain(vm_size_t size, int domain, int wait)
{
struct domainset *policy;
vm_offset_t addr;
uma_slab_t slab;
@ -3729,10 +3730,9 @@ uma_large_malloc_domain(vm_size_t size, int domain, int wait)
slab = zone_alloc_item(slabzone, NULL, domain, wait);
if (slab == NULL)
return (NULL);
if (domain == UMA_ANYDOMAIN)
addr = kmem_malloc(size, wait);
else
addr = kmem_malloc_domain(domain, size, wait);
policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
DOMAINSET_FIXED(domain);
addr = kmem_malloc_domainset(policy, size, wait);
if (addr != 0) {
vsetslab(addr, slab);
slab->us_data = (void *)addr;

View file

@ -44,6 +44,7 @@ struct vmem;
#ifdef _KERNEL
struct cdev;
struct cdevsw;
struct domainset;
/* These operate on kernel virtual addresses only. */
vm_offset_t kva_alloc(vm_size_t);
@ -56,16 +57,17 @@ void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
/* These operate on virtual addresses backed by memory. */
vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);
vm_offset_t kmem_malloc(vm_size_t size, int flags);
vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
int flags);
void kmem_free(vm_offset_t addr, vm_size_t size);
/* This provides memory for previously allocated address space. */

View file

@ -175,7 +175,7 @@ kva_free(vm_offset_t addr, vm_size_t size)
* necessarily physically contiguous. If M_ZERO is specified through the
* given flags, then the pages are zeroed before they are mapped.
*/
vm_offset_t
static vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
@ -231,11 +231,20 @@ vm_offset_t
kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
vm_memattr_t memattr)
{
return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
high, memattr));
}
vm_offset_t
kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags);
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
memattr);
@ -254,7 +263,7 @@ kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
* through the given flags, then the pages are zeroed before they are
* mapped.
*/
vm_offset_t
static vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
@ -315,11 +324,21 @@ vm_offset_t
kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
high, alignment, boundary, memattr));
}
vm_offset_t
kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags);
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, memattr);
@ -368,11 +387,11 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
}
/*
* kmem_malloc:
* kmem_malloc_domain:
*
* Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
static vm_offset_t
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *arena;
@ -401,12 +420,19 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
vm_offset_t
kmem_malloc(vm_size_t size, int flags)
{
/*
 * Domain-agnostic wrapper: allocate wired kernel memory using a
 * round-robin policy across all NUMA domains.
 */
return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags));
}
vm_offset_t
kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags);
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_malloc_domain(domain, size, flags);
if (addr != 0)

View file

@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/conf.h>
@ -373,16 +374,16 @@ dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);
tag = (struct bus_dma_tag_dmar *)dmat;
map = malloc_domain(sizeof(*map), M_DMAR_DMAMAP,
tag->common.domain, M_NOWAIT | M_ZERO);
map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
if (map == NULL) {
*mapp = NULL;
return (ENOMEM);
}
if (tag->segments == NULL) {
tag->segments = malloc_domain(sizeof(bus_dma_segment_t) *
tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
tag->common.nsegments, M_DMAR_DMAMAP,
tag->common.domain, M_NOWAIT);
DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
if (tag->segments == NULL) {
free_domain(map, M_DMAR_DMAMAP);
*mapp = NULL;
@ -447,13 +448,13 @@ dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
if (tag->common.maxsize < PAGE_SIZE &&
tag->common.alignment <= tag->common.maxsize &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc_domain(tag->common.maxsize, M_DEVBUF,
tag->common.domain, mflags);
*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
DOMAINSET_PREF(tag->common.domain), mflags);
map->flags |= BUS_DMAMAP_DMAR_MALLOC;
} else {
*vaddr = (void *)kmem_alloc_attr_domain(tag->common.domain,
tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR,
attr);
*vaddr = (void *)kmem_alloc_attr_domainset(
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
mflags, 0ul, BUS_SPACE_MAXADDR, attr);
map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
}
if (*vaddr == NULL) {

View file

@ -31,6 +31,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
@ -294,9 +295,9 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
error = 0;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc_domain(
dmat->segments = (bus_dma_segment_t *)malloc_domainset(
sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, dmat->common.domain, M_NOWAIT);
M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
if (dmat->segments == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
@ -317,8 +318,8 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
bz = dmat->bounce_zone;
*mapp = (bus_dmamap_t)malloc_domain(sizeof(**mapp), M_DEVBUF,
dmat->common.domain, M_NOWAIT | M_ZERO);
*mapp = (bus_dmamap_t)malloc_domainset(sizeof(**mapp), M_DEVBUF,
DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
@ -411,9 +412,9 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*mapp = NULL;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc_domain(
dmat->segments = (bus_dma_segment_t *)malloc_domainset(
sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, dmat->common.domain, mflags);
M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
if (dmat->segments == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
@ -452,20 +453,21 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
(dmat->common.alignment <= dmat->common.maxsize) &&
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc_domain(dmat->common.maxsize, M_DEVBUF,
dmat->common.domain, mflags);
*vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF,
DOMAINSET_PREF(dmat->common.domain), mflags);
} else if (dmat->common.nsegments >=
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
*vaddr = (void *)kmem_alloc_attr_domain(dmat->common.domain,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
attr);
*vaddr = (void *)kmem_alloc_attr_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
mflags, 0ul, dmat->common.lowaddr, attr);
dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
} else {
*vaddr = (void *)kmem_alloc_contig_domain(dmat->common.domain,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
*vaddr = (void *)kmem_alloc_contig_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
mflags, 0ul, dmat->common.lowaddr,
dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
dmat->common.boundary, attr);
dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
@ -1149,14 +1151,14 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
while (numpages > 0) {
struct bounce_page *bpage;
bpage = (struct bounce_page *)malloc_domain(sizeof(*bpage),
M_DEVBUF, dmat->common.domain, M_NOWAIT | M_ZERO);
bpage = malloc_domainset(sizeof(*bpage), M_DEVBUF,
DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
if (bpage == NULL)
break;
bpage->vaddr = (vm_offset_t)contigmalloc_domain(PAGE_SIZE,
M_DEVBUF, dmat->common.domain, M_NOWAIT, 0ul,
bz->lowaddr, PAGE_SIZE, 0);
bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT,
0ul, bz->lowaddr, PAGE_SIZE, 0);
if (bpage->vaddr == 0) {
free_domain(bpage, M_DEVBUF);
break;