diff --git a/sys/dev/amd_ecc_inject/ecc_inject.c b/sys/dev/amd_ecc_inject/ecc_inject.c
index 0f7346e84103..a682115b08ae 100644
--- a/sys/dev/amd_ecc_inject/ecc_inject.c
+++ b/sys/dev/amd_ecc_inject/ecc_inject.c
@@ -186,7 +186,7 @@ ecc_ei_inject(int count)
 	KASSERT(bit_mask != 0 && (bit_mask & ~INJ_VECTOR_MASK) == 0,
 	    ("bit mask value is outside of range: 0x%x", bit_mask));
 
-	memory = kmem_alloc_attr(kernel_arena, PAGE_SIZE, M_WAITOK, 0, ~0,
+	memory = kmem_alloc_attr(PAGE_SIZE, M_WAITOK, 0, ~0,
 	    VM_MEMATTR_UNCACHEABLE);
 
 	for (injected = 0; injected < count; injected++) {
diff --git a/sys/dev/drm/drm_scatter.c b/sys/dev/drm/drm_scatter.c
index c202475f3a88..163634878d93 100644
--- a/sys/dev/drm/drm_scatter.c
+++ b/sys/dev/drm/drm_scatter.c
@@ -52,8 +52,8 @@ drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
 	entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
 	    DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
 
-	entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
-	    0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+	entry->vaddr = kmem_alloc_attr(size, M_WAITOK | M_ZERO, 0,
+	    BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
 	if (entry->vaddr == 0) {
 		drm_sg_cleanup(entry);
 		return (ENOMEM);
diff --git a/sys/dev/drm2/drm_scatter.c b/sys/dev/drm2/drm_scatter.c
index 6e69f0cdd727..510fce4cd6a4 100644
--- a/sys/dev/drm2/drm_scatter.c
+++ b/sys/dev/drm2/drm_scatter.c
@@ -37,8 +37,8 @@ __FBSDID("$FreeBSD$");
 
 static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
 {
-	return kmem_alloc_attr(kernel_arena, size, M_NOWAIT | M_ZERO,
-	    0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+	return kmem_alloc_attr(size, M_NOWAIT | M_ZERO, 0,
+	    BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
 }
 
 void drm_sg_cleanup(struct drm_sg_mem * entry)
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 60dd72af5e30..8de581b7e6df 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -54,7 +54,7 @@ vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
 void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
 
 /* These operate on virtual addresses backed by memory. */
-vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
 	    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
 	    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 0a1af123ddbc..00a7d9287413 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -274,9 +274,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 		 * Try to protect 32-bit DMAable memory from the largest
 		 * early alloc of wired mem.
 		 */
-		firstaddr = kmem_alloc_attr(kernel_arena, size,
-		    M_ZERO | M_NOWAIT, (vm_paddr_t)1 << 32,
-		    ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
+		firstaddr = kmem_alloc_attr(size, M_ZERO | M_NOWAIT,
+		    (vm_paddr_t)1 << 32, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
 		if (firstaddr == 0)
 #endif
 		firstaddr = kmem_malloc(kernel_arena, size,
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 73c23b805daa..17c681451612 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -220,16 +220,13 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 }
 
 vm_offset_t
-kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
-    vm_paddr_t high, vm_memattr_t memattr)
+kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
+    vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_offset_t addr;
 	int domain;
 
-	KASSERT(vmem == kernel_arena,
-	    ("kmem_alloc_attr: Only kernel_arena is supported."));
-
 	vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
 	do {
 		addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
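
For reference, a minimal sketch of what a caller looks like after this change, modeled on the drm_vmalloc_dma() hunk above. The wrapper name alloc_dma32_wc() and the exact header list are illustrative assumptions, not part of the patch; the kmem_alloc_attr() call itself uses only the new signature introduced here.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>

/*
 * Allocate wired kernel memory backed by pages in the 32-bit DMAable
 * physical range, mapped write-combining.  After this change the
 * arena argument is gone: kmem_alloc_attr() always allocates from
 * kernel_arena (the removed KASSERT permitted nothing else anyway),
 * iterating over memory domains via vm_domainset_iter.
 */
static vm_offset_t
alloc_dma32_wc(vm_size_t size)
{

	return (kmem_alloc_attr(size, M_WAITOK | M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING));
}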