uma: Deduplicate uma_small_alloc

This commit refactors the UMA small alloc code and
removes most UMA machine-dependent code.
The existing machine-dependent uma_small_alloc() code is almost
identical across all architectures, except on powerpc, where using
direct-map addresses involves extra steps in some cases.

The MI/MD split was replaced by a default uma_small_alloc()
implementation that architecture-specific code can override by
defining the UMA_MD_SMALL_ALLOC symbol. Furthermore, UMA_USE_DMAP was
introduced to replace most UMA_MD_SMALL_ALLOC uses.

Reviewed by: markj, kib
Approved by: markj (mentor)
Differential Revision:	https://reviews.freebsd.org/D45084

Author: Bojan Novković
Date:   2024-05-03 18:48:18 +02:00
Parent: 9b1de7e484
Commit: da76d349b6

14 changed files with 57 additions and 229 deletions
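
In outline, the split described above works like this (a condensed,
illustrative sketch; the authoritative definitions are in the hunks below):

/*
 * <machine/vmparam.h> on amd64, arm64, and riscv: opt into the shared
 * direct-map small allocator (skipped under KASAN/KMSAN):
 */
#if !defined(KASAN) && !defined(KMSAN)
#define UMA_USE_DMAP
#endif

/*
 * <machine/vmparam.h> on powerpc: keep the machine-dependent
 * implementation by defining the override symbol; UMA_USE_DMAP is
 * still set where a direct map exists (AIM, powerpc64):
 */
#define UMA_MD_SMALL_ALLOC

/*
 * sys/vm/uma_core.c: the default implementation is compiled only when
 * the direct map is usable and no machine-dependent override exists:
 */
#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *flags, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif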

sys/amd64/amd64/uma_machdep.c (deleted)

@@ -1,71 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
    int wait)
{
        vm_page_t m;
        vm_paddr_t pa;
        void *va;

        *flags = UMA_SLAB_PRIV;
        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
            VM_ALLOC_WIRED);
        if (m == NULL)
                return (NULL);
        pa = m->phys_addr;
        if ((wait & M_NODUMP) == 0)
                dump_add_page(pa);
        va = (void *)PHYS_TO_DMAP(pa);
        return (va);
}

void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
        vm_page_t m;
        vm_paddr_t pa;

        pa = DMAP_TO_PHYS((vm_offset_t)mem);
        dump_drop_page(pa);
        m = PHYS_TO_VM_PAGE(pa);
        vm_page_unwire_noq(m);
        vm_page_free(m);
}

sys/amd64/include/vmparam.h

@@ -72,12 +72,12 @@
#endif
/*
- * We provide a machine specific single page allocator through the use
- * of the direct mapped segment. This uses 2MB pages for reduced
+ * We provide a single page allocator through the use of the
+ * direct mapped segment. This uses 2MB pages for reduced
* TLB pressure.
*/
#if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#endif
/*

sys/arm64/arm64/uma_machdep.c (deleted)

@@ -1,69 +0,0 @@
/*-
* Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/machdep.h>

void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
    int wait)
{
        vm_page_t m;
        vm_paddr_t pa;
        void *va;

        *flags = UMA_SLAB_PRIV;
        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
            VM_ALLOC_WIRED);
        if (m == NULL)
                return (NULL);
        pa = m->phys_addr;
        if ((wait & M_NODUMP) == 0)
                dump_add_page(pa);
        va = (void *)PHYS_TO_DMAP(pa);
        return (va);
}

void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
        vm_page_t m;
        vm_paddr_t pa;

        pa = DMAP_TO_PHYS((vm_offset_t)mem);
        dump_drop_page(pa);
        m = PHYS_TO_VM_PAGE(pa);
        vm_page_unwire_noq(m);
        vm_page_free(m);
}

sys/arm64/include/vmparam.h

@@ -293,7 +293,7 @@
#endif
#if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#endif
#ifndef LOCORE

sys/conf/files.amd64

@@ -92,7 +92,6 @@ amd64/amd64/support.S standard
amd64/amd64/sys_machdep.c standard
amd64/amd64/trap.c standard
amd64/amd64/uio_machdep.c standard
-amd64/amd64/uma_machdep.c standard
amd64/amd64/vm_machdep.c standard
amd64/pci/pci_cfgreg.c optional pci
cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}"

sys/conf/files.arm64

@@ -78,7 +78,6 @@ arm64/arm64/swtch.S standard
arm64/arm64/sys_machdep.c standard
arm64/arm64/trap.c standard
arm64/arm64/uio_machdep.c standard
-arm64/arm64/uma_machdep.c standard
arm64/arm64/undefined.c standard
arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack \
compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*}"

sys/conf/files.riscv

@@ -67,7 +67,6 @@ riscv/riscv/sys_machdep.c standard
riscv/riscv/trap.c standard
riscv/riscv/timer.c standard
riscv/riscv/uio_machdep.c standard
-riscv/riscv/uma_machdep.c standard
riscv/riscv/unwind.c optional ddb | kdtrace_hooks | stack
riscv/riscv/vm_machdep.c standard

sys/kern/subr_vmem.c

@@ -624,14 +624,14 @@ qc_drain(vmem_t *vm)
uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
* vmem_bt_alloc: Allocate a new page of boundary tags.
*
- * On architectures with uma_small_alloc there is no recursion; no address
+ * On architectures with UMA_USE_DMAP there is no recursion; no address
* space need be allocated to allocate boundary tags. For the others, we
* must handle recursion. Boundary tags are necessary to allocate new
* boundary tags.
@@ -707,7 +707,7 @@ vmem_startup(void)
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
/*

sys/powerpc/include/vmparam.h

@@ -111,6 +111,8 @@
#define KERNBASE 0x00100100 /* start of kernel virtual */
+#define UMA_MD_SMALL_ALLOC
+
#ifdef AIM
#ifndef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
@@ -122,13 +124,13 @@
* Use the direct-mapped BAT registers for UMA small allocs. This
* takes pressure off the small amount of available KVA.
*/
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#else /* Book-E */
/* Use the direct map for UMA small allocs on powerpc64. */
#ifdef __powerpc64__
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#else
#define VM_MIN_KERNEL_ADDRESS 0xc0000000
#define VM_MAX_KERNEL_ADDRESS 0xffffefff

sys/riscv/include/vmparam.h

@@ -234,7 +234,7 @@
#define VM_INITIAL_PAGEIN 16
#endif
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#ifndef LOCORE
extern vm_paddr_t dmap_phys_base;

sys/riscv/riscv/uma_machdep.c (deleted)

@@ -1,68 +0,0 @@
/*-
* Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
    int wait)
{
        vm_page_t m;
        vm_paddr_t pa;
        void *va;

        *flags = UMA_SLAB_PRIV;
        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
            VM_ALLOC_WIRED);
        if (m == NULL)
                return (NULL);
        pa = m->phys_addr;
        if ((wait & M_NODUMP) == 0)
                dump_add_page(pa);
        va = (void *)PHYS_TO_DMAP(pa);
        return (va);
}

void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
        vm_page_t m;
        vm_paddr_t pa;

        pa = DMAP_TO_PHYS((vm_offset_t)mem);
        dump_drop_page(pa);
        m = PHYS_TO_VM_PAGE(pa);
        vm_page_unwire_noq(m);
        vm_page_free(m);
}

sys/vm/uma_core.c

@@ -2079,6 +2079,28 @@ contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
+void *
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
+    int wait)
+{
+        vm_page_t m;
+        vm_paddr_t pa;
+        void *va;
+
+        *flags = UMA_SLAB_PRIV;
+        m = vm_page_alloc_noobj_domain(domain,
+            malloc2vm_flags(wait) | VM_ALLOC_WIRED);
+        if (m == NULL)
+                return (NULL);
+        pa = m->phys_addr;
+        if ((wait & M_NODUMP) == 0)
+                dump_add_page(pa);
+        va = (void *)PHYS_TO_DMAP(pa);
+        return (va);
+}
+#endif
+
/*
* Frees a number of pages to the system
*
@@ -2141,6 +2163,21 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
kva_free(sva, size);
}
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
+void
+uma_small_free(void *mem, vm_size_t size, uint8_t flags)
+{
+        vm_page_t m;
+        vm_paddr_t pa;
+
+        pa = DMAP_TO_PHYS((vm_offset_t)mem);
+        dump_drop_page(pa);
+        m = PHYS_TO_VM_PAGE(pa);
+        vm_page_unwire_noq(m);
+        vm_page_free(m);
+}
+#endif
+
/*
* Zero fill initializer
*
@@ -3154,7 +3191,7 @@ uma_startup1(vm_offset_t virtual_avail)
smr_init();
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
extern void vm_radix_reserve_kva(void);
#endif
@@ -3174,7 +3211,7 @@ uma_startup2(void)
vm_map_unlock(kernel_map);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/* Set up radix zone to use noobj_alloc. */
vm_radix_reserve_kva();
#endif
@@ -5171,7 +5208,7 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
-#ifdef UMA_MD_SMALL_ALLOC
+#ifdef UMA_USE_DMAP
if (keg->uk_ppera > 1) {
#else
if (1) {

sys/vm/vm_map.c

@@ -172,7 +172,7 @@ static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
start = end; \
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/*
* Allocate a new slab for kernel map entries. The kernel map may be locked or
@@ -264,7 +264,7 @@ vm_map_startup(void)
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/* Reserve an extra map entry for use when replenishing the reserve. */
uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
@@ -660,7 +660,7 @@ _vm_map_unlock(vm_map_t map, const char *file, int line)
VM_MAP_UNLOCK_CONSISTENT(map);
if (map->system_map) {
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
uma_prealloc(kmapentzone, 1);
map->flags &= ~MAP_REPLENISH;
@@ -937,7 +937,7 @@ vm_map_entry_create(vm_map_t map)
{
vm_map_entry_t new_entry;
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map) {
VM_MAP_ASSERT_LOCKED(map);

sys/vm/vm_radix.c

@@ -82,7 +82,7 @@ vm_radix_node_free(struct pctrie *ptree, void *node)
uma_zfree_smr(vm_radix_node_zone, node);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
void vm_radix_reserve_kva(void);
/*
* Reserve the KVA necessary to satisfy the node allocation.