malloc(9): extend contigmalloc(9) with a "slab cookie"

Extend the kern_malloc.c internals to cover contigmalloc(9) allocations
with a "slab cookie" as well, not just malloc/malloc_large ones.  This
allows free(9) to be called even on contigmalloc(9) addresses and lets
us deprecate contigfree(9).  Update the contigmalloc(9) man page
accordingly.

Making free(9) work for contigmalloc(9) addresses hides the UMA/VM
details from consumers, which would otherwise need to track whether the
original allocation came from malloc(9) or contigmalloc(9), or to
inspect the cookie themselves (likely via an accessor function).  This
greatly simplifies consumers that mix both allocation types.
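
For illustration, a consumer can now mix the two allocators and release
memory uniformly; a minimal sketch, where need_contig, size and the use
of M_DEVBUF are placeholders for the example:

	void *buf;

	if (need_contig)	/* e.g., physically contiguous DMA memory */
		buf = contigmalloc(size, M_DEVBUF, M_WAITOK, 0,
		    ~(vm_paddr_t)0, PAGE_SIZE, 0);
	else
		buf = malloc(size, M_DEVBUF, M_WAITOK);
	...
	free(buf, M_DEVBUF);	/* correct regardless of the allocator used */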

This is preliminary work to allow LinuxKPI to be adjusted to better
follow the rules Linux lays down for its various allocation types.
Most of this was described/explained to me by jhb.

One may observe that realloc(9) is currently unchanged (and, contrary
to [contig]malloc/[contig]free, an implementation may need access to
the "slab cookie" information, given it will likely be implementation
dependent which allocation type to use once the size grows beyond the
usable size of the initial allocation).
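
Until realloc(9) learns about the cookie, a consumer that needs to grow
contiguous memory has to do so by hand; a minimal consumer-side sketch
(contig_regrow() is hypothetical and not part of this change; the
low/high/alignment/boundary values are example parameters):

	static void *
	contig_regrow(void *old, size_t oldsize, size_t newsize,
	    struct malloc_type *type, int flags)
	{
		void *p;

		/* Allocate the replacement contiguous memory first. */
		p = contigmalloc(newsize, type, flags, 0, ~(vm_paddr_t)0,
		    PAGE_SIZE, 0);
		if (p == NULL)
			return (NULL);
		memcpy(p, old, oldsize < newsize ? oldsize : newsize);
		free(old, type);	/* no size argument needed anymore */
		return (p);
	}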

Described by:	jhb
Sponsored by:	The FreeBSD Foundation
MFC after:	10 days
Reviewed by:	markj, kib
Differential Revision: https://reviews.freebsd.org/D45812
Bjoern A. Zeeb 2024-06-30 19:00:44 +00:00
parent bb7f7d5b52
commit 9e6544dd6e
2 changed files with 108 additions and 29 deletions

share/man/man9/contigmalloc.9

@@ -23,7 +23,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd October 30, 2018
+.Dd July 22, 2024
 .Dt CONTIGMALLOC 9
 .Os
 .Sh NAME
@@ -113,6 +113,13 @@ function deallocates memory allocated by a previous call to
 .Fn contigmalloc
 or
 .Fn contigmalloc_domainset .
+Its use is deprecated in favor of
+.Xr free 9
+which no longer requires the caller to know the
+.Fa size
+and also accepts
+.Dv NULL
+as an address.
 .Sh IMPLEMENTATION NOTES
 The
 .Fn contigmalloc

sys/kern/kern_malloc.c

@@ -116,6 +116,16 @@ dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
 #define DEBUG_REDZONE_ARG
 #endif
 
+typedef enum {
+        SLAB_COOKIE_SLAB_PTR            = 0x0,
+        SLAB_COOKIE_MALLOC_LARGE        = 0x1,
+        SLAB_COOKIE_CONTIG_MALLOC       = 0x2,
+} slab_cookie_t;
+#define SLAB_COOKIE_MASK                0x3
+#define SLAB_COOKIE_SHIFT               2
+#define GET_SLAB_COOKIE(_slab) \
+    ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)
+
 /*
  * When realloc() is called, if the new size is sufficiently smaller than
  * the old size, realloc() will allocate a new, smaller block to avoid
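
To make the cookie encoding concrete (a worked example, not part of the
diff): a contigmalloc(9) request of 0x3000 bytes is tagged with

	CONTIG_MALLOC_SLAB(0x3000)  == (0x3000 << 2) | 0x2 == 0xc002
	GET_SLAB_COOKIE(0xc002)     == 0xc002 & 0x3        == SLAB_COOKIE_CONTIG_MALLOC
	0xc002 >> SLAB_COOKIE_SHIFT == 0x3000              (the original size)

Genuine uma_slab_t pointers are at least 4-byte aligned, so their two
low bits are zero and decode as SLAB_COOKIE_SLAB_PTR; the three cases
therefore never collide.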
@@ -451,6 +461,21 @@ malloc_type_freed(struct malloc_type *mtp, unsigned long size)
  * If M_NOWAIT is set, this routine will not block and return NULL if
  * the allocation fails.
  */
+#define IS_CONTIG_MALLOC(_slab) \
+    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
+#define CONTIG_MALLOC_SLAB(_size) \
+    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
+static inline size_t
+contigmalloc_size(uma_slab_t slab)
+{
+        uintptr_t va;
+
+        KASSERT(IS_CONTIG_MALLOC(slab),
+            ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
+        va = (uintptr_t)slab;
+        return (va >> SLAB_COOKIE_SHIFT);
+}
+
 void *
 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
     vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
@@ -460,8 +485,11 @@ contigmalloc(unsigned long size, struct malloc_type *type, int flags,
 
         ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
             boundary, VM_MEMATTR_DEFAULT);
-        if (ret != NULL)
+        if (ret != NULL) {
+                /* Use low bits unused for slab pointers. */
+                vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
                 malloc_type_allocated(type, round_page(size));
+        }
         return (ret);
 }
@@ -474,25 +502,28 @@ contigmalloc_domainset(unsigned long size, struct malloc_type *type,
 
         ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
             alignment, boundary, VM_MEMATTR_DEFAULT);
-        if (ret != NULL)
+        if (ret != NULL) {
+                /* Use low bits unused for slab pointers. */
+                vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
                 malloc_type_allocated(type, round_page(size));
+        }
         return (ret);
 }
 
 /*
- * contigfree:
+ * contigfree (deprecated).
  *
  * Free a block of memory allocated by contigmalloc.
  *
  * This routine may not block.
  */
 void
-contigfree(void *addr, unsigned long size, struct malloc_type *type)
+contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
 {
-        kmem_free(addr, size);
-        malloc_type_freed(type, round_page(size));
+        free(addr, type);
 }
+#undef IS_CONTIG_MALLOC
+#undef CONTIG_MALLOC_SLAB
 
 #ifdef MALLOC_DEBUG
 static int
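
Existing contigfree(9) callers keep working, but the size argument is
now ignored and new code should call free(9) directly; a migration
sketch (sc->dma_buf and sc->dma_size are hypothetical driver fields):

	/* Deprecated; still works, but the size is now unused. */
	contigfree(sc->dma_buf, sc->dma_size, M_DEVBUF);

	/* Preferred from now on. */
	free(sc->dma_buf, M_DEVBUF);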
@@ -562,22 +593,19 @@ malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
 /*
  * Handle large allocations and frees by using kmem_malloc directly.
  */
-static inline bool
-malloc_large_slab(uma_slab_t slab)
-{
-        uintptr_t va;
-
-        va = (uintptr_t)slab;
-        return ((va & 1) != 0);
-}
+#define IS_MALLOC_LARGE(_slab) \
+    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
+#define MALLOC_LARGE_SLAB(_size) \
+    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
 
 static inline size_t
 malloc_large_size(uma_slab_t slab)
 {
         uintptr_t va;
 
         va = (uintptr_t)slab;
-        return (va >> 1);
+        KASSERT(IS_MALLOC_LARGE(slab),
+            ("%s: called on non-malloc_large allocation: %p", __func__, slab));
+        return (va >> SLAB_COOKIE_SHIFT);
 }
 
 static caddr_t __noinline
@@ -589,8 +617,8 @@ malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
         size = roundup(size, PAGE_SIZE);
         va = kmem_malloc_domainset(policy, size, flags);
         if (va != NULL) {
-                /* The low bit is unused for slab pointers. */
-                vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
+                /* Use low bits unused for slab pointers. */
+                vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
                 uma_total_inc(size);
         }
         malloc_type_allocated(mtp, va == NULL ? 0 : size);
@@ -613,6 +641,8 @@ free_large(void *addr, size_t size)
         kmem_free(addr, size);
         uma_total_dec(size);
 }
+#undef IS_MALLOC_LARGE
+#undef MALLOC_LARGE_SLAB
 
 /*
  * malloc:
@@ -914,18 +944,30 @@ free(void *addr, struct malloc_type *mtp)
         vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
         if (slab == NULL)
-                panic("free: address %p(%p) has not been allocated.\n",
+                panic("free: address %p(%p) has not been allocated",
                     addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-        if (__predict_true(!malloc_large_slab(slab))) {
+        switch (GET_SLAB_COOKIE(slab)) {
+        case __predict_true(SLAB_COOKIE_SLAB_PTR):
                 size = zone->uz_size;
 #if defined(INVARIANTS) && !defined(KASAN)
                 free_save_type(addr, mtp, size);
 #endif
                 uma_zfree_arg(zone, addr, slab);
-        } else {
+                break;
+        case SLAB_COOKIE_MALLOC_LARGE:
                 size = malloc_large_size(slab);
                 free_large(addr, size);
+                break;
+        case SLAB_COOKIE_CONTIG_MALLOC:
+                size = contigmalloc_size(slab);
+                kmem_free(addr, size);
+                size = round_page(size);
+                break;
+        default:
+                panic("%s: addr %p slab %p with unknown cookie %d", __func__,
+                    addr, slab, GET_SLAB_COOKIE(slab));
+                /* NOTREACHED */
         }
         malloc_type_freed(mtp, size);
 }
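
Note the accounting symmetry in the SLAB_COOKIE_CONTIG_MALLOC case
above: contigmalloc() charged the malloc(9) statistics with
round_page(size), so free() hands the recovered size to kmem_free() but
rounds it up again before the accounting call, keeping both sides of
the ledger equal:

	malloc_type_allocated(type, round_page(size));	/* allocation side */
	...
	size = round_page(size);
	malloc_type_freed(mtp, size);			/* free side */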
@@ -957,7 +999,8 @@ zfree(void *addr, struct malloc_type *mtp)
                 panic("free: address %p(%p) has not been allocated.\n",
                     addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-        if (__predict_true(!malloc_large_slab(slab))) {
+        switch (GET_SLAB_COOKIE(slab)) {
+        case __predict_true(SLAB_COOKIE_SLAB_PTR):
                 size = zone->uz_size;
 #if defined(INVARIANTS) && !defined(KASAN)
                 free_save_type(addr, mtp, size);
@@ -965,11 +1008,22 @@ zfree(void *addr, struct malloc_type *mtp)
                 kasan_mark(addr, size, size, 0);
                 explicit_bzero(addr, size);
                 uma_zfree_arg(zone, addr, slab);
-        } else {
+                break;
+        case SLAB_COOKIE_MALLOC_LARGE:
                 size = malloc_large_size(slab);
                 kasan_mark(addr, size, size, 0);
                 explicit_bzero(addr, size);
                 free_large(addr, size);
+                break;
+        case SLAB_COOKIE_CONTIG_MALLOC:
+                size = round_page(contigmalloc_size(slab));
+                explicit_bzero(addr, size);
+                kmem_free(addr, size);
+                break;
+        default:
+                panic("%s: addr %p slab %p with unknown cookie %d", __func__,
+                    addr, slab, GET_SLAB_COOKIE(slab));
+                /* NOTREACHED */
         }
         malloc_type_freed(mtp, size);
 }
@@ -1016,10 +1070,20 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
             ("realloc: address %p out of range", (void *)addr));
 
         /* Get the size of the original block */
-        if (!malloc_large_slab(slab))
+        switch (GET_SLAB_COOKIE(slab)) {
+        case __predict_true(SLAB_COOKIE_SLAB_PTR):
                 alloc = zone->uz_size;
-        else
+                break;
+        case SLAB_COOKIE_MALLOC_LARGE:
                 alloc = malloc_large_size(slab);
+                break;
+        default:
+#ifdef INVARIANTS
+                panic("%s: called for addr %p of unsupported allocation type; "
+                    "slab %p cookie %d", __func__, addr, slab, GET_SLAB_COOKIE(slab));
+#endif
+                return (NULL);
+        }
 
         /* Reuse the original block if appropriate */
         if (size <= alloc &&
@@ -1101,10 +1165,18 @@ malloc_usable_size(const void *addr)
                 panic("malloc_usable_size: address %p(%p) is not allocated.\n",
                     addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-        if (!malloc_large_slab(slab))
+        switch (GET_SLAB_COOKIE(slab)) {
+        case __predict_true(SLAB_COOKIE_SLAB_PTR):
                 size = zone->uz_size;
-        else
+                break;
+        case SLAB_COOKIE_MALLOC_LARGE:
                 size = malloc_large_size(slab);
+                break;
+        default:
+                __assert_unreachable();
+                size = 0;
+                break;
+        }
 #endif
 
         /*