MDLs are supposed to be variable size (they include an array of pages
that describe a buffer of variable size). The problem is, allocating
MDLs off the heap is slow, and drivers may allocate lots and lots of
MDLs as they run.

As a compromise, we now do the following: we pre-allocate a zone for
MDLs big enough to describe any buffer with 16 or fewer pages. If
IoAllocateMdl() needs an MDL for a buffer with 16 or fewer pages, we
allocate it from the zone; otherwise, we allocate it from the heap.
MDLs allocated from the zone have a flag set in their mdl_flags field.
When the MDL is released, IoFreeMdl() will uma_zfree() the MDL if it
has the MDL_ZONE_ALLOCED flag set; otherwise it will release it to
the heap.

The assumption is that 16 pages is a "big number" and we will rarely
need MDLs larger than that.
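
Condensed into one place, the policy reads as follows. This is a
sketch, not the literal code: the real IoAllocateMdl() also runs
MmInitializeMdl() and handles its irp argument, as the
subr_ntoskrnl.c hunks below show, and alloc_mdl_sketch() /
free_mdl_sketch() are illustrative names.

/*
 * Condensed sketch of the zone-vs-heap policy described above.
 */
mdl *
alloc_mdl_sketch(void *vaddr, uint32_t len)
{
	mdl		*m;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE) {
		/* Needs more than 16 page slots: fall back to the heap. */
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	} else {
		/* Fits in one zone item: fast path. */
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
		if (m != NULL)
			m->mdl_flags = MDL_ZONE_ALLOCED; /* remember origin */
	}
	return (m);
}

void
free_mdl_sketch(mdl *m)
{
	if (m->mdl_flags & MDL_ZONE_ALLOCED)
		uma_zfree(mdl_zone, m);		/* zone-backed */
	else
		ExFreePool(m);			/* heap-backed */
}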

- Moved the ndis_buffer zone to subr_ntoskrnl.c from kern_ndis.c
  and named it mdl_zone.

- Modified IoAllocateMdl() and IoFreeMdl() to use uma_zalloc() and
  uma_zfree() if necessary.

- Made ndis_mtop() use IoAllocateMdl() instead of calling uma_zalloc()
  directly.

Inspired by: discussion with Giridhar Pemmasani
Bill Paul, 2005-02-26 00:22:16 +00:00
commit a944e196da (parent e1e33cb261)
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=142530

3 changed files with 47 additions and 18 deletions

sys/compat/ndis/kern_ndis.c

@@ -56,8 +56,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/bus.h>
 #include <sys/rman.h>
 
-#include <vm/uma.h>
-
 #include <net/if.h>
 #include <net/if_arp.h>
 #include <net/ethernet.h>
@@ -120,7 +118,6 @@ static int ndis_enlarge_thrqueue(int);
 static int ndis_shrink_thrqueue(int);
 static void ndis_runq(void *);
 
-static uma_zone_t ndis_buffer_zone;
 struct mtx ndis_thr_mtx;
 struct mtx ndis_req_mtx;
 static STAILQ_HEAD(ndisqhead, ndis_req) ndis_ttodo;
@ -160,11 +157,6 @@ ndis_modevent(module_t mod, int cmd, void *arg)
patch++;
}
/* Initialize TX buffer UMA zone. */
ndis_buffer_zone = uma_zcreate("NDIS buffer",
sizeof(struct mdl) + (sizeof(vm_offset_t *) * 16),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
ndis_create_kthreads();
TAILQ_INIT(&ndis_devhead);
@@ -186,9 +178,6 @@ ndis_modevent(module_t mod, int cmd, void *arg)
 			windrv_unwrap(patch->ipt_wrap);
 			patch++;
 		}
-
-			/* Remove zones */
-			uma_zdestroy(ndis_buffer_zone);
 		}
 		break;
 	case MOD_UNLOAD:
@@ -208,8 +197,6 @@ ndis_modevent(module_t mod, int cmd, void *arg)
 			patch++;
 		}
 
-		/* Remove zones */
-		uma_zdestroy(ndis_buffer_zone);
 		break;
 	default:
 		error = EINVAL;
@@ -878,7 +865,7 @@ ndis_free_bufs(b0)
 	while(b0 != NULL) {
 		next = b0->mdl_next;
-		uma_zfree (ndis_buffer_zone, b0);
+		IoFreeMdl(b0);
 		b0 = next;
 	}
@@ -1100,7 +1087,7 @@ ndis_mtop(m0, p)
 	for (m = m0; m != NULL; m = m->m_next) {
 		if (m->m_len == 0)
 			continue;
-		buf = uma_zalloc(ndis_buffer_zone, M_NOWAIT | M_ZERO);
+		buf = IoAllocateMdl(m->m_data, m->m_len, FALSE, FALSE, NULL);
 		if (buf == NULL) {
 			ndis_free_packet(*p);
 			*p = NULL;

sys/compat/ndis/ntoskrnl_var.h

@@ -100,6 +100,10 @@ typedef struct mdl mdl, ndis_buffer;
 #define MDL_NETWORK_HEADER		0x1000
 #define MDL_MAPPING_CAN_FAIL		0x2000
 #define MDL_ALLOCATED_MUST_SUCCEED	0x4000
+#define MDL_ZONE_ALLOCED		0x8000	/* BSD private */
+#define MDL_ZONE_PAGES	16
+#define MDL_ZONE_SIZE	(sizeof(mdl) + (sizeof(vm_offset_t) * MDL_ZONE_PAGES))
+/* Note: assumes x86 page size of 4K. */
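
To see what the 16-page cutoff means in buffer terms, here is a
hypothetical check, assuming the 4K x86 page size noted above;
fits_mdl_zone(), PAGE_SZ, and ZONE_PAGES are illustrative names, not
part of the tree.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ		4096	/* the 4K x86 page size noted above */
#define ZONE_PAGES	16	/* mirrors MDL_ZONE_PAGES */

/*
 * An MDL needs one page-array slot for every page the buffer
 * touches, including partial pages at both ends, so a request fits
 * the zone only when that span is 16 pages or fewer (at most
 * 16 * 4KB = 64KB of payload).
 */
static int
fits_mdl_zone(uintptr_t vaddr, size_t len)
{
	size_t span;

	/* pages spanned = offset into first page + length, rounded up */
	span = ((vaddr & (PAGE_SZ - 1)) + len + PAGE_SZ - 1) / PAGE_SZ;
	return (span <= ZONE_PAGES);
}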

sys/compat/ndis/subr_ntoskrnl.c

@@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
+#include <vm/uma.h>
 
 #include <compat/ndis/pe_var.h>
 #include <compat/ndis/ntoskrnl_var.h>
@@ -186,6 +187,7 @@ static kspin_lock ntoskrnl_global;
 static kspin_lock ntoskrnl_cancellock;
 static int ntoskrnl_kth = 0;
 static struct nt_objref_head ntoskrnl_reflist;
+static uma_zone_t mdl_zone;
 
 int
 ntoskrnl_libinit()
@@ -204,6 +206,22 @@ ntoskrnl_libinit()
 		patch++;
 	}
 
+	/*
+	 * MDLs are supposed to be variable size (they describe
+	 * buffers containing some number of pages, but we don't
+	 * know ahead of time how many pages that will be). But
+	 * always allocating them off the heap is very slow. As
+	 * a compromise, we create an MDL UMA zone big enough to
+	 * handle any buffer requiring up to 16 pages, and we
+	 * use those for any MDLs for buffers of 16 pages or less
+	 * in size. For buffers larger than that (which we assume
+	 * will be few and far between), we allocate the MDLs off
+	 * the heap.
+	 */
+
+	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+
 	return(0);
 }
@@ -219,6 +237,8 @@ ntoskrnl_libfini()
 		patch++;
 	}
 
+	uma_zdestroy(mdl_zone);
+
 	return(0);
 }
@@ -1726,15 +1746,30 @@ IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
 	irp			*iopkt;
 {
 	mdl			*m;
+	int			zone = 0;
 
-	m = ExAllocatePoolWithTag(NonPagedPool,
-	    MmSizeOfMdl(vaddr, len), 0);
+	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
+		m = ExAllocatePoolWithTag(NonPagedPool,
+		    MmSizeOfMdl(vaddr, len), 0);
+	else {
+		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
+		zone++;
+	}
 
 	if (m == NULL)
 		return (NULL);
 
 	MmInitializeMdl(m, vaddr, len);
 
+	/*
+	 * MmInitializeMdl() clears the flags field, so we
+	 * have to set this here. If the MDL came from the
+	 * MDL UMA zone, tag it so we can release it to
+	 * the right place later.
+	 */
+	if (zone)
+		m->mdl_flags = MDL_ZONE_ALLOCED;
+
 	if (iopkt != NULL) {
 		if (secondarybuf == TRUE) {
 			mdl *last;
@@ -1759,7 +1794,10 @@ IoFreeMdl(m)
 	if (m == NULL)
 		return;
 
-	free (m, M_DEVBUF);
+	if (m->mdl_flags & MDL_ZONE_ALLOCED)
+		uma_zfree(mdl_zone, m);
+	else
+		ExFreePool(m);
 
 	return;
 }
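
One usage note: the change is invisible to callers, since IoFreeMdl()
dispatches on the MDL_ZONE_ALLOCED flag. A hypothetical round trip
(example_describe_buffer() is illustrative, not from the tree, and
assumes the compat/ndis headers are in scope):

#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>

static int
example_describe_buffer(char *buf, uint32_t len)
{
	mdl		*m;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m == NULL)
		return (ENOMEM);	/* both paths allocate M_NOWAIT */

	/* ... hand m to the NDIS miniport ... */

	IoFreeMdl(m);	/* released to the zone or the heap as tagged */
	return (0);
}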