Another attempt at cleaning up the new memory allocator.

John Dyson 1997-08-05 22:24:31 +00:00
parent b79933ebfa
commit 0d65e566b9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=27923
6 changed files with 238 additions and 330 deletions


@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.154 1997/08/05 01:32:05 dyson Exp $
* $Id: pmap.c,v 1.155 1997/08/05 22:06:47 dyson Exp $
*/
/*
@ -89,6 +89,7 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <sys/user.h>
@ -118,8 +119,6 @@
#define PTPHINT
static void init_pv_entries __P((int));
/*
* Get PDEs and PTEs for user/kernel address space
*/
@ -154,8 +153,9 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pgeflag; /* PG_G or-in */
static int pseflag; /* PG_PS or-in */
int pgeflag; /* PG_G or-in */
int pseflag; /* PG_PS or-in */
int pv_npg;
static int nkpt;
static vm_page_t nkpg;
@ -163,15 +163,14 @@ vm_offset_t kernel_vm_end;
extern vm_offset_t clean_sva, clean_eva;
#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)
/*
* Data for the pv entry allocation mechanism
*/
static int pv_freelistcnt;
TAILQ_HEAD (,pv_entry) pv_freelist = {0};
static vm_offset_t pvva;
static int npvvapg;
vm_zone_t pvzone;
struct vm_zone pvzone_store;
struct vm_object pvzone_obj;
#define NPVINIT 8192
struct pv_entry pvinit[NPVINIT];
/*
* All those kernel PT submaps that BSD is so fond of
@ -191,7 +190,6 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
static void pmap_alloc_pv_entry __P((void));
static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
@ -475,43 +473,50 @@ pmap_init(phys_start, phys_end)
{
vm_offset_t addr;
vm_size_t s;
int i, npg;
int i;
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
s = (vm_size_t) (sizeof(pv_table_t) * npg);
s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i = 0; i < npg; i++) {
for(i = 0; i < pv_npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
}
TAILQ_INIT(&pv_freelist);
/*
* init the pv free list
*/
init_pv_entries(npg);
pvzone = &pvzone_store;
zbootinit(pvzone, "PV entries", sizeof(pvinit[0]), pvinit, NPVINIT);
/*
* Now it is safe to enable pv_table recording.
*/
pmap_initialized = TRUE;
}
void
pmap_init2() {
zinitna(pvzone, &pvzone_obj, NULL, 0,
PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 4);
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
@ -660,9 +665,9 @@ pmap_extract(pmap, va)
vm_offset_t rtval;
vm_offset_t pdirindex;
pdirindex = va >> PDRSHIFT;
if (pmap) {
if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
unsigned *pte;
if (((rtval = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
if ((rtval & PG_PS) != 0) {
rtval &= ~(NBPDR - 1);
rtval |= va & (NBPDR - 1);
return rtval;
@ -1384,7 +1389,9 @@ pmap_release(pmap)
pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
++pdstackptr;
} else {
kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
int pdstmp = pdstackptr - 1;
kmem_free(kernel_map, pdstack[pdstmp], PAGE_SIZE);
pdstack[pdstmp] = (vm_offset_t) pmap->pm_pdir;
}
pmap->pm_pdir = 0;
}
@ -1484,12 +1491,11 @@ pmap_reference(pmap)
/*
* free the pv_entry back to the free list
*/
static PMAP_INLINE void
static inline void
free_pv_entry(pv)
pv_entry_t pv;
{
++pv_freelistcnt;
TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
zfreei(pvzone, pv);
}
/*
@ -1498,108 +1504,10 @@ free_pv_entry(pv)
* the memory allocation is performed bypassing the malloc code
* because of the possibility of allocations at interrupt time.
*/
static pv_entry_t
get_pv_entry()
static inline pv_entry_t
get_pv_entry(void)
{
pv_entry_t tmp;
/*
* get more pv_entry pages if needed
*/
if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
pmap_alloc_pv_entry();
}
/*
* get a pv_entry off of the free list
*/
--pv_freelistcnt;
tmp = TAILQ_FIRST(&pv_freelist);
TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
return tmp;
}
/*
* This *strange* allocation routine eliminates the possibility of a malloc
* failure (*FATAL*) for a pv_entry_t data structure.
* also -- this code is MUCH MUCH faster than the malloc equiv...
* We really need to do the slab allocator thingie here.
*/
static void
pmap_alloc_pv_entry()
{
/*
* do we have any pre-allocated map-pages left?
*/
if (npvvapg) {
vm_page_t m;
/*
* allocate a physical page out of the vm system
*/
m = vm_page_alloc(kernel_object,
OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
VM_ALLOC_INTERRUPT);
if (m) {
int newentries;
int i;
pv_entry_t entry;
newentries = (PAGE_SIZE / sizeof(struct pv_entry));
/*
* wire the page
*/
vm_page_wire(m);
m->flags &= ~PG_BUSY;
/*
* let the kernel see it
*/
pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
entry = (pv_entry_t) pvva;
/*
* update the allocation pointers
*/
pvva += PAGE_SIZE;
--npvvapg;
/*
* free the entries into the free list
*/
for (i = 0; i < newentries; i++) {
free_pv_entry(entry);
entry++;
}
}
}
if (!TAILQ_FIRST(&pv_freelist))
panic("get_pv_entry: cannot get a pv_entry_t");
}
/*
* init the pv_entry allocation system
*/
void
init_pv_entries(npg)
int npg;
{
/*
* Allocate enough kvm space for one entry per page, and
* each process having PMAP_SHPGPERPROC pages shared with other
* processes. (The system can panic if this is too small, but also
* can fail on bootup if this is too big.)
* XXX The pv management mechanism needs to be fixed so that systems
* with lots of shared mappings amongst lots of processes will still
* work. The fix will likely be that once we run out of pv entries
* we will free other entries (and the associated mappings), with
* some policy yet to be determined.
*/
npvvapg = ((PMAP_SHPGPERPROC * maxproc + npg) * sizeof(struct pv_entry)
+ PAGE_SIZE - 1) / PAGE_SIZE;
pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
/*
* get the first batch of entries
*/
pmap_alloc_pv_entry();
return zalloci(pvzone);
}
/*
@ -2614,7 +2522,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
}
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
if ((srcmpte == NULL) ||
(srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
continue;
if (pdnxt > end_addr)


@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.154 1997/08/05 01:32:05 dyson Exp $
* $Id: pmap.c,v 1.155 1997/08/05 22:06:47 dyson Exp $
*/
/*
@ -89,6 +89,7 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <sys/user.h>
@ -118,8 +119,6 @@
#define PTPHINT
static void init_pv_entries __P((int));
/*
* Get PDEs and PTEs for user/kernel address space
*/
@ -154,8 +153,9 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pgeflag; /* PG_G or-in */
static int pseflag; /* PG_PS or-in */
int pgeflag; /* PG_G or-in */
int pseflag; /* PG_PS or-in */
int pv_npg;
static int nkpt;
static vm_page_t nkpg;
@ -163,15 +163,14 @@ vm_offset_t kernel_vm_end;
extern vm_offset_t clean_sva, clean_eva;
#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)
/*
* Data for the pv entry allocation mechanism
*/
static int pv_freelistcnt;
TAILQ_HEAD (,pv_entry) pv_freelist = {0};
static vm_offset_t pvva;
static int npvvapg;
vm_zone_t pvzone;
struct vm_zone pvzone_store;
struct vm_object pvzone_obj;
#define NPVINIT 8192
struct pv_entry pvinit[NPVINIT];
/*
* All those kernel PT submaps that BSD is so fond of
@ -191,7 +190,6 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
static void pmap_alloc_pv_entry __P((void));
static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
@ -475,43 +473,50 @@ pmap_init(phys_start, phys_end)
{
vm_offset_t addr;
vm_size_t s;
int i, npg;
int i;
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
s = (vm_size_t) (sizeof(pv_table_t) * npg);
s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i = 0; i < npg; i++) {
for(i = 0; i < pv_npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
}
TAILQ_INIT(&pv_freelist);
/*
* init the pv free list
*/
init_pv_entries(npg);
pvzone = &pvzone_store;
zbootinit(pvzone, "PV entries", sizeof(pvinit[0]), pvinit, NPVINIT);
/*
* Now it is safe to enable pv_table recording.
*/
pmap_initialized = TRUE;
}
void
pmap_init2() {
zinitna(pvzone, &pvzone_obj, NULL, 0,
PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 4);
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
@ -660,9 +665,9 @@ pmap_extract(pmap, va)
vm_offset_t rtval;
vm_offset_t pdirindex;
pdirindex = va >> PDRSHIFT;
if (pmap) {
if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
unsigned *pte;
if (((rtval = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
if ((rtval & PG_PS) != 0) {
rtval &= ~(NBPDR - 1);
rtval |= va & (NBPDR - 1);
return rtval;
@ -1384,7 +1389,9 @@ pmap_release(pmap)
pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
++pdstackptr;
} else {
kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
int pdstmp = pdstackptr - 1;
kmem_free(kernel_map, pdstack[pdstmp], PAGE_SIZE);
pdstack[pdstmp] = (vm_offset_t) pmap->pm_pdir;
}
pmap->pm_pdir = 0;
}
@ -1484,12 +1491,11 @@ pmap_reference(pmap)
/*
* free the pv_entry back to the free list
*/
static PMAP_INLINE void
static inline void
free_pv_entry(pv)
pv_entry_t pv;
{
++pv_freelistcnt;
TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
zfreei(pvzone, pv);
}
/*
@ -1498,108 +1504,10 @@ free_pv_entry(pv)
* the memory allocation is performed bypassing the malloc code
* because of the possibility of allocations at interrupt time.
*/
static pv_entry_t
get_pv_entry()
static inline pv_entry_t
get_pv_entry(void)
{
pv_entry_t tmp;
/*
* get more pv_entry pages if needed
*/
if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
pmap_alloc_pv_entry();
}
/*
* get a pv_entry off of the free list
*/
--pv_freelistcnt;
tmp = TAILQ_FIRST(&pv_freelist);
TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
return tmp;
}
/*
* This *strange* allocation routine eliminates the possibility of a malloc
* failure (*FATAL*) for a pv_entry_t data structure.
* also -- this code is MUCH MUCH faster than the malloc equiv...
* We really need to do the slab allocator thingie here.
*/
static void
pmap_alloc_pv_entry()
{
/*
* do we have any pre-allocated map-pages left?
*/
if (npvvapg) {
vm_page_t m;
/*
* allocate a physical page out of the vm system
*/
m = vm_page_alloc(kernel_object,
OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
VM_ALLOC_INTERRUPT);
if (m) {
int newentries;
int i;
pv_entry_t entry;
newentries = (PAGE_SIZE / sizeof(struct pv_entry));
/*
* wire the page
*/
vm_page_wire(m);
m->flags &= ~PG_BUSY;
/*
* let the kernel see it
*/
pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
entry = (pv_entry_t) pvva;
/*
* update the allocation pointers
*/
pvva += PAGE_SIZE;
--npvvapg;
/*
* free the entries into the free list
*/
for (i = 0; i < newentries; i++) {
free_pv_entry(entry);
entry++;
}
}
}
if (!TAILQ_FIRST(&pv_freelist))
panic("get_pv_entry: cannot get a pv_entry_t");
}
/*
* init the pv_entry allocation system
*/
void
init_pv_entries(npg)
int npg;
{
/*
* Allocate enough kvm space for one entry per page, and
* each process having PMAP_SHPGPERPROC pages shared with other
* processes. (The system can panic if this is too small, but also
* can fail on bootup if this is too big.)
* XXX The pv management mechanism needs to be fixed so that systems
* with lots of shared mappings amongst lots of processes will still
* work. The fix will likely be that once we run out of pv entries
* we will free other entries (and the associated mappings), with
* some policy yet to be determined.
*/
npvvapg = ((PMAP_SHPGPERPROC * maxproc + npg) * sizeof(struct pv_entry)
+ PAGE_SIZE - 1) / PAGE_SIZE;
pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
/*
* get the first batch of entries
*/
pmap_alloc_pv_entry();
return zalloci(pvzone);
}
/*
@ -2614,7 +2522,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
}
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
if ((srcmpte == NULL) ||
(srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
continue;
if (pdnxt > end_addr)
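
The pmap.c change above replaces the hand-rolled pv_entry allocator with the new zone allocator in two stages: pmap_init() seeds the zone from a static array via zbootinit() while the VM system is still coming up, and pmap_init2() later calls zinitna() to attach a VM object and make the zone interrupt-safe, sized at PMAP_SHPGPERPROC * maxproc + pv_npg entries. Below is a minimal sketch of that two-stage pattern; the zone name "FOO", struct foo, the helper function names, and the 1024-entry limit are illustrative assumptions, not part of the commit, and the include list is approximate.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_zone.h>

struct foo {
	int	f_dummy;
};

#define NFOOINIT 64

static struct foo fooinit[NFOOINIT];	/* static items used before the VM system is up */
static struct vm_zone foozone_store;	/* statically allocated zone header */
static struct vm_object foozone_obj;	/* backing object handed to zinitna() */
static vm_zone_t foozone;

/* Stage 1: early boot -- no page allocation is possible yet. */
void
foo_bootinit(void)
{
	foozone = &foozone_store;
	zbootinit(foozone, "FOO", sizeof (struct foo), fooinit, NFOOINIT);
}

/* Stage 2: the VM system is running -- reserve KVA for the zone and
 * allow interrupt-time allocation, capped at an assumed 1024 entries. */
void
foo_init2(void)
{
	zinitna(foozone, &foozone_obj, NULL, 0, 1024, ZONE_INTERRUPT, 4);
}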


@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: sys_pipe.c,v 1.29 1997/08/05 00:01:26 dyson Exp $
* $Id: sys_pipe.c,v 1.30 1997/08/05 00:05:00 dyson Exp $
*/
/*
@ -166,8 +166,7 @@ pipe(p, uap, retval)
int fd, error;
if (pipe_zone == NULL)
pipe_zone = zinit("PIPE", sizeof (struct pipe), 0,
ZONE_WAIT, 4);
pipe_zone = zinit("PIPE", sizeof (struct pipe), 0, 0, 4);
rpipe = zalloc( pipe_zone);
pipeinit(rpipe);


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.81 1997/08/05 01:32:52 dyson Exp $
* $Id: vm_map.c,v 1.82 1997/08/05 22:07:27 dyson Exp $
*/
/*
@ -174,13 +174,13 @@ void
vm_map_startup()
{
mapzone = &mapzone_store;
_zbootinit(mapzone, "MAP", sizeof (struct vm_map),
zbootinit(mapzone, "MAP", sizeof (struct vm_map),
map_init, MAX_KMAP);
kmapentzone = &kmapentzone_store;
_zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
kmap_entry_init, MAX_KMAPENT);
mapentzone = &mapentzone_store;
_zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
map_entry_init, MAX_MAPENT);
}
@ -207,12 +207,12 @@ vmspace_alloc(min, max, pageable)
void
vm_init2(void) {
_zinit(kmapentzone, &kmapentobj,
zinitna(kmapentzone, &kmapentobj,
NULL, 0, 4096, ZONE_INTERRUPT, 4);
_zinit(mapentzone, &mapentobj,
NULL, 0, 0, ZONE_WAIT, 4);
_zinit(mapzone, &mapobj,
NULL, 0, 0, ZONE_WAIT, 4);
zinitna(mapentzone, &mapentobj,
NULL, 0, 0, 0, 4);
zinitna(mapzone, &mapobj,
NULL, 0, 0, 0, 4);
}
void


@ -6,19 +6,19 @@
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
* John S. Dyson.
* 4. This work was done expressly for inclusion into FreeBSD. Other use
* is allowed if this notation is included.
* is allowed if this notation is included.
* 5. Modifications may be freely made to this file if the above conditions
* are met.
* are met.
*
* $Id$
* $Id: vm_zone.c,v 1.1 1997/08/05 00:07:29 dyson Exp $
*/
#include <sys/param.h>
@ -48,23 +48,44 @@
* Note that the initial implementation of this had coloring, and
* absolutely no improvement (actually perf degradation) occurred.
*
* _zinit, zinit, _zbootinit are the initialization routines.
* zinitna, zinit, zbootinit are the initialization routines.
* zalloc, zfree, are the interrupt/lock unsafe allocation/free routines.
* zalloci, zfreei, are the interrupt/lock safe allocation/free routines.
*/
/*
* Create a zone, but don't allocate the zone structure. If the
* zone had been previously created by the zone boot code, initialize
* various parts of the zone code.
*
* If waits are not allowed during allocation (e.g. during interrupt
* code), a-priori allocate the kernel virtual space, and allocate
* only pages when needed.
*
* Arguments:
* z pointer to zone structure.
* obj pointer to VM object (opt).
* name name of zone.
* size size of zone entries.
* nentries number of zone entries allocated (only ZONE_INTERRUPT.)
* flags ZONE_INTERRUPT -- items can be allocated at interrupt time.
* zalloc number of pages allocated when memory is needed.
*
* Note that when using ZONE_INTERRUPT, the size of the zone is limited
* by the nentries argument. The size of the memory allocatable is
* unlimited if ZONE_INTERRUPT is not set.
*
*/
int
_zinit(vm_zone_t z, vm_object_t obj, char *name, int size,
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
int nentries, int flags, int zalloc) {
int totsize;
if ((z->zflags & ZONE_BOOT) == 0) {
z->zsize = size;
simple_lock_init(&z->zlock);
z->zfreecnt = 0;
z->zname = name;
}
z->zflags |= flags;
@ -73,7 +94,7 @@ _zinit(vm_zone_t z, vm_object_t obj, char *name, int size,
* If we cannot wait, allocate KVA space up front, and we will fill
* in pages as needed.
*/
if ((z->zflags & ZONE_WAIT) == 0) {
if (z->zflags & ZONE_INTERRUPT) {
totsize = round_page(z->zsize * nentries);
@ -89,6 +110,9 @@ _zinit(vm_zone_t z, vm_object_t obj, char *name, int size,
z->zobj = obj;
_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
}
z->zallocflag = VM_ALLOC_INTERRUPT;
} else {
z->zallocflag = VM_ALLOC_SYSTEM;
}
if ( z->zsize > PAGE_SIZE)
@ -96,10 +120,6 @@ _zinit(vm_zone_t z, vm_object_t obj, char *name, int size,
else
z->zfreemin = PAGE_SIZE / z->zsize;
z->zallocflag = VM_ALLOC_SYSTEM;
if (z->zflags & ZONE_INTERRUPT)
z->zallocflag = VM_ALLOC_INTERRUPT;
z->zpagecount = 0;
if (zalloc)
z->zalloc = zalloc;
@ -109,6 +129,13 @@ _zinit(vm_zone_t z, vm_object_t obj, char *name, int size,
return 1;
}
/*
* Subroutine same as zinitna, except zone data structure is allocated
* automatically by malloc. This routine should normally be used, except
* in certain tricky startup conditions in the VM system -- then
* zbootinit and zinitna can be used. Zinit is the standard zone
* initialization call.
*/
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc) {
vm_zone_t z;
@ -116,7 +143,8 @@ zinit(char *name, int size, int nentries, int flags, int zalloc) {
if (z == NULL)
return NULL;
if (_zinit(z, NULL, name, size, nentries, flags, zalloc) == 0) {
z->zflags = 0;
if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
free(z, M_ZONE);
return NULL;
}
@ -124,8 +152,12 @@ zinit(char *name, int size, int nentries, int flags, int zalloc) {
return z;
}
/*
* Initialize a zone before the system is fully up. This routine should
* only be called before full VM startup.
*/
void
_zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) {
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) {
int i;
@ -143,11 +175,14 @@ _zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) {
for (i = 0; i < nitems; i++) {
* (void **) item = z->zitems;
z->zitems = item;
++z->zfreecnt;
(char *) item += z->zsize;
}
z->zfreecnt += nitems;
}
/*
* Zone critical region locks.
*/
static inline int
zlock(vm_zone_t z) {
int s;
@ -162,13 +197,32 @@ zunlock(vm_zone_t z, int s) {
splx(s);
}
/*
* void *zalloc(vm_zone_t zone) --
* Returns an item from a specified zone.
*
* void zfree(vm_zone_t zone, void *item) --
* Frees an item back to a specified zone.
*
* void *zalloci(vm_zone_t zone) --
* Returns an item from a specified zone, interrupt safe.
*
* void zfreei(vm_zone_t zone, void *item) --
* Frees an item back to a specified zone, interrupt safe.
*
*/
/*
* Zone allocator/deallocator. These are interrupt / (or potentially SMP)
* safe. The raw zalloc/zfree routines are in the vm_zone header file,
* and are not interrupt safe, but are fast.
*/
void *
zalloci(vm_zone_t z) {
int s;
void *item;
s = zlock(z);
item = zalloc(z);
item = _zalloc(z);
zunlock(z, s);
return item;
}
@ -176,21 +230,23 @@ zalloci(vm_zone_t z) {
void
zfreei(vm_zone_t z, void *item) {
int s;
s = zlock(z);
zfree(z, item);
_zfree(z, item);
zunlock(z, s);
return;
}
/*
* Internal zone routine. Not to be called from external (non vm_zone) code.
*/
void *
zget(vm_zone_t z, int s) {
_zget(vm_zone_t z) {
int i;
vm_page_t m;
int nitems;
void *item, *litem;
void *item;
if ((z->zflags & ZONE_WAIT) == 0) {
if (z->zflags & ZONE_INTERRUPT) {
item = (char *) z->zkva + z->zpagecount * PAGE_SIZE;
for( i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax)); i++) {
@ -200,7 +256,7 @@ zget(vm_zone_t z, int s) {
}
pmap_kenter(z->zkva + z->zpagecount * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
++z->zpagecount;
z->zpagecount++;
}
nitems = (i * PAGE_SIZE) / z->zsize;
} else {
@ -214,14 +270,22 @@ zget(vm_zone_t z, int s) {
/*
* Save one for immediate allocation
*/
nitems -= 1;
for (i = 0; i < nitems; i++) {
* (void **) item = z->zitems;
z->zitems = item;
(char *) item += z->zsize;
++z->zfreecnt;
if (nitems != 0) {
nitems -= 1;
for (i = 0; i < nitems; i++) {
* (void **) item = z->zitems;
z->zitems = item;
(char *) item += z->zsize;
}
z->zfreecnt += nitems;
} else if (z->zfreecnt > 0) {
item = z->zitems;
z->zitems = *(void **) item;
z->zfreecnt--;
} else {
item = NULL;
}
return item;
}
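
The comment blocks added above document the full zone API: zinit() for the normal case, zbootinit()/zinitna() for the early-boot case, zalloc()/zfree() as the fast, interrupt-unsafe pair, and zalloci()/zfreei() as the spl-protected pair. As a companion to that documentation, here is a minimal usage sketch of the ordinary path, in the spirit of the sys_pipe.c change above; the zone name "DEMO", struct demo, and the wrapper functions are illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm_zone.h>

struct demo {
	int	d_value;
};

static vm_zone_t demozone;

void
demo_zone_setup(void)
{
	/*
	 * Arguments: name, item size, nentries (only meaningful with
	 * ZONE_INTERRUPT), flags, and pages to allocate per refill.
	 * zinit() malloc's the zone header and returns NULL on failure.
	 */
	demozone = zinit("DEMO", sizeof (struct demo), 0, 0, 4);
	if (demozone == NULL)
		panic("demo_zone_setup: cannot create zone");
}

struct demo *
demo_get(void)
{
	/* Fast path; from interrupt code use zalloci() instead.
	 * May return NULL if the zone cannot grow. */
	return (struct demo *) zalloc(demozone);
}

void
demo_put(struct demo *d)
{
	zfree(demozone, d);
}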


@ -1,25 +1,43 @@
/*
* Copyright (c) 1997 John S. Dyson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
* 4. This work was done expressly for inclusion into FreeBSD. Other use
* is allowed if this notation is included.
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id$
*/
#if !defined(_SYS_ZONE_H)
#define _SYS_ZONE_H
#define ZONE_COLOR 1
#define ZONE_INTERRUPT 2
#define ZONE_WAIT 4
#define ZONE_PREALLOCATE 8
#define ZONE_BOOT 16
#define ZONE_INTERRUPT 1 /* Use this if you need to allocate at int time */
#define ZONE_BOOT 16 /* This is an internal flag used by zbootinit */
#include <machine/param.h>
#include <sys/lock.h>
#define CACHE_LINE_SIZE 32
typedef struct vm_zone {
struct simplelock zlock; /* lock for data structure */
void *zitems; /* linked list of items */
int zfreemin; /* minimum number of free entries */
int zfreecnt; /* free entries */
int zfreemin; /* minimum number of free entries */
vm_offset_t zkva; /* Base kva of zone */
int zpagecount; /* Total # of allocated pages */
int zpagemax; /* Max address space */
@ -33,50 +51,59 @@ typedef struct vm_zone {
vm_zone_t zinit(char *name, int size, int nentries, int flags, int zalloc);
int _zinit(vm_zone_t z, struct vm_object *obj, char *name, int size,
int zinitna(vm_zone_t z, struct vm_object *obj, char *name, int size,
int nentries, int flags, int zalloc);
static void * zalloc(vm_zone_t z);
static void zfree(vm_zone_t z, void *item);
void * zalloci(vm_zone_t z) __attribute__((regparm(1)));
void zfreei(vm_zone_t z, void *item) __attribute__((regparm(2)));
void _zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) ;
void * zget(vm_zone_t z, int s) __attribute__((regparm(2)));
#if SMP > 1
void zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) ;
void * _zget(vm_zone_t z) __attribute__((regparm(1)));
/*
* void *zalloc(vm_zone_t zone) --
* Returns an item from a specified zone.
*
* void zfree(vm_zone_t zone, void *item) --
* Frees an item back to a specified zone.
*/
static __inline__ void *
zalloc(vm_zone_t z) {
return zalloci(z);
}
static __inline__ void
zfree(vm_zone_t z, void *item) {
zfreei(z, item);
}
#else
static __inline__ void *
zalloc(vm_zone_t z) {
int s;
_zalloc(vm_zone_t z) {
void *item;
if (z->zfreecnt <= z->zfreemin) {
return zget(z, s);
return _zget(z);
}
item = z->zitems;
z->zitems = *(void **) item;
--z->zfreecnt;
z->zfreecnt--;
return item;
}
static __inline__ void
zfree(vm_zone_t z, void *item) {
_zfree(vm_zone_t z, void *item) {
* (void **) item = z->zitems;
z->zitems = item;
++z->zfreecnt;
z->zfreecnt++;
}
static __inline__ void *
zalloc(vm_zone_t z) {
#if NCPU > 1
return zalloci(z);
#else
return _zalloc(z);
#endif
}
static __inline__ void
zfree(vm_zone_t z, void *item) {
#if NCPU > 1
zfreei(z, item);
#else
_zfree(z, item);
#endif
}
#endif
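
The inline _zalloc()/_zfree() routines above keep the zone's free list inside the free items themselves: the first word of each free item holds the pointer to the next free item (z->zitems is the list head, z->zfreecnt the count), so every item must be at least sizeof(void *) bytes. A standalone, simplified sketch of that idiom follows, without the locking, the zfreemin check, or the _zget() refill path; the names are hypothetical.

#include <stddef.h>

static void *freelist;	/* head: the first word of each free item links to the next */
static int freecnt;

static void
demo_zfree(void *item)
{
	* (void **) item = freelist;	/* item now points at the old head */
	freelist = item;
	freecnt++;
}

static void *
demo_zalloc(void)
{
	void *item;

	if (freecnt == 0)
		return NULL;		/* vm_zone would call _zget() here to refill */
	item = freelist;
	freelist = * (void **) item;	/* advance the head to the next free item */
	freecnt--;
	return item;
}

Because the bookkeeping lives in the free items themselves, allocation and free are a couple of pointer operations, which is why the header-file inlines are described above as fast but not interrupt safe.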