Tidy up some loose ends.

i386/ia64/alpha - catch up to sparc64/ppc:
- replace pmap_kernel() with refs to kernel_pmap
- change the kernel_pmap pointer to (&kernel_pmap_store)
  (this is a speedup since ld can resolve these addresses at compile/link time)
all platforms (as suggested by jake):
- gc unused pmap_reference
- gc unused pmap_destroy
- gc unused struct pmap.pm_count
(we never used pm_count - address space sharing is tracked at the vmspace level)
Peter Wemm  2002-04-29 07:43:16 +00:00
commit db17c6fc07 (parent d76b2f9d54)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=95710
23 changed files with 33 additions and 297 deletions
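For reference, the declaration change behind the "speedup" note is sketched below as a minimal, self-contained userspace C program (not FreeBSD source; the pm_active stand-in field and the main() harness are purely illustrative). With the old extern pointer, every use of kernel_pmap loads a global that is only assigned during pmap_bootstrap(); with the exported kernel_pmap_store object and the macro, ld resolves the address at compile/link time and the boot-time assignment disappears.

#include <stdio.h>

struct pmap {
	int	pm_active;		/* stand-in field for illustration */
};

/*
 * Old style (removed by this commit): a run-time pointer, assigned at boot:
 *	pmap_t kernel_pmap;
 *	...
 *	kernel_pmap = &kernel_pmap_store;
 */

/* New style: export the object; the "pointer" becomes a link-time constant. */
struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

int
main(void)
{
	struct pmap *pm = kernel_pmap;

	/* Callers such as pte_prot() simply compare against kernel_pmap. */
	printf("is kernel pmap: %d\n", pm == kernel_pmap);
	return (0);
}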

View file

@ -222,7 +222,7 @@
* Given a map and a machine independent protection code,
* convert to an alpha protection code.
*/
-#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
+#define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 0 : 1][p])
int protection_codes[2][8];
/*
@ -296,8 +296,7 @@ vm_size_t Lev2mapsize, Lev3mapsize;
/*
* Statically allocated kernel pmap
*/
-static struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
+struct pmap kernel_pmap_store;
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@ -531,13 +530,9 @@ pmap_bootstrap(vm_offset_t ptaddr, u_int maxasn)
alpha_protection_init();
/*
-* The kernel's pmap is statically allocated so we don't have to use
-* pmap_create, which is unlikely to work correctly at this part of
-* the boot sequence (XXX and which no longer exists).
+* Initialize the kernel pmap (which is statically allocated).
*/
-kernel_pmap = &kernel_pmap_store;
kernel_pmap->pm_lev1 = Lev1map;
-kernel_pmap->pm_count = 1;
kernel_pmap->pm_active = ~0;
kernel_pmap->pm_asn[alpha_pal_whami()].asn = 0;
kernel_pmap->pm_asn[alpha_pal_whami()].gen = 1;
@ -1356,7 +1351,6 @@ pmap_pinit0(pmap)
int i;
pmap->pm_lev1 = Lev1map;
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
for (i = 0; i < MAXCPU; i++) {
@ -1407,7 +1401,6 @@ pmap_pinit(pmap)
pmap->pm_lev1[PTLEV1I] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(lev1pg))
| PG_V | PG_KRE | PG_KWE;
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
for (i = 0; i < MAXCPU; i++) {
@ -1775,39 +1768,9 @@ pmap_growkernel(vm_offset_t addr)
critical_exit();
}
-/*
-* Retire the given physical map from service.
-* Should only be called if the map contains
-* no valid mappings.
-*/
-void
-pmap_destroy(pmap_t pmap)
-{
-int count;
-if (pmap == NULL)
-return;
-count = --pmap->pm_count;
-if (count == 0) {
-pmap_release(pmap);
-panic("destroying a pmap is not yet implemented");
-}
-}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pmap)
-{
-if (pmap != NULL) {
-pmap->pm_count++;
-}
-}
/***************************************************
-* page management routines.
+* page management routines.
***************************************************/
/*
@ -2691,16 +2654,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
{
}
-/*
-* Routine: pmap_kernel
-* Function:
-* Returns the physical map handle for the kernel.
-*/
-pmap_t
-pmap_kernel()
-{
-return (kernel_pmap);
-}
/*
* pmap_zero_page zeros the specified hardware page by

View file

@ -173,7 +173,6 @@ struct pmap {
pt_entry_t *pm_lev1; /* KVA of lev0map */
vm_object_t pm_pteobj; /* Container for pte's */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
-int pm_count; /* reference count */
u_int32_t pm_active; /* active cpus */
struct {
u_int32_t asn:ASN_BITS; /* address space number */
@ -192,7 +191,8 @@ struct pmap {
typedef struct pmap *pmap_t;
#ifdef _KERNEL
-extern pmap_t kernel_pmap;
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
#endif
/*
@ -231,7 +231,6 @@ vm_offset_t pmap_steal_memory(vm_size_t);
void pmap_bootstrap(vm_offset_t, u_int);
void pmap_setdevram(unsigned long long basea, vm_offset_t sizea);
int pmap_uses_prom_console(void);
-pmap_t pmap_kernel(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
unsigned *pmap_pte(pmap_t, vm_offset_t) __pure2;

View file

@ -239,7 +239,7 @@ static void isa_dmastart_cb(void *arg, bus_dma_segment_t *segs, int nseg,
panic("isa_dmastart: transfer mapping not contiguous");
if ((chipset.sgmap == NULL) &&
-(pmap_extract(pmap_kernel(), (vm_offset_t)addr)
+(pmap_extract(kernel_pmap, (vm_offset_t)addr)
> BUS_SPACE_MAXADDR_24BIT)) {
/* we bounced */
dma_bounced |= (1 << chan);

View file

@ -146,8 +146,7 @@
#define pte_prot(m, p) (protection_codes[p])
static int protection_codes[8];
-static struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
+struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
struct pmaplist allpmaps;
@ -306,14 +305,9 @@ pmap_bootstrap(firstaddr, loadaddr)
i386_protection_init();
/*
-* The kernel's pmap is statically allocated so we don't have to use
-* pmap_create, which is unlikely to work correctly at this part of
-* the boot sequence (XXX and which no longer exists).
+* Initialize the kernel pmap (which is statically allocated).
*/
-kernel_pmap = &kernel_pmap_store;
kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
-kernel_pmap->pm_count = 1;
kernel_pmap->pm_active = -1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvlist);
LIST_INIT(&allpmaps);
@ -1281,7 +1275,6 @@ pmap_pinit0(pmap)
pmap->pm_pdir =
(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
@ -1342,7 +1335,6 @@ pmap_pinit(pmap)
pmap->pm_pdir[PTDPTDI] =
VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
-pmap->pm_count = 1;
pmap->pm_active = 0;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
@ -1642,39 +1634,9 @@ pmap_growkernel(vm_offset_t addr)
splx(s);
}
-/*
-* Retire the given physical map from service.
-* Should only be called if the map contains
-* no valid mappings.
-*/
-void
-pmap_destroy(pmap_t pmap)
-{
-int count;
-if (pmap == NULL)
-return;
-count = --pmap->pm_count;
-if (count == 0) {
-pmap_release(pmap);
-panic("destroying a pmap is not yet implemented");
-}
-}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pmap)
-{
-if (pmap != NULL) {
-pmap->pm_count++;
-}
-}
/***************************************************
-* page management routines.
+* page management routines.
***************************************************/
/*
@ -2845,17 +2807,6 @@ printf ("IT HAPPENNED!");
}
}
-/*
-* Routine: pmap_kernel
-* Function:
-* Returns the physical map handle for the kernel.
-*/
-pmap_t
-pmap_kernel()
-{
-return (kernel_pmap);
-}
/*
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.

View file

@ -208,7 +208,6 @@ struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
vm_object_t pm_pteobj; /* Container for pte's */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
-int pm_count; /* reference count */
int pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
struct vm_page *pm_ptphint; /* pmap ptp hint */
@ -220,7 +219,8 @@ struct pmap {
typedef struct pmap *pmap_t;
#ifdef _KERNEL
-extern pmap_t kernel_pmap;
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
#endif
/*
@ -262,7 +262,6 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
void pmap_bootstrap( vm_offset_t, vm_offset_t);
-pmap_t pmap_kernel(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;

View file

@ -257,7 +257,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
-phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+phys = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@ -380,7 +380,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
-phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va));
#define ISARAM_END RAM_END
if (phys == 0)
panic("isa_dmacheck: no physical page present");

View file

@ -146,8 +146,7 @@
#define pte_prot(m, p) (protection_codes[p])
static int protection_codes[8];
-static struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
+struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
struct pmaplist allpmaps;
@ -306,14 +305,9 @@ pmap_bootstrap(firstaddr, loadaddr)
i386_protection_init();
/*
-* The kernel's pmap is statically allocated so we don't have to use
-* pmap_create, which is unlikely to work correctly at this part of
-* the boot sequence (XXX and which no longer exists).
+* Initialize the kernel pmap (which is statically allocated).
*/
-kernel_pmap = &kernel_pmap_store;
kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
-kernel_pmap->pm_count = 1;
kernel_pmap->pm_active = -1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvlist);
LIST_INIT(&allpmaps);
@ -1281,7 +1275,6 @@ pmap_pinit0(pmap)
pmap->pm_pdir =
(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
@ -1342,7 +1335,6 @@ pmap_pinit(pmap)
pmap->pm_pdir[PTDPTDI] =
VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
-pmap->pm_count = 1;
pmap->pm_active = 0;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
@ -1642,39 +1634,9 @@ pmap_growkernel(vm_offset_t addr)
splx(s);
}
-/*
-* Retire the given physical map from service.
-* Should only be called if the map contains
-* no valid mappings.
-*/
-void
-pmap_destroy(pmap_t pmap)
-{
-int count;
-if (pmap == NULL)
-return;
-count = --pmap->pm_count;
-if (count == 0) {
-pmap_release(pmap);
-panic("destroying a pmap is not yet implemented");
-}
-}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pmap)
-{
-if (pmap != NULL) {
-pmap->pm_count++;
-}
-}
/***************************************************
-* page management routines.
+* page management routines.
***************************************************/
/*
@ -2845,17 +2807,6 @@ printf ("IT HAPPENNED!");
}
}
-/*
-* Routine: pmap_kernel
-* Function:
-* Returns the physical map handle for the kernel.
-*/
-pmap_t
-pmap_kernel()
-{
-return (kernel_pmap);
-}
/*
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.

View file

@ -208,7 +208,6 @@ struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
vm_object_t pm_pteobj; /* Container for pte's */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
-int pm_count; /* reference count */
int pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
struct vm_page *pm_ptphint; /* pmap ptp hint */
@ -220,7 +219,8 @@ struct pmap {
typedef struct pmap *pmap_t;
#ifdef _KERNEL
-extern pmap_t kernel_pmap;
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
#endif
/*
@ -262,7 +262,6 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
void pmap_bootstrap( vm_offset_t, vm_offset_t);
-pmap_t pmap_kernel(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;

View file

@ -392,7 +392,7 @@ static int bs_dmarangecheck(caddr_t va, unsigned length)
endva = (vm_offset_t)round_page((unsigned long)(va+length));
for (; va < (caddr_t)endva; va += PAGE_SIZE) {
-phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va));
if (phys == 0)
panic("bs_dmarangecheck: no physical page present");
if (phys >= RAM_END)

View file

@ -257,7 +257,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
-phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+phys = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@ -380,7 +380,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
-phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va));
#define ISARAM_END RAM_END
if (phys == 0)
panic("isa_dmacheck: no physical page present");

View file

@ -166,7 +166,7 @@ MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");
* Given a map and a machine independent protection code,
* convert to an ia64 protection code.
*/
-#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
+#define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 0 : 1][p])
#define pte_prot_pl(m, p) (pte_prot(m, p) & 3)
#define pte_prot_ar(m, p) (pte_prot(m, p) >> 2)
int protection_codes[2][8];
@ -179,8 +179,7 @@ int protection_codes[2][8];
/*
* Statically allocated kernel pmap
*/
-static struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
+struct pmap kernel_pmap_store;
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@ -435,14 +434,10 @@ pmap_bootstrap()
ia64_protection_init();
/*
-* The kernel's pmap is statically allocated so we don't have to use
-* pmap_create, which is unlikely to work correctly at this part of
-* the boot sequence (XXX and which no longer exists).
+* Initialize the kernel pmap (which is statically allocated).
*/
-kernel_pmap = &kernel_pmap_store;
for (i = 0; i < 5; i++)
kernel_pmap->pm_rid[i] = 0;
-kernel_pmap->pm_count = 1;
kernel_pmap->pm_active = 1;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
PCPU_SET(current_pmap, kernel_pmap);
@ -824,7 +819,6 @@ pmap_pinit0(struct pmap *pmap)
pmap->pm_flags = 0;
for (i = 0; i < 5; i++)
pmap->pm_rid[i] = 0;
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
@ -843,7 +837,6 @@ pmap_pinit(struct pmap *pmap)
pmap->pm_flags = 0;
for (i = 0; i < 5; i++)
pmap->pm_rid[i] = 0;
-pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
@ -926,39 +919,8 @@ pmap_growkernel(vm_offset_t addr)
}
}
-/*
-* Retire the given physical map from service.
-* Should only be called if the map contains
-* no valid mappings.
-*/
-void
-pmap_destroy(pmap_t pmap)
-{
-int count;
-if (pmap == NULL)
-return;
-count = --pmap->pm_count;
-if (count == 0) {
-pmap_release(pmap);
-panic("destroying a pmap is not yet implemented");
-}
-}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pmap)
-{
-if (pmap != NULL) {
-pmap->pm_count++;
-}
-}
/***************************************************
-* page management routines.
+* page management routines.
***************************************************/
/*
@ -2095,16 +2057,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
{
}
-/*
-* Routine: pmap_kernel
-* Function:
-* Returns the physical map handle for the kernel.
-*/
-pmap_t
-pmap_kernel()
-{
-return (kernel_pmap);
-}
/*
* pmap_zero_page zeros the specified hardware page by

View file

@ -89,7 +89,6 @@ struct md_page {
struct pmap {
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
u_int32_t pm_rid[5]; /* base RID for pmap */
-int pm_count; /* reference count */
int pm_flags; /* pmap flags */
int pm_active; /* active flag */
struct pmap_statistics pm_stats; /* pmap statistics */
@ -104,7 +103,8 @@ struct pmap {
typedef struct pmap *pmap_t;
#ifdef _KERNEL
-extern pmap_t kernel_pmap;
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
#endif
/*
@ -134,7 +134,6 @@ vm_offset_t pmap_steal_memory(vm_size_t);
void pmap_bootstrap(void);
void pmap_setdevram(unsigned long long basea, vm_offset_t sizea);
int pmap_uses_prom_console(void);
-pmap_t pmap_kernel(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
unsigned *pmap_pte(pmap_t, vm_offset_t) __pure2;

View file

@ -242,7 +242,7 @@ static void isa_dmastart_cb(void *arg, bus_dma_segment_t *segs, int nseg,
#if 0
if ((chipset.sgmap == NULL) &&
-(pmap_extract(pmap_kernel(), (vm_offset_t)addr)
+(pmap_extract(kernel_pmap, (vm_offset_t)addr)
> BUS_SPACE_MAXADDR_24BIT)) {
/* we bounced */
dma_bounced |= (1 << chan);

View file

@ -290,7 +290,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
-phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+phys = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@ -440,7 +440,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
-phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va));
#ifdef EPSON_BOUNCEDMA
#define ISARAM_END 0xf00000
#else

View file

@ -290,7 +290,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
-phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
+phys = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@ -440,7 +440,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
-phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
+phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va));
#ifdef EPSON_BOUNCEDMA
#define ISARAM_END 0xf00000
#else

View file

@ -686,7 +686,6 @@ pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
}
kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
kernel_pmap->pm_active = ~0;
-kernel_pmap->pm_count = 1;
/*
* Allocate a kernel stack with a guard page for thread0 and map it
@ -1223,7 +1222,6 @@ pmap_pinit(pmap_t pmap)
/*
* Allocate some segment registers for this pmap.
*/
-pmap->pm_count = 1;
for (i = 0; i < NPMAPS; i += VSID_NBPW) {
u_int hash, n;
@ -1369,17 +1367,6 @@ pmap_qremove(vm_offset_t va, int count)
pmap_kremove(va);
}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pm)
-{
-if (pm != NULL)
-pm->pm_count++;
-}
void
pmap_release(pmap_t pmap)
{

View file

@ -41,7 +41,6 @@ struct pmap {
u_int pm_sr[16];
u_int pm_active;
u_int pm_context;
-u_int pm_count;
struct pmap_statistics pm_stats;
};

View file

@ -686,7 +686,6 @@ pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
}
kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
kernel_pmap->pm_active = ~0;
-kernel_pmap->pm_count = 1;
/*
* Allocate a kernel stack with a guard page for thread0 and map it
@ -1223,7 +1222,6 @@ pmap_pinit(pmap_t pmap)
/*
* Allocate some segment registers for this pmap.
*/
-pmap->pm_count = 1;
for (i = 0; i < NPMAPS; i += VSID_NBPW) {
u_int hash, n;
@ -1369,17 +1367,6 @@ pmap_qremove(vm_offset_t va, int count)
pmap_kremove(va);
}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pm)
-{
-if (pm != NULL)
-pm->pm_count++;
-}
void
pmap_release(pmap_t pmap)
{

View file

@ -686,7 +686,6 @@ pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
}
kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
kernel_pmap->pm_active = ~0;
-kernel_pmap->pm_count = 1;
/*
* Allocate a kernel stack with a guard page for thread0 and map it
@ -1223,7 +1222,6 @@ pmap_pinit(pmap_t pmap)
/*
* Allocate some segment registers for this pmap.
*/
-pmap->pm_count = 1;
for (i = 0; i < NPMAPS; i += VSID_NBPW) {
u_int hash, n;
@ -1369,17 +1367,6 @@ pmap_qremove(vm_offset_t va, int count)
pmap_kremove(va);
}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pm)
-{
-if (pm != NULL)
-pm->pm_count++;
-}
void
pmap_release(pmap_t pmap)
{

View file

@ -71,7 +71,6 @@ struct pmap {
vm_object_t pm_tsb_obj;
u_int pm_active;
u_int pm_context[MAXCPU];
-u_int pm_count;
struct pmap_statistics pm_stats;
};

View file

@ -406,7 +406,6 @@ pmap_bootstrap(vm_offset_t ekva)
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = TLB_CTX_KERNEL;
pm->pm_active = ~0;
-pm->pm_count = 1;
TAILQ_INIT(&pm->pm_pvlist);
/* XXX flush all non-locked tlb entries */
@ -1116,7 +1115,6 @@ pmap_pinit0(pmap_t pm)
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = 0;
pm->pm_active = 0;
-pm->pm_count = 1;
pm->pm_tsb = NULL;
pm->pm_tsb_obj = NULL;
TAILQ_INIT(&pm->pm_pvlist);
@ -1167,7 +1165,6 @@ pmap_pinit(pmap_t pm)
for (i = 0; i < MAXCPU; i++)
pm->pm_context[i] = -1;
pm->pm_active = 0;
-pm->pm_count = 1;
TAILQ_INIT(&pm->pm_pvlist);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
@ -1219,26 +1216,6 @@ pmap_growkernel(vm_offset_t addr)
{
}
-/*
-* Retire the given physical map from service. Pmaps are always allocated
-* as part of a larger structure, so this never happens.
-*/
-void
-pmap_destroy(pmap_t pm)
-{
-panic("pmap_destroy: unimplemented");
-}
-/*
-* Add a reference to the specified pmap.
-*/
-void
-pmap_reference(pmap_t pm)
-{
-if (pm != NULL)
-pm->pm_count++;
-}
/*
* This routine is very drastic, but can save the system
* in a pinch.

View file

@ -98,7 +98,6 @@ void pmap_clear_reference(vm_page_t m);
void pmap_collect(void);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
-void pmap_destroy(pmap_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va);
@ -122,7 +121,6 @@ void pmap_pinit2(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void pmap_qenter(vm_offset_t, vm_page_t *, int);
void pmap_qremove(vm_offset_t, int);
-void pmap_reference(pmap_t);
void pmap_release(pmap_t);
void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);

View file

@ -270,7 +270,6 @@ kmem_suballoc(parent, min, max, size)
panic("kmem_suballoc");
}
*max = *min + size;
-pmap_reference(vm_map_pmap(parent));
result = vm_map_create(vm_map_pmap(parent), *min, *max);
if (result == NULL)
panic("kmem_suballoc: cannot create submap");