Eliminate vm.pmap.shpgperproc and vm.pmap.pv_entry_max because they no
longer serve any purpose.  Prior to r157446, they served a purpose
because there was a fixed amount of kernel virtual address space
reserved for pv entries at boot time.  However, since that change pv
entries are accessed through the direct map, and so there is no limit
imposed by a fixed amount of kernel virtual address space.

Fix a couple of nearby style issues.

Reviewed by:	jhb, kib
MFC after:	1 week
This commit is contained in:
Alan Cox 2012-03-21 04:00:58 +00:00
parent dc1f12bf69
commit d43d9d104d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=233256
3 changed files with 10 additions and 77 deletions

View file

@@ -148,10 +148,6 @@ __FBSDID("$FreeBSD$");
#include <machine/smp.h>
#endif
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
@@ -206,9 +202,8 @@ static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
/*
* Data for the pv entry allocation mechanism
*/
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pv_entry_count;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;
/*
* All those kernel PT submaps that BSD is so fond of
@@ -222,7 +217,7 @@ caddr_t CADDR1 = 0;
static caddr_t crashdumpmap;
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, boolean_t try);
static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
@@ -730,16 +725,6 @@ pmap_init(void)
mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
}
/*
* Initialize the address space (zone) for the pv entries. Set a
* high water mark so that the system can recover from excessive
* numbers of pv entries.
*/
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
/*
* If the kernel is running in a virtual machine on an AMD Family 10h
* processor, then it must assume that MCA is enabled by the virtual
@@ -775,36 +760,6 @@ pmap_init(void)
TAILQ_INIT(&pv_table[i].pv_list);
}
static int
pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
{
int error;
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0 && req->newptr) {
shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
pv_entry_high_water = 9 * (pv_entry_max / 10);
}
return (error);
}
SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_entry_max, CTLTYPE_INT|CTLFLAG_RW,
&pv_entry_max, 0, pmap_pventry_proc, "IU", "Max number of PV entries");
static int
pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
{
int error;
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0 && req->newptr) {
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
pv_entry_high_water = 9 * (pv_entry_max / 10);
}
return (error);
}
SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW,
&shpgperproc, 0, pmap_shpgperproc_proc, "IU", "Page share factor per proc");
static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
"2MB page mapping counters");
@@ -2184,10 +2139,8 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
* when needed.
*/
static pv_entry_t
get_pv_entry(pmap_t pmap, int try)
get_pv_entry(pmap_t pmap, boolean_t try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
struct vpgqueues *pq;
int bit, field;
pv_entry_t pv;
@@ -2197,12 +2150,6 @@ get_pv_entry(pmap_t pmap, int try)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PV_STAT(pv_entry_allocs++);
pv_entry_count++;
if (pv_entry_count > pv_entry_high_water)
if (ratecheck(&lastprint, &printinterval))
printf("Approaching the limit on PV entries, consider "
"increasing either the vm.pmap.shpgperproc or the "
"vm.pmap.pv_entry_max sysctl.\n");
pq = NULL;
retry:
pc = TAILQ_FIRST(&pmap->pm_pvchunk);
@@ -2220,8 +2167,10 @@ get_pv_entry(pmap_t pmap, int try)
if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
pc->pc_map[2] == 0) {
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
pc_list);
}
pv_entry_count++;
PV_STAT(pv_entry_spare--);
return (pv);
}
@@ -2232,7 +2181,6 @@ get_pv_entry(pmap_t pmap, int try)
VM_ALLOC_WIRED);
if (m == NULL) {
if (try) {
pv_entry_count--;
PV_STAT(pc_chunk_tryfail++);
return (NULL);
}
@@ -2248,7 +2196,7 @@ get_pv_entry(pmap_t pmap, int try)
PV_STAT(pmap_collect_active++);
pq = &vm_page_queues[PQ_ACTIVE];
} else
panic("get_pv_entry: increase vm.pmap.shpgperproc");
panic("get_pv_entry: allocation failed");
pmap_collect(pmap, pq);
goto retry;
}
@@ -2262,6 +2210,7 @@ get_pv_entry(pmap_t pmap, int try)
pc->pc_map[2] = PC_FREE2;
pv = &pc->pc_pventry[0];
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
pv_entry_count++;
PV_STAT(pv_entry_spare += _NPCPV - 1);
return (pv);
}
@@ -2419,8 +2368,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (pv_entry_count < pv_entry_high_water &&
(pv = get_pv_entry(pmap, TRUE)) != NULL) {
if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
return (TRUE);
@@ -2438,8 +2386,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
pv_entry_t pv;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (pv_entry_count < pv_entry_high_water &&
(pv = get_pv_entry(pmap, TRUE)) != NULL) {
if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
pv->pv_va = va;
pvh = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);

View file

@@ -492,19 +492,6 @@ device cpuctl
#
options ENABLE_ALART # Control alarm on Intel intpm driver
#
# Set the number of PV entries per process. Increasing this can
# stop panics related to heavy use of shared memory. However, that can
# (combined with large amounts of physical memory) cause panics at
# boot time due to the kernel running out of VM space.
#
# If you're tweaking this, you might also want to increase the sysctls
# "vm.v_free_min", "vm.v_free_reserved", and "vm.v_free_target".
#
# The value below is one more than the default.
#
options PMAP_SHPGPERPROC=201
#
# Number of initial kernel page table pages used for early bootstrap.
# This number should include enough pages to map the kernel and any

View file

@@ -7,7 +7,6 @@ COUNT_XINVLTLB_HITS opt_smp.h
COUNT_IPIS opt_smp.h
MAXMEM
PERFMON
PMAP_SHPGPERPROC opt_pmap.h
MPTABLE_FORCE_HTT
MP_WATCHDOG
NKPT opt_pmap.h