Auto size available kernel virtual address space based on physical memory
size.  This avoids blowing out kva in kmeminit() on large memory machines
(4 gigs or more).

Reviewed by:	tmm
Jake Burkholder 2002-08-10 22:14:16 +00:00
parent d64915d6e3
commit 5aebb40291
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=101653
7 changed files with 124 additions and 63 deletions
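As a stand-alone illustration of the new sizing logic, the sketch below mirrors the arithmetic that pmap_bootstrap() performs in the hunk further down. It is a sketch only: the helper and struct names are made up for this note, and the roundup() macro and the PAGE_SHIFT/TTE_SHIFT/PAGE_SIZE_4M values (8K base pages, 16-byte ttes) are assumptions rather than values copied from the tree.

#include <stdint.h>

#define PAGE_SHIFT	13			/* assumed: 8K base pages */
#define TTE_SHIFT	4			/* assumed: sizeof(struct tte) == 16 */
#define PAGE_SIZE_4M	(4UL << 20)

/* Round x up to a multiple of y. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

struct kva_sizing {
	uint64_t virtsz;		/* size of the kernel virtual address range */
	uint64_t tsb_kernel_size;	/* bytes of kernel tsb backing that range */
	uint64_t tsb_kernel_mask;	/* index mask used by the tsb miss handlers */
};

static struct kva_sizing
size_kva(uint64_t physsz)
{
	struct kva_sizing s;

	/*
	 * Provide at least as much kva as there is physical memory, rounded
	 * up so that the tsb is a whole number of 4 meg pages: each 4 meg
	 * tsb page holds 4M >> TTE_SHIFT ttes, each mapping one 8K page, so
	 * it covers 4M << (PAGE_SHIFT - TTE_SHIFT) of kva.
	 */
	s.virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
	s.tsb_kernel_size = s.virtsz >> (PAGE_SHIFT - TTE_SHIFT);
	s.tsb_kernel_mask = (s.tsb_kernel_size >> TTE_SHIFT) - 1;
	return (s);
}

Under those assumed values each 4 meg tsb page covers 2 gigs of kva, so a machine with 4 gigs of RAM gets 4 gigs of kva backed by an 8 meg tsb and a tsb_kernel_mask of 0x7ffff, instead of the fixed range the removed KVA_PAGES/KVA_RANGE macros gave.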

View file

@@ -76,6 +76,7 @@ static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
struct vmstate {
vm_offset_t vm_tsb;
vm_size_t vm_tsb_mask;
};
void
@@ -91,7 +92,8 @@ _kvm_initvtop(kvm_t *kd)
{
struct nlist nlist[2];
struct vmstate *vm;
u_long pa;
vm_offset_t pa;
vm_size_t mask;
vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
if (vm == NULL) {
@@ -102,17 +104,21 @@ _kvm_initvtop(kvm_t *kd)
vm->vm_tsb = 0;
nlist[0].n_name = "tsb_kernel_phys";
nlist[1].n_name = 0;
nlist[1].n_name = "tsb_kernel_mask";
nlist[2].n_name = 0;
if (kvm_nlist(kd, nlist) != 0) {
_kvm_err(kd, kd->program, "bad namelist");
return (-1);
}
if (kvm_read(kd, nlist[0].n_value, &pa, sizeof(pa)) != sizeof(pa)) {
if (kvm_read(kd, nlist[0].n_value, &pa, sizeof(pa)) != sizeof(pa) ||
kvm_read(kd, nlist[1].n_value, &mask, sizeof(mask)) !=
sizeof(mask)) {
_kvm_err(kd, kd->program, "cannot read tsb_kernel_phys");
return (-1);
}
vm->vm_tsb = pa;
vm->vm_tsb_mask = mask;
return (0);
}
@@ -127,7 +133,8 @@ _kvm_kvatop(kvm_t *kd, u_long va, u_long *pa)
vpn = btop(va);
offset = va & PAGE_MASK;
tte_pa = kd->vmst->vm_tsb + ((vpn & TSB_KERNEL_MASK) << TTE_SHIFT);
tte_pa = kd->vmst->vm_tsb +
((vpn & kd->vmst->vm_tsb_mask) << TTE_SHIFT);
/* XXX This has to be a physical address read, kvm_read is virtual */
if (lseek(kd->pmfd, tte_pa, 0) == -1) {

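On the userland side the only change is that the index mask comes from the kernel image instead of a header constant. A minimal sketch (the helper name is made up here, and the shift values are the assumed sparc64 ones, not taken from the headers) of the lookup _kvm_kvatop() performs:

#include <stdint.h>

#define PAGE_SHIFT	13	/* assumed: 8K base pages, so btop(va) == va >> 13 */
#define TTE_SHIFT	4	/* assumed: sizeof(struct tte) == 16 */

/*
 * Physical address of the tsb entry describing a kernel virtual address,
 * mirroring the tte_pa computation above.
 */
static uint64_t
tsb_entry_pa(uint64_t tsb_kernel_phys, uint64_t tsb_kernel_mask, uint64_t va)
{
	uint64_t vpn;

	vpn = va >> PAGE_SHIFT;		/* virtual page number */
	return (tsb_kernel_phys + ((vpn & tsb_kernel_mask) << TTE_SHIFT));
}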
View file

@@ -43,12 +43,9 @@
(TSB_BSHIFT - TSB_BUCKET_SHIFT - TTE_SHIFT)
#define TSB_BUCKET_MASK ((1 << TSB_BUCKET_ADDRESS_BITS) - 1)
#define TSB_KERNEL_SIZE \
((KVA_PAGES * PAGE_SIZE_4M) / sizeof(struct tte))
#define TSB_KERNEL_MASK (TSB_KERNEL_SIZE - 1)
#define TSB_KERNEL_VA_MASK (TSB_KERNEL_MASK << TTE_SHIFT)
extern struct tte *tsb_kernel;
extern vm_size_t tsb_kernel_mask;
extern vm_size_t tsb_kernel_size;
extern vm_offset_t tsb_kernel_phys;
static __inline struct tte *
@@ -66,7 +63,7 @@ tsb_vtobucket(pmap_t pm, vm_offset_t va)
static __inline struct tte *
tsb_kvpntotte(vm_offset_t vpn)
{
return (&tsb_kernel[vpn & TSB_KERNEL_MASK]);
return (&tsb_kernel[vpn & tsb_kernel_mask]);
}
static __inline struct tte *

View file

@@ -88,9 +88,9 @@
* that if this moves above the va hole, we will have to deal with sign
* extension of virtual addresses.
*/
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7fe00000000)
#define VM_MAXUSER_ADDRESS (0x7fe00000000UL)
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MIN_ADDRESS (0UL)
#define VM_MAX_ADDRESS (VM_MAXUSER_ADDRESS)
/*
@@ -115,19 +115,6 @@
#define VM_KMEM_SIZE_SCALE (3)
#endif
/*
* Number of 4 meg pages to use for the kernel tsb.
*/
#ifndef KVA_PAGES
#define KVA_PAGES (1)
#endif
/*
* Range of kernel virtual addresses. max = min + range.
*/
#define KVA_RANGE \
((KVA_PAGES * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT))
/*
* Lowest kernel virtual address, where the kernel is loaded. This is also
* arbitrary. We pick a reasonably low address, which allows all of kernel
@@ -136,12 +123,12 @@
* same as for x86 with default KVA_PAGES...
*/
#define VM_MIN_KERNEL_ADDRESS (0xc0000000)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + KVA_RANGE - PAGE_SIZE)
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
#define VM_MIN_PROM_ADDRESS (0xf0000000)
#define VM_MAX_PROM_ADDRESS (0xffffe000)
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
#define VM_MAX_KERNEL_ADDRESS (vm_max_kernel_address)
/*
* Initial pagein size of beginning of executable file.
*/
@@ -149,4 +136,6 @@
#define VM_INITIAL_PAGEIN 16
#endif
extern vm_offset_t vm_max_kernel_address;
#endif /* !_MACHINE_VMPARAM_H_ */

View file

@@ -67,6 +67,9 @@
#include "assym.s"
#define TSB_KERNEL_MASK 0x0
#define TSB_KERNEL 0x0
.register %g2,#ignore
.register %g3,#ignore
.register %g6,#ignore
@@ -1360,15 +1363,22 @@ END(intr_enqueue)
srlx %g6, TAR_VPN_SHIFT, %g6
/*
* Find the index into the kernel tsb.
* Find the index into the kernel tsb. The tsb mask gets patched at
* startup.
*/
set TSB_KERNEL_MASK, %g4
.globl tl1_immu_miss_load_tsb_mask
tl1_immu_miss_load_tsb_mask:
sethi %hi(TSB_KERNEL_MASK), %g4
or %g4, %lo(TSB_KERNEL_MASK), %g4
and %g6, %g4, %g3
/*
* Compute the tte address.
* Compute the tte address. The address of the kernel tsb gets
* patched at startup.
*/
ldxa [%g0 + AA_IMMU_TSB] %asi, %g4
.globl tl1_immu_miss_load_tsb
tl1_immu_miss_load_tsb:
sethi %hi(TSB_KERNEL), %g4
sllx %g3, TTE_SHIFT, %g3
add %g3, %g4, %g3
@@ -1449,16 +1459,23 @@ END(tl1_immu_miss_trap)
EMPTY
/*
* Find the index into the kernel tsb.
* Find the index into the kernel tsb. The tsb mask gets patched at
* startup.
*/
set TSB_KERNEL_MASK, %g4
.globl tl1_dmmu_miss_load_tsb_mask
tl1_dmmu_miss_load_tsb_mask:
sethi %hi(TSB_KERNEL_MASK), %g4
or %g4, %lo(TSB_KERNEL_MASK), %g4
srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g3
/*
* Compute the tte address.
* Compute the tte address. The address of the kernel tsb gets
* patched at startup.
*/
ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
.globl tl1_dmmu_miss_load_tsb
tl1_dmmu_miss_load_tsb:
sethi %hi(TSB_KERNEL), %g4
sllx %g3, TTE_SHIFT, %g3
add %g3, %g4, %g3
@@ -1606,16 +1623,23 @@ END(tl1_dmmu_miss_user)
mov %g6, %g2
/*
* Find the index into the kernel tsb.
* Find the index into the kernel tsb. The tsb mask gets patched at
* startup.
*/
set TSB_KERNEL_MASK, %g4
.globl tl1_dmmu_prot_load_tsb_mask
tl1_dmmu_prot_load_tsb_mask:
sethi %hi(TSB_KERNEL_MASK), %g4
or %g4, %lo(TSB_KERNEL_MASK), %g4
srlx %g6, TAR_VPN_SHIFT, %g6
and %g6, %g4, %g5
/*
* Compute the tte address.
* Compute the tte address. The address of the kernel tsb gets
* patched at startup.
*/
ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
.globl tl1_dmmu_prot_load_tsb
tl1_dmmu_prot_load_tsb:
sethi %hi(TSB_KERNEL), %g4
sllx %g5, TTE_SHIFT, %g5
add %g4, %g5, %g3

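The handlers above used to materialize the mask with the "set" synthetic instruction; with the placeholder constant now defined as 0x0, "set" may assemble to a single instruction, so each site instead spells out an explicit sethi/or pair behind a .globl label, guaranteeing two instruction words at a known address for the startup code to rewrite. A minimal sketch (the helper name is illustrative only) of what the patched pair computes:

#include <stdint.h>

/*
 * sethi supplies bits 31..10 of the constant and or supplies bits 9..0,
 * so the pair can be patched with any 32-bit mask or address at boot.
 */
static uint32_t
sethi_or_pair(uint32_t x)
{
	uint32_t g4;

	g4 = (x >> 10) << 10;	/* sethi %hi(x), %g4 */
	g4 |= x & 0x3ff;	/* or %g4, %lo(x), %g4 */
	return (g4);
}

For every 32-bit value the two immediates recombine exactly, which is what lets the mask and the tsb address be chosen at run time rather than at compile time.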
View file

@@ -100,7 +100,6 @@ ASSYM(TLB_DIRECT_SHIFT, TLB_DIRECT_SHIFT);
ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
ASSYM(INT_SHIFT, INT_SHIFT);
ASSYM(PTR_SHIFT, PTR_SHIFT);

View file

@@ -93,6 +93,7 @@
#include <machine/cache.h>
#include <machine/frame.h>
#include <machine/instr.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/smp.h>
@@ -146,6 +147,8 @@ vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;
vm_offset_t vm_max_kernel_address;
/*
* Kernel pmap.
*/
@@ -160,6 +163,13 @@ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
static vm_offset_t pmap_map_direct(vm_page_t m);
extern int tl1_immu_miss_load_tsb[];
extern int tl1_immu_miss_load_tsb_mask[];
extern int tl1_dmmu_miss_load_tsb[];
extern int tl1_dmmu_miss_load_tsb_mask[];
extern int tl1_dmmu_prot_load_tsb[];
extern int tl1_dmmu_prot_load_tsb_mask[];
/*
* If user pmap is processed with pmap_remove and the
* resident count drops to 0, there are no more pages to remove, so we
@@ -267,19 +277,13 @@ pmap_bootstrap(vm_offset_t ekva)
vm_offset_t pa;
vm_offset_t va;
vm_size_t physsz;
vm_size_t virtsz;
ihandle_t pmem;
ihandle_t vmem;
int sz;
int i;
int j;
/*
* Set the start and end of kva. The kernel is loaded at the first
* available 4 meg super page, so round up to the end of the page.
*/
virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
virtual_end = VM_MAX_KERNEL_ADDRESS;
/*
* Find out what physical memory is available from the prom and
* initialize the phys_avail array. This must be done before
@@ -309,17 +313,64 @@ pmap_bootstrap(vm_offset_t ekva)
}
physmem = btoc(physsz);
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
/*
* Allocate the kernel tsb and lock it in the tlb.
* Set the start and end of kva. The kernel is loaded at the first
* available 4 meg super page, so round up to the end of the page.
*/
pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M);
virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
virtual_end = vm_max_kernel_address;
/*
* Allocate the kernel tsb.
*/
pa = pmap_bootstrap_alloc(tsb_kernel_size);
if (pa & PAGE_MASK_4M)
panic("pmap_bootstrap: tsb unaligned\n");
tsb_kernel_phys = pa;
tsb_kernel = (struct tte *)virtual_avail;
virtual_avail += KVA_PAGES * PAGE_SIZE_4M;
virtual_avail += tsb_kernel_size;
/*
* Patch the virtual address and the tsb mask into the trap table.
*/
#define SETHI_G4(x) \
EIF_OP(IOP_FORM2) | EIF_F2_RD(4) | EIF_F2_OP2(INS0_SETHI) | \
EIF_IMM((x) >> 10, 22)
#define OR_G4_I_G4(x) \
EIF_OP(IOP_MISC) | EIF_F3_RD(4) | EIF_F3_OP3(INS2_OR) | \
EIF_F3_RS1(4) | EIF_F3_I(1) | EIF_IMM(x, 10)
tl1_immu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
tl1_immu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
tl1_immu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
flush(tl1_immu_miss_load_tsb);
flush(tl1_immu_miss_load_tsb_mask);
flush(tl1_immu_miss_load_tsb_mask + 1);
tl1_dmmu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
tl1_dmmu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
tl1_dmmu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
flush(tl1_dmmu_miss_load_tsb);
flush(tl1_dmmu_miss_load_tsb_mask);
flush(tl1_dmmu_miss_load_tsb_mask + 1);
tl1_dmmu_prot_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
tl1_dmmu_prot_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
tl1_dmmu_prot_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
flush(tl1_dmmu_prot_load_tsb);
flush(tl1_dmmu_prot_load_tsb_mask);
flush(tl1_dmmu_prot_load_tsb_mask + 1);
/*
* Lock it in the tlb.
*/
pmap_map_tsb();
bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M);
bzero(tsb_kernel, tsb_kernel_size);
/*
* Enter fake 8k pages for the 4MB kernel pages, so that
@@ -431,9 +482,9 @@ pmap_map_tsb(void)
/*
* Map the 4mb tsb pages.
*/
for (i = 0; i < KVA_PAGES; i++) {
va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
pa = tsb_kernel_phys + i * PAGE_SIZE_4M;
for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
va = (vm_offset_t)tsb_kernel + i;
pa = tsb_kernel_phys + i;
/* XXX - cheetah */
data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
TD_P | TD_W;
@@ -442,14 +493,6 @@ pmap_map_tsb(void)
stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
}
/*
* Load the tsb registers.
*/
stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel);
stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel);
membar(Sync);
flush(tsb_kernel);
/*
* Set the secondary context to be the kernel context (needed for
* fp block operations in the kernel and the cache code).

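The patching itself, earlier in this hunk, writes new instruction words at those labels and flushes each modified word so the instruction cache sees the change. As a rough illustration only (these helpers are not the kernel's SETHI_G4()/OR_G4_I_G4() macros, just a sketch of the standard SPARC field layout they appear to rely on):

#include <stdint.h>

#define REG_G4	4			/* %g4 */

/* Format 2: op=0, rd, op2=4 (sethi), imm22 = bits 31..10 of the constant. */
static uint32_t
ins_sethi_g4(uint32_t x)
{
	return ((0u << 30) | (REG_G4 << 25) | (4u << 22) |
	    ((x >> 10) & 0x3fffff));
}

/* Format 3: op=2, rd, op3=2 (or), rs1=%g4, i=1, immediate = low 10 bits. */
static uint32_t
ins_or_g4_imm_g4(uint32_t x)
{
	return ((2u << 30) | (REG_G4 << 25) | (0x02u << 19) |
	    (REG_G4 << 14) | (1u << 13) | (x & 0x3ff));
}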
View file

@@ -92,6 +92,8 @@ SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, tsb_nforeach, CTLFLAG_RD,
#endif
struct tte *tsb_kernel;
vm_size_t tsb_kernel_mask;
vm_size_t tsb_kernel_size;
vm_offset_t tsb_kernel_phys;
struct tte *