riscv: Construct an identity map in locore.S

This is useful for two reasons. First, within this change, it allows the
early DTB mapping to be eliminated: we can now simply dereference the
physical address provided by firmware and copy the DTB contents into KVA.

Second, it will aid an upcoming change: a larger reworking of page table
bootstrapping on this platform.

Reviewed by:	markj, jhb
MFC after:	1 month
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D45324
commit bfb8575469
parent e402155e1a
Mitchell Horne, 2024-06-20 15:28:20 -03:00
6 changed files with 25 additions and 55 deletions
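
What the identity map buys us, as a minimal sketch (not code from this
commit): once locore.S installs an L1 entry mapping the kernel's gigapage at
VA == PA, a raw physical pointer handed over by firmware is directly
dereferenceable from early C code.

	/* Sketch: inside the identity-mapped region, PA == VA, so the
	 * firmware's physical DTB address works as an ordinary pointer. */
	const void *fdt = (const void *)dtbp_phys;
	size_t size = fdt_totalsize(fdt);	/* read the FDT header's totalsize */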

diff --git a/sys/riscv/include/machdep.h b/sys/riscv/include/machdep.h

@@ -39,7 +39,6 @@ struct riscv_bootparams {
 	vm_offset_t	kern_l1pt;	/* Kernel L1 base */
 	vm_offset_t	kern_phys;	/* Kernel base (physical) addr */
 	vm_offset_t	kern_stack;
-	vm_offset_t	dtbp_virt;	/* Device tree blob virtual addr */
 	vm_offset_t	dtbp_phys;	/* Device tree blob physical addr */
 	vm_offset_t	modulep;	/* loader(8) metadata */
 };
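
After this hunk, the boot parameters carry only the DTB's physical address;
the structure reads (reconstructed from the diff, fields outside the hunk's
context omitted):

	struct riscv_bootparams {
		vm_offset_t	kern_l1pt;	/* Kernel L1 base */
		vm_offset_t	kern_phys;	/* Kernel base (physical) addr */
		vm_offset_t	kern_stack;
		vm_offset_t	dtbp_phys;	/* Device tree blob physical addr */
		vm_offset_t	modulep;	/* loader(8) metadata */
	};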

diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h

@@ -209,8 +209,6 @@
 #define	PS_STRINGS_SV39	(USRSTACK_SV39 - sizeof(struct ps_strings))
 #define	PS_STRINGS_SV48	(USRSTACK_SV48 - sizeof(struct ps_strings))
 
-#define	VM_EARLY_DTB_ADDRESS	(VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))
-
 /*
  * How many physical pages per kmem arena virtual page.
  */

diff --git a/sys/riscv/riscv/genassym.c b/sys/riscv/riscv/genassym.c

@@ -58,7 +58,6 @@
 ASSYM(KERNBASE, KERNBASE);
 ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
 ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
-ASSYM(VM_EARLY_DTB_ADDRESS, VM_EARLY_DTB_ADDRESS);
 ASSYM(PMAP_MAPDEV_EARLY_SIZE, PMAP_MAPDEV_EARLY_SIZE);
 
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
@@ -102,6 +101,5 @@ ASSYM(RISCV_BOOTPARAMS_KERN_L1PT, offsetof(struct riscv_bootparams, kern_l1pt));
 ASSYM(RISCV_BOOTPARAMS_KERN_PHYS, offsetof(struct riscv_bootparams, kern_phys));
 ASSYM(RISCV_BOOTPARAMS_KERN_STACK, offsetof(struct riscv_bootparams,
     kern_stack));
-ASSYM(RISCV_BOOTPARAMS_DTBP_VIRT, offsetof(struct riscv_bootparams, dtbp_virt));
 ASSYM(RISCV_BOOTPARAMS_DTBP_PHYS, offsetof(struct riscv_bootparams, dtbp_phys));
 ASSYM(RISCV_BOOTPARAMS_MODULEP, offsetof(struct riscv_bootparams, modulep));

diff --git a/sys/riscv/riscv/locore.S b/sys/riscv/riscv/locore.S

@@ -118,9 +118,26 @@ _start:
 	 * a1 - zero or dtbp
 	 */
pagetables:
-	/* Get the kernel's load address */
+	/* Get the kernel's load address (kernstart) in s9 */
 	jal	get_physmem
 
+	/* Construct 1GB Identity Map (1:1 PA->VA) */
+	lla	s1, pagetable_l1
+
+	srli	s2, s9, L1_SHIFT	/* kernstart >> L1_SHIFT */
+	andi	a5, s2, Ln_ADDR_MASK	/* & Ln_ADDR_MASK */
+	li	t4, (PTE_KERN)
+	slli	s2, s2, PTE_PPN2_S	/* (s2 << PTE_PPN2_S) */
+	or	t6, t4, s2
+
+	/* Store L1 PTE entry to position */
+	li	a6, PTE_SIZE
+	mulw	a5, a5, a6		/* calculate L1 slot */
+	add	t0, s1, a5
+	sd	t6, (t0)		/* Store new PTE */
+
 	/* Construct the virtual address space */
 	/* Add L1 entry for kernel */
 	lla	s1, pagetable_l1
 	lla	s2, pagetable_l2	/* Link to next level PN */
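
The arithmetic in the new block is easier to follow in C. A hypothetical
equivalent (assuming the Sv39 definitions of L1_SHIFT, Ln_ADDR_MASK,
PTE_PPN2_S, and PTE_KERN; the mulw by PTE_SIZE is simply array indexing):

	/*
	 * Pick the L1 slot whose 1GB virtual range equals the physical
	 * range containing kernstart, and point it back at the same
	 * physical gigapage, giving VA == PA for that region.
	 */
	uint64_t ppn2 = kernstart >> L1_SHIFT;	/* PPN[2] of the load address */
	uint64_t slot = ppn2 & Ln_ADDR_MASK;	/* L1 index where VA == PA */
	pagetable_l1[slot] = (ppn2 << PTE_PPN2_S) | PTE_KERN;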
@@ -172,29 +189,6 @@ pagetables:
 	add	t0, s1, a5
 	sd	t6, (t0)
 
-	/* Check if we have a DTB that needs to be mapped */
-	beqz	a1, 2f
-
-	/* Create an L2 mapping for the DTB */
-	lla	s1, pagetable_l2_devmap
-	mv	s2, a1
-	srli	s2, s2, PAGE_SHIFT
-	/* Mask off any bits that aren't aligned */
-	andi	s2, s2, ~((1 << (PTE_PPN1_S - PTE_PPN0_S)) - 1)
-	li	t0, (PTE_KERN)
-	slli	t2, s2, PTE_PPN0_S	/* << PTE_PPN0_S */
-	or	t0, t0, t2
-
-	/* Store the L2 table entry for the DTB */
-	li	a6, PTE_SIZE
-	li	a5, VM_EARLY_DTB_ADDRESS
-	srli	a5, a5, L2_SHIFT	/* >> L2_SHIFT */
-	andi	a5, a5, Ln_ADDR_MASK	/* & Ln_ADDR_MASK */
-	mulw	a5, a5, a6
-	add	t1, s1, a5
-	sd	t0, (t1)
-
 	/* Page tables END */
 
 	/*
@@ -202,7 +196,6 @@ pagetables:
 	 * may generate a page fault. We simply wish to continue onwards, so
 	 * have the trap deliver us to 'va'.
 	 */
-2:
 	lla	t0, va
 	sub	t0, t0, s9
 	li	t1, KERNBASE
@@ -257,15 +250,7 @@ va:
 	la	t0, initstack
 	sd	t0, RISCV_BOOTPARAMS_KERN_STACK(sp)
 
-	li	t0, (VM_EARLY_DTB_ADDRESS)
-	/* Add offset of DTB within superpage */
-	li	t1, (L2_OFFSET)
-	and	t1, a1, t1
-	add	t0, t0, t1
-	sd	t0, RISCV_BOOTPARAMS_DTBP_VIRT(sp)
-
 	sd	a1, RISCV_BOOTPARAMS_DTBP_PHYS(sp)
 	sd	a0, RISCV_BOOTPARAMS_MODULEP(sp)
 
 	mv	a0, sp
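
For reference, the deleted blocks above implemented the old scheme: map the
2MB superpage containing the DTB at the fixed virtual address
VM_EARLY_DTB_ADDRESS through the devmap L2 table. A hypothetical C rendering
of that removed assembly (names taken from the deleted lines):

	/* 2MB-align the DTB's page number, then install it in the L2 slot
	 * corresponding to VM_EARLY_DTB_ADDRESS. */
	uint64_t ppn = (dtbp_pa >> PAGE_SHIFT) &
	    ~((1UL << (PTE_PPN1_S - PTE_PPN0_S)) - 1);
	uint64_t slot = (VM_EARLY_DTB_ADDRESS >> L2_SHIFT) & Ln_ADDR_MASK;
	pagetable_l2_devmap[slot] = (ppn << PTE_PPN0_S) | PTE_KERN;

With the identity map in place, none of this bookkeeping is needed.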

diff --git a/sys/riscv/riscv/machdep.c b/sys/riscv/riscv/machdep.c

@@ -380,13 +380,16 @@ fake_preload_metadata(struct riscv_bootparams *rvbp)
 	PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
 	PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));
 
-	/* Copy the DTB to KVA space. */
+	/*
+	 * Copy the DTB to KVA space. We are able to dereference the physical
+	 * address due to the identity map created in locore.
+	 */
 	lastaddr = roundup(lastaddr, sizeof(int));
 	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
 	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
 	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
-	dtb_size = fdt_totalsize(rvbp->dtbp_virt);
-	memmove((void *)lastaddr, (const void *)rvbp->dtbp_virt, dtb_size);
+	dtb_size = fdt_totalsize(rvbp->dtbp_phys);
+	memmove((void *)lastaddr, (const void *)rvbp->dtbp_phys, dtb_size);
 	lastaddr = roundup(lastaddr + dtb_size, sizeof(int));
 	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_KERNEND);
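
This hunk leans on an implicit precondition: the DTB handed over in
dtbp_phys must lie inside the single gigapage covered by the new
identity-map L1 entry, or the dereference would fault. A hypothetical sanity
check (not part of this commit) could make that explicit:

	/* Hypothetical: the DTB must share its 1GB-aligned region (PPN[2])
	 * with the kernel's load address for the identity map to cover it. */
	KASSERT((rvbp->dtbp_phys >> L1_SHIFT) == (rvbp->kern_phys >> L1_SHIFT),
	    ("DTB %#lx outside identity-mapped region", rvbp->dtbp_phys));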

diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c

@@ -244,12 +244,9 @@ CTASSERT((DMAP_MAX_ADDRESS & ~L1_OFFSET) == DMAP_MAX_ADDRESS);
 
 /*
  * This code assumes that the early DEVMAP is L2_SIZE aligned and is fully
- * contained within a single L2 entry. The early DTB is mapped immediately
- * before the devmap L2 entry.
+ * contained within a single L2 entry.
  */
 CTASSERT((PMAP_MAPDEV_EARLY_SIZE & L2_OFFSET) == 0);
-CTASSERT((VM_EARLY_DTB_ADDRESS & L2_OFFSET) == 0);
-CTASSERT(VM_EARLY_DTB_ADDRESS < (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE));
 
 static struct rwlock_padalign pvh_global_lock;
 static struct mtx_padalign allpmaps_lock;
@@ -623,7 +620,6 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 	vm_offset_t dpcpu, freemempos, l0pv, msgbufpv;
 	vm_paddr_t l0pa, l1pa, max_pa, min_pa, pa;
 	pd_entry_t *l0p;
-	pt_entry_t *l2p;
 	u_int l1_slot, l2_slot;
 	u_int physmap_idx;
 	int i, mode;
@@ -687,15 +683,6 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 	freemempos = pmap_bootstrap_l3(l1pt,
 	    VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE, freemempos);
 
-	/*
-	 * Invalidate the mapping we created for the DTB. At this point a copy
-	 * has been created, and we no longer need it. We want to avoid the
-	 * possibility of an aliased mapping in the future.
-	 */
-	l2p = pmap_l2(kernel_pmap, VM_EARLY_DTB_ADDRESS);
-	if ((pmap_load(l2p) & PTE_V) != 0)
-		pmap_clear(l2p);
-	sfence_vma();
-
 #define	alloc_pages(var, np)	\