Dynamically generate the page table. This will allow us to detect the
physical address we are loaded at and change the mapping to match.
Author:	Andrew Turner
Date:	2014-02-07 19:15:25 +00:00
Parent:	c97492d0e0
Commit:	bc6c10477d
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=261606

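The point of the change: mmu_init_table baked its mappings in at assembly
time, so early boot could only map the PHYSADDR the kernel was configured
for. Generating the entries in code is what lets a follow-up change detect
the physical address the kernel was actually loaded at and map that
instead. A minimal C sketch of the detection idea (hypothetical names; in
locore.S the run-time address comes from a PC-relative adr before the MMU
is enabled):

#include <stdint.h>

extern char _start[];		/* link-time (virtual) address of the kernel */

/*
 * Hypothetical stand-in for a PC-relative "adr": returns the address
 * _start actually occupies in physical memory before the MMU is on.
 */
uintptr_t runtime_addr_of_start(void);

/* PA - VA delta; add it to any kernel symbol to get its physical address. */
static uintptr_t
load_offset(void)
{
	return (runtime_addr_of_start() - (uintptr_t)_start);
}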

@@ -153,22 +153,27 @@ Lunmapped:
 	ldr	r2, =(KERNVIRTADDR - KERNPHYSADDR)
 	sub	r0, r1, r2
-	adr	r4, mmu_init_table
-	b	3f
+	/*
+	 * Map PA == VA
+	 */
+	ldr	r5, =(PHYSADDR)
+	mov	r1, r5
+	mov	r2, r5
+	/* Map 64MiB, preserved over calls to build_pagetables */
+	mov	r3, #64
+	bl	build_pagetables
 
-2:
-	str	r3, [r0, r2]
-	add	r2, r2, #4
-	add	r3, r3, #(L1_S_SIZE)
-	adds	r1, r1, #-1
-	bhi	2b
-3:
-	ldmia	r4!, {r1,r2,r3}	/* # of sections, VA, PA|attr */
-	cmp	r1, #0
-	adrne	r5, 2b
-	bicne	r5, r5, #0xf0000000
-	orrne	r5, r5, #PHYSADDR
-	movne	pc, r5
+	/* Create the kernel map to jump to */
+	mov	r1, r5
+	ldr	r2, =(KERNBASE)
+	bl	build_pagetables
+
+#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
+	/* Create the custom map */
+	ldr	r1, =SOCDEV_VA
+	ldr	r2, =SOCDEV_PA
+	bl	build_pagetables
+#endif
 
 #if defined(SMP)
 	orr	r0, r0, #2		/* Set TTB shared memory flag */
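The replaced table walk becomes three calls to one routine. In C terms the
sequence is roughly the sketch below (it assumes the build_pagetables model
given after the next hunk; l1 stands for the physical address of the L1
table, and PHYSADDR, KERNBASE, SOCDEV_PA and SOCDEV_VA come from the kernel
configuration). Note that r3 = 64 is preserved across the calls, so the
optional SOCDEV window is also 64MiB:

build_pagetables(l1, PHYSADDR, PHYSADDR, 64);	/* identity map: PA == VA */
build_pagetables(l1, PHYSADDR, KERNBASE, 64);	/* kernel map to jump to */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
build_pagetables(l1, SOCDEV_PA, SOCDEV_VA, 64);	/* early SoC device window */
#endif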
@@ -238,6 +243,40 @@ virt_done:
 	adr	r0, .Lmainreturned
 	b	_C_LABEL(panic)
 	/* NOTREACHED */
+END(btext)
+END(_start)
+
+/*
+ * Builds the page table
+ * r0 - The table base address
+ * r1 - The physical address (trashed)
+ * r2 - The virtual address (trashed)
+ * r3 - The number of 1MiB sections
+ * r4 - Trashed
+ *
+ * Addresses must be 1MiB aligned
+ */
+build_pagetables:
+	/* Set the required page attributes */
+	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
+#if defined(SMP)
+	orr	r4, #(L1_SHARED)
+#endif
+	orr	r1, r4
+
+	/* Move the virtual address to the correct bit location */
+	lsr	r2, #(L1_S_SHIFT - 2)
+
+	mov	r4, r3
+1:
+	str	r1, [r0, r2]
+	add	r2, r2, #4
+	add	r1, r1, #(L1_S_SIZE)
+	adds	r4, r4, #-1
+	bhi	1b
+
+	RET
+
 #define MMU_INIT(va,pa,n_sec,attr) \
 	.word	n_sec					; \
 	.word	4*((va)>>L1_S_SHIFT)			; \
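The new routine is simple enough to model in C. A hedged sketch, not
FreeBSD code (the attribute values are illustrative stand-ins for the
pte.h definitions): each iteration stores one 4-byte L1 section
descriptor, pa | attributes, at slot va >> 20, advancing pa by 1MiB per
entry. The asm keeps the slot as a byte offset into the table, which is
why it shifts the VA right by (L1_S_SHIFT - 2) rather than L1_S_SHIFT.

#include <stdint.h>

#define L1_S_SHIFT	20			/* 1MiB sections */
#define L1_S_SIZE	(1u << L1_S_SHIFT)
#define L1_TYPE_S	0x02u			/* section descriptor */
#define L1_S_C		0x08u			/* cacheable */
#define L1_S_AP_KRW	0x400u			/* AP(KRW); illustrative value */

/*
 * C model of build_pagetables.  Register contract as in the comment
 * above: table = r0, pa = r1, va = r2, n = r3; addresses 1MiB aligned.
 */
static void
build_pagetables(uint32_t *table, uint32_t pa, uint32_t va, uint32_t n)
{
	uint32_t attr = L1_TYPE_S | L1_S_C | L1_S_AP_KRW;
	uint32_t i;

	for (i = 0; i < n; i++)
		table[(va >> L1_S_SHIFT) + i] = (pa + i * L1_S_SIZE) | attr;
}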
@@ -257,27 +296,7 @@ Lstartup_pagetable:
 Lstartup_pagetable_secondary:
 	.word	temp_pagetable
 #endif
-END(btext)
-END(_start)
 
-mmu_init_table:
-	/* fill all table VA==PA */
-	/* map SDRAM VA==PA, WT cacheable */
-#if !defined(SMP)
-	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
-	/* map VA 0xc0000000..0xc3ffffff to PA */
-	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
-#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
-	/* Map in 0x04000000 worth of the SoC's devices for bootstrap debugging */
-	MMU_INIT(SOCDEV_VA, SOCDEV_PA, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
-#endif
-#else
-	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
-	/* map VA 0xc0000000..0xc3ffffff to PA */
-	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
-	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
-#endif /* SMP */
-	.word	0	/* end of table */
 .Lstart:
 	.word	_edata
 	.word	_ebss
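For a concrete feel of the slot arithmetic the removed table encoded:
4*((va)>>L1_S_SHIFT) in MMU_INIT is the byte offset of a section slot, the
same value build_pagetables now derives with lsr r2, #(L1_S_SHIFT - 2). A
tiny hedged check, assuming KERNBASE is 0xc0000000:

#include <stdio.h>
#include <stdint.h>

#define L1_S_SHIFT	20

int
main(void)
{
	uint32_t kernbase = 0xc0000000u;	/* assumed KERNBASE */

	/* Expect slot 0xc00, byte offset 0x3000 into the L1 table. */
	printf("slot %#x, byte offset %#x\n",
	    kernbase >> L1_S_SHIFT, 4u * (kernbase >> L1_S_SHIFT));
	return (0);
}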