s390/boot: Uncouple virtual and physical kernel offsets

This is a preparatory rework to allow uncoupling virtual
and physical address spaces.

Currently __kaslr_offset is the kernel offset both in
physical memory on boot and in virtual memory after DAT
mode is enabled.
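
To illustrate the coupling (a made-up, stand-alone sketch, not
the actual boot code; all addresses are invented), the physical
load address and the virtual run address are currently derived
from one and the same offset:

#include <stdio.h>

int main(void)
{
	unsigned long default_lma = 0x100000UL;	/* hypothetical link address */
	unsigned long kaslr_off = 0x2000000UL;	/* hypothetical random offset */

	/* Where the image is placed in physical memory on boot ... */
	unsigned long phys_base = default_lma + kaslr_off;
	/* ... and where it runs after DAT is enabled: the same address. */
	unsigned long virt_base = default_lma + kaslr_off;

	printf("phys 0x%lx == virt 0x%lx\n", phys_base, virt_base);
	return 0;
}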

Uncouple these offsets and rename the physical address
space variant to __kaslr_offset_phys, while keeping the
name __kaslr_offset for the offset in the virtual address
space.
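
The split mirrors the struct vm_layout hunk further down. The
stand-alone sketch below only reuses the names from the patch;
the surrounding program and values are illustrative:

#include <stdio.h>

struct vm_layout {
	unsigned long kaslr_offset;		/* offset in the virtual address space */
	unsigned long kaslr_offset_phys;	/* offset of the image in physical memory */
};

static struct vm_layout vm_layout;

#define __kaslr_offset		vm_layout.kaslr_offset
#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys

int main(void)
{
	/* With this patch both offsets still get the same value ... */
	__kaslr_offset_phys = 0x2000000UL;	/* hypothetical */
	__kaslr_offset = __kaslr_offset_phys;

	/* ... but later patches may let them diverge. */
	printf("virt 0x%lx phys 0x%lx\n", __kaslr_offset, __kaslr_offset_phys);
	return 0;
}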

Do not use __kaslr_offset_phys after DAT mode is enabled
just yet, but still make it a persistent boot variable
for later use.

Use the __kaslr_offset and __kaslr_offset_phys offsets in
their proper contexts and alter the kaslr_adjust_relocs()
function to distinguish between the two.
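
As a rough stand-alone sketch of the distinction (values are
invented; with this patch both offsets are still equal): the
relocation slot is located via the physical offset, because the
image is patched while DAT is still off, whereas addresses the
relocated kernel will use are biased by the virtual offset:

#include <stdio.h>

int main(void)
{
	unsigned long r_offset = 0x150000UL;		/* hypothetical Elf64_Rela r_offset */
	unsigned long r_addend = 0x160000UL;		/* hypothetical addend */
	unsigned long kaslr_offset = 0x2000000UL;	/* virtual offset (hypothetical) */
	unsigned long kaslr_offset_phys = 0x2000000UL;	/* physical offset (hypothetical) */

	/* Slot to patch: reached through the physical offset (DAT is off). */
	unsigned long loc = r_offset + kaslr_offset_phys;
	/* Value to store: an address in the virtual address space. */
	unsigned long val = r_addend + kaslr_offset;

	printf("write 0x%lx into the slot at 0x%lx\n", val, loc);
	return 0;
}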

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Author: Alexander Gordeev <agordeev@linux.ibm.com>
Date:   2024-02-20 14:35:43 +01:00
Parent: 236f324b74
Commit: 3bb11234b1
4 changed files with 20 additions and 9 deletions


@@ -17,6 +17,9 @@ config ARCH_HAS_ILOG2_U32
 
 config ARCH_HAS_ILOG2_U64
 	def_bool n
 
+config ARCH_PROC_KCORE_TEXT
+	def_bool y
+
 config GENERIC_HWEIGHT
 	def_bool y


@@ -153,8 +153,10 @@ void print_pgm_check_info(void)
 	decompressor_printk("Kernel command line: %s\n", early_command_line);
 	decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
 			    S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
-	if (kaslr_enabled())
+	if (kaslr_enabled()) {
 		decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
+		decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
+	}
 	decompressor_printk("PSW : %016lx %016lx (%pS)\n",
 			    S390_lowcore.psw_save_area.mask,
 			    S390_lowcore.psw_save_area.addr,


@@ -142,7 +142,8 @@ static void copy_bootdata(void)
 }
 
 #ifdef CONFIG_PIE_BUILD
-static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
+static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
+				unsigned long offset, unsigned long phys_offset)
 {
 	Elf64_Rela *rela_start, *rela_end, *rela;
 	int r_type, r_sym, rc;
@@ -153,7 +154,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
 	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
 	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
 	for (rela = rela_start; rela < rela_end; rela++) {
-		loc = rela->r_offset + offset;
+		loc = rela->r_offset + phys_offset;
 		val = rela->r_addend;
 		r_sym = ELF64_R_SYM(rela->r_info);
 		if (r_sym) {
@@ -194,7 +195,8 @@ static void free_relocs(void)
 	physmem_free(RR_RELOC);
 }
 
-static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
+static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
+				unsigned long offset, unsigned long phys_offset)
 {
 	int *reloc;
 	long loc;
@@ -428,8 +430,9 @@ void startup_kernel(void)
 						     THREAD_SIZE, vmlinux.default_lma,
 						     ident_map_size);
 		if (vmlinux_lma) {
-			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
-			kaslr_adjust_vmlinux_info(__kaslr_offset);
+			__kaslr_offset_phys = vmlinux_lma - vmlinux.default_lma;
+			kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
+			__kaslr_offset = __kaslr_offset_phys;
 		}
 	}
 	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
@@ -438,7 +441,7 @@ void startup_kernel(void)
 	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
 		img = decompress_kernel();
 		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
-	} else if (__kaslr_offset) {
+	} else if (__kaslr_offset_phys) {
 		img = (void *)vmlinux.default_lma;
 		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
 		memset(img, 0, vmlinux.image_size);
@@ -465,7 +468,8 @@ void startup_kernel(void)
 	 * to bootdata made by setup_vmem()
 	 */
 	clear_bss_section(vmlinux_lma);
-	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
+	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size,
+			    __kaslr_offset, __kaslr_offset_phys);
 	kaslr_adjust_got(__kaslr_offset);
 	free_relocs();
 	setup_vmem(asce_limit);
@@ -475,7 +479,7 @@ void startup_kernel(void)
 	 * Save KASLR offset for early dumps, before vmcore_info is set.
 	 * Mark as uneven to distinguish from real vmcore_info pointer.
 	 */
-	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
+	S390_lowcore.vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;
 
 	/*
 	 * Jump to the decompressed kernel entry point and switch DAT mode on.


@@ -180,12 +180,14 @@ int arch_make_page_accessible(struct page *page);
 
 struct vm_layout {
 	unsigned long kaslr_offset;
+	unsigned long kaslr_offset_phys;
 	unsigned long identity_size;
 };
 
 extern struct vm_layout vm_layout;
 
 #define __kaslr_offset		vm_layout.kaslr_offset
+#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
 #define ident_map_size		vm_layout.identity_size
 
 static inline unsigned long kaslr_offset(void)