diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index bb884c14b2f6..b74741e0a053 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4780,7 +4780,9 @@
 
 	prot_virt=	[S390] enable hosting protected virtual machines
 			isolated from the hypervisor (if hardware supports
-			that).
+			that). If enabled, the default kernel base address
+			might be overridden even when Kernel Address Space
+			Layout Randomization is disabled.
 			Format: <bool>
 
 	psi=		[KNL] Enable or disable pressure stall information
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9515d4bf1683..df6b371ed214 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -614,6 +614,25 @@ config RANDOMIZE_BASE
 	  as a security feature that deters exploit attempts relying on
 	  knowledge of the location of kernel internals.
 
+config KERNEL_IMAGE_BASE
+	hex "Kernel image base address"
+	range 0x100000 0x1FFFFFE0000000 if !KASAN
+	range 0x100000 0x1BFFFFE0000000 if KASAN
+	default 0x3FFE0000000 if !KASAN
+	default 0x7FFFE0000000 if KASAN
+	help
+	  This is the address at which the kernel image is loaded when
+	  Kernel Address Space Layout Randomization (KASLR) is disabled.
+
+	  If Protected virtualization guest support is enabled, the
+	  Ultravisor imposes a virtual address limit. If the value of this
+	  option leads to the kernel image exceeding the Ultravisor limit,
+	  this option is ignored and the image is loaded below the limit.
+
+	  If the value of this option leads to the kernel image overlapping
+	  the virtual memory where other data structures are located, this
+	  option is ignored and the image is loaded above the structures.
+
 endmenu
 
 menu "Memory setup"
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 3efa1f457451..fc9c1d51ef8d 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -281,8 +281,8 @@ static unsigned long get_vmem_size(unsigned long identity_size,
 
 static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 {
-	unsigned long kernel_start, kernel_end;
 	unsigned long vmemmap_start;
+	unsigned long kernel_start;
 	unsigned long asce_limit;
 	unsigned long rte_size;
 	unsigned long pages;
@@ -294,11 +294,18 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
 	/* choose kernel address space layout: 4 or 3 levels. */
+	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
+	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
-	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
+	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
+	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
 		asce_limit = _REGION1_SIZE;
-		rte_size = _REGION2_SIZE;
-		vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
+		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
+			rte_size = _REGION2_SIZE;
+			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
+		} else {
+			rte_size = _REGION3_SIZE;
+		}
 	} else {
 		asce_limit = _REGION2_SIZE;
 		rte_size = _REGION3_SIZE;
@@ -308,24 +315,32 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	 * Forcing modules and vmalloc area under the ultravisor
 	 * secure storage limit, so that any vmalloc allocation
 	 * we do could be used to back secure guest storage.
+	 *
+	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
+	 * otherwise asce_limit and rte_size would have been adjusted.
 	 */
 	vmax = adjust_to_uv_max(asce_limit);
 #ifdef CONFIG_KASAN
+	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
-	kernel_end = vmax;
+	vsize = min(vsize, vmax);
 	if (kaslr_enabled()) {
-		unsigned long kaslr_len, slots, pos;
+		unsigned long kernel_end, kaslr_len, slots, pos;
 
-		vsize = min(vsize, vmax);
 		kaslr_len = max(KASLR_LEN, vmax - vsize);
 		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
 		if (get_random(slots, &pos))
 			pos = 0;
-		kernel_end -= pos * THREAD_SIZE;
+		kernel_end = vmax - pos * THREAD_SIZE;
+		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
+		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
+		decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
+	} else {
+		kernel_start = __NO_KASLR_START_KERNEL;
 	}
-	kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
 	__kaslr_offset = kernel_start;
 
 	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index b39d724ab6f6..e1b6355ddbef 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -273,4 +273,8 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
+#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
+#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
+#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
+
 #endif /* _S390_PAGE_H */
diff --git a/arch/s390/tools/relocs.c b/arch/s390/tools/relocs.c
index 30a732c808f3..b4f35506779b 100644
--- a/arch/s390/tools/relocs.c
+++ b/arch/s390/tools/relocs.c
@@ -280,7 +280,7 @@ static int do_reloc(struct section *sec, Elf_Rel *rel)
 	case R_390_GOTOFF64:
 		break;
 	case R_390_64:
-		add_reloc(&relocs64, offset);
+		add_reloc(&relocs64, offset - (ehdr.e_entry - 0x100000));
 		break;
 	default:
 		die("Unsupported relocation type: %d\n", r_type);
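
For illustration, here is a minimal userspace sketch of the base-address
selection that the patched setup_kernel_memory_layout() performs when KASLR
is disabled. The concrete values of THREAD_SIZE and the !KASAN default of
CONFIG_KERNEL_IMAGE_BASE are hard-coded assumptions for the example, and
pick_kernel_start() is a hypothetical helper that only mirrors the two
non-KASLR branches above; it is not the authoritative kernel code.

/*
 * Sketch of the no-KASLR kernel base selection (assumes 64-bit
 * unsigned long). Constants are illustrative assumptions.
 */
#include <stdio.h>

#define THREAD_SIZE             (16UL * 1024)   /* assumed value */
#define KERNEL_IMAGE_SIZE       (512UL * 1024 * 1024)
#define __NO_KASLR_START_KERNEL 0x3FFE0000000UL /* !KASAN default */
#define __NO_KASLR_END_KERNEL   (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)

static unsigned long round_down(unsigned long addr, unsigned long align)
{
	return addr & ~(align - 1);
}

/*
 * Mirror of the two non-KASLR branches: if the fixed window does not
 * fit below vmax (e.g. an Ultravisor limit applies) or other virtual
 * memory structures would overlap it, force the image to the highest
 * fitting base; otherwise use the configured fixed base.
 */
static unsigned long pick_kernel_start(unsigned long vmax,
				       unsigned long vsize,
				       unsigned long kernel_size)
{
	if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL)
		return round_down(vmax - kernel_size, THREAD_SIZE);
	return __NO_KASLR_START_KERNEL;
}

int main(void)
{
	/* vmax below the fixed window: the base address is forced down */
	printf("forced:  %#lx\n",
	       pick_kernel_start(0x20000000000UL, 0x10000000000UL, 0x1000000UL));
	/* no constraint violated: the configured base is used as-is */
	printf("default: %#lx\n",
	       pick_kernel_start(0x8000000000000UL, 0x1000000000UL, 0x1000000UL));
	return 0;
}

The first call demonstrates the forced case that triggers the "kernel base
address is forced" message in the patch; the second returns the configured
CONFIG_KERNEL_IMAGE_BASE unchanged.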