linux/arch/s390/boot/ipl_report.c
Vasily Gorbik f913a66004 s390/boot: rework decompressor reserved tracking
Currently several approaches for finding unused memory in the decompressor
are in use. While "safe_addr" grows towards higher addresses, the vmem
code allocates paging structures top down. The former requires careful
ordering. In addition, the ipl report handling code verifies potential
intersections with secure boot certificates on its own. Neither of the
two approaches is aware of memory holes, and they are not consistent with
each other in low memory conditions.

To solve that, the existing approaches are generalized and combined, and
online memory ranges are now taken into consideration.

physmem_info has been extended to contain reserved memory ranges. A new
set of functions allows handling reserves and finding unused memory.
All reserves and memory allocations are "typed". On an out of memory
condition the decompressor fails with detailed info on the current
reserved ranges and usable online memory:

Linux version 6.2.0 ...
Kernel command line: ... mem=100M
Out of memory allocating 100000 bytes 100000 aligned in range 0:5800000
Reserved memory ranges:
0000000000000000 0000000003e33000 DECOMPRESSOR
0000000003f00000 00000000057648a3 INITRD
00000000063e0000 00000000063e8000 VMEM
00000000063eb000 00000000063f4000 VMEM
00000000063f7800 0000000006400000 VMEM
0000000005800000 0000000006300000 KASAN
Usable online memory ranges (info source: sclp read info [3]):
0000000000000000 0000000006400000
Usable online memory total: 6400000 Reserved: 61b10a3 Free: 24ef5d
Call Trace:
(sp:000000000002bd58 [<0000000000012a70>] physmem_alloc_top_down+0x60/0x14c)
 sp:000000000002bdc8 [<0000000000013756>] _pa+0x56/0x6a
 sp:000000000002bdf0 [<0000000000013bcc>] pgtable_populate+0x45c/0x65e
 sp:000000000002be90 [<00000000000140aa>] setup_vmem+0x2da/0x424
 sp:000000000002bec8 [<0000000000011c20>] startup_kernel+0x428/0x8b4
 sp:000000000002bf60 [<00000000000100f4>] startup_normal+0xd4/0xd4
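
The reserve side of this API is visible in read_ipl_report() in this
file. As a minimal sketch (the address and size below are invented for
illustration; physmem_reserve() and physmem_free() are the calls used
further down):

	/* track an occupied range under a type */
	physmem_reserve(RR_IPLREPORT, 0x10000, 0x8000);
	/* ... it is now excluded from follow-up allocations ... */
	physmem_free(RR_IPLREPORT);	/* drop the whole typed reserve */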

physmem_alloc_range allows finding free memory in a specified range. It
should be used for one-time allocations only, like finding the position
for amode31 and vmlinux.
physmem_alloc_top_down can be used just like physmem_alloc_range, but it
also allows multiple allocations per type and tries to merge sequential
allocations together, which is useful for paging structure allocations.
If sequential allocations cannot be merged together they are "chained",
allowing easy per-type enumeration of reserved ranges and later migration
to memblock. The extra "struct reserved_range" allocated for chaining are
not tracked or reserved themselves; they rely on the fact that both
physmem_alloc_range and physmem_alloc_top_down search for free memory
only below the current position of the top down allocator. All reserved
ranges should be transferred to memblock before memblock allocations are
enabled.
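
As a rough sketch of how callers might use the two allocators (RR_AMODE31
and RR_VMEM are assumed type names here, and the physmem_alloc_range()
parameter order shown is an assumption along the lines of
type/size/align/min/max/die_on_oom):

	/* one-shot placement within a fixed range, e.g. below 2 GB */
	addr = physmem_alloc_range(RR_AMODE31, size, PAGE_SIZE,
				   0, SZ_2G, true);

	/* repeated typed allocations; sequential ones get merged or
	 * chained into per-type reserved ranges */
	pte = physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);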

The startup code has been reordered to delay any memory allocations until
online memory ranges are detected and occupied memory ranges are marked
as reserved, so that they are excluded from follow-up allocations.
IPL report certificates are a special case: the certificate list is
checked together with the other memory reserves until the certificates
are saved elsewhere.
The memory KASAN requires for shadow memory allocation and mapping is
reserved as one large chunk, which is later passed to the KASAN early
initialization code.
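
The certificate check is implemented by ipl_report_certs_intersects() in
this file; a hypothetical caller in the allocator could use it roughly
like this to retry below a conflicting certificate:

	unsigned long cert_start;

	if (ipl_report_certs_intersects(addr, size, &cert_start)) {
		/* candidate range overlaps a certificate blob,
		 * restart the search below cert_start */
	}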

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2023-03-20 11:02:50 +01:00

166 lines
4.2 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/ctype.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/physmem_info.h>
#include <uapi/asm/ipl.h>
#include "boot.h"
int __bootdata_preserved(ipl_secure_flag);
unsigned long __bootdata_preserved(ipl_cert_list_addr);
unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
static struct ipl_rb_certificates *certs;
static struct ipl_rb_components *comps;
static bool ipl_report_needs_saving;
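
/* Iterate over the entries of an IPL report block, bounded by the block length */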
#define for_each_rb_entry(entry, rb) \
	for (entry = rb->entries; \
	     (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
	     entry++)

static unsigned long get_cert_comp_list_size(void)
{
	struct ipl_rb_certificate_entry *cert;
	struct ipl_rb_component_entry *comp;

	/*
	 * Find the length for the IPL report boot data
	 */
	early_ipl_comp_list_size = 0;
	for_each_rb_entry(comp, comps)
		early_ipl_comp_list_size += sizeof(*comp);
	ipl_cert_list_size = 0;
	for_each_rb_entry(cert, certs)
		ipl_cert_list_size += sizeof(unsigned int) + cert->len;
	return ipl_cert_list_size + early_ipl_comp_list_size;
}
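
/*
 * Check whether [addr, addr + size) overlaps one of the certificate blobs
 * referenced by the IPL report and, if so, report where the overlap starts
 * so that the caller can retry below it.
 */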
bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
				 unsigned long *intersection_start)
{
	struct ipl_rb_certificate_entry *cert;

	if (!ipl_report_needs_saving)
		return false;

	for_each_rb_entry(cert, certs) {
		if (intersects(addr, size, cert->addr, cert->len)) {
			*intersection_start = cert->addr;
			return true;
		}
	}
	return false;
}
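
/* Copy the component entries of the IPL report into the boot data list */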
static void copy_components_bootdata(void)
{
	struct ipl_rb_component_entry *comp, *ptr;

	ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
	for_each_rb_entry(comp, comps)
		memcpy(ptr++, comp, sizeof(*ptr));
}
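
/* Flatten the certificates into (length, blob) pairs in the boot data list */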
static void copy_certificates_bootdata(void)
{
	struct ipl_rb_certificate_entry *cert;
	void *ptr;

	ptr = (void *) ipl_cert_list_addr;
	for_each_rb_entry(cert, certs) {
		*(unsigned int *) ptr = cert->len;
		ptr += sizeof(unsigned int);
		memcpy(ptr, (void *) cert->addr, cert->len);
		ptr += cert->len;
	}
}
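
/*
 * Locate the IPL report, remember its certificate and component lists and
 * reserve the whole report so that follow-up allocations do not clobber it.
 */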
int read_ipl_report(void)
{
	struct ipl_pl_hdr *pl_hdr;
	struct ipl_rl_hdr *rl_hdr;
	struct ipl_rb_hdr *rb_hdr;
	unsigned long tmp;
	void *rl_end;

	/*
	 * Check if there is an IPL report by looking at the copy
	 * of the IPL parameter information block.
	 */
	if (!ipl_block_valid ||
	    !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
		return -1;
	ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
	/*
	 * There is an IPL report. To find it, load the pointer to the
	 * IPL parameter information block from lowcore, skip past the
	 * IPL parameter list, then align the address to a double word
	 * boundary.
	 */
	tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
	pl_hdr = (struct ipl_pl_hdr *) tmp;
	tmp = (tmp + pl_hdr->len + 7) & -8UL;
	rl_hdr = (struct ipl_rl_hdr *) tmp;

	/* Walk through the IPL report blocks in the IPL Report list */
	certs = NULL;
	comps = NULL;
	rl_end = (void *) rl_hdr + rl_hdr->len;
	rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
	while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
	       (void *) rb_hdr + rb_hdr->len <= rl_end) {
		switch (rb_hdr->rbt) {
		case IPL_RBT_CERTIFICATES:
			certs = (struct ipl_rb_certificates *) rb_hdr;
			break;
		case IPL_RBT_COMPONENTS:
			comps = (struct ipl_rb_components *) rb_hdr;
			break;
		default:
			break;
		}

		rb_hdr = (void *) rb_hdr + rb_hdr->len;
	}

	/*
	 * With either the component list or the certificate list
	 * missing the kernel will stay ignorant of secure IPL.
	 */
	if (!comps || !certs) {
		certs = NULL;
		return -1;
	}

	ipl_report_needs_saving = true;
	physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
			(unsigned long)rl_end - (unsigned long)pl_hdr);
	return 0;
}
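
/*
 * Copy the component and certificate lists to their final boot data
 * locations and release the reserve held on the original IPL report.
 */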
void save_ipl_cert_comp_list(void)
{
	unsigned long size;

	if (!ipl_report_needs_saving)
		return;

	size = get_cert_comp_list_size();
	early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
	ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;

	copy_components_bootdata();
	copy_certificates_bootdata();
	physmem_free(RR_IPLREPORT);
	ipl_report_needs_saving = false;
}