linux/arch/s390/kernel/abs_lowcore.c
Alexander Gordeev 2154e0b328 s390/mm: allocate Absolute Lowcore Area in decompressor
Move the Absolute Lowcore Area allocation to the decompressor.
As a result, the get_abs_lowcore() and put_abs_lowcore() access
brackets become straightforward and no longer require complex
execution-context analysis or handling of LAP and interrupts.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2023-01-13 14:15:06 +01:00

47 lines
1 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <linux/pgtable.h>
#include <asm/abs_lowcore.h>
unsigned long __bootdata_preserved(__abs_lowcore);
/*
 * Map the per-cpu absolute lowcore area for @cpu, page by page.
 *
 * @cpu:   CPU whose lowcore slot within __abs_lowcore is mapped
 * @lc:    lowcore to map; its physical address backs the mapping
 * @alloc: whether __vmem_map_4k_page() may allocate page tables
 *
 * Returns 0 on success or the error from __vmem_map_4k_page().
 * On failure, pages mapped so far are torn down only when @alloc
 * is set: a !@alloc request is expected to come from an atomic
 * context, while the unmap attempt might sleep.
 */
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{
	unsigned long base = __abs_lowcore + (cpu * sizeof(struct lowcore));
	unsigned long phys = __pa(lc);
	int rc = 0;
	int i;

	for (i = 0; i < LC_PAGES; i++) {
		rc = __vmem_map_4k_page(base + i * PAGE_SIZE,
					phys + i * PAGE_SIZE,
					PAGE_KERNEL, alloc);
		if (rc)
			break;
	}
	if (rc && alloc) {
		/* Roll back the pages mapped before the failure */
		while (--i >= 0)
			vmem_unmap_4k_page(base + i * PAGE_SIZE);
	}
	return rc;
}
/*
 * Unmap the per-cpu absolute lowcore area for @cpu, undoing a
 * previous abs_lowcore_map() one 4K page at a time.
 */
void abs_lowcore_unmap(int cpu)
{
	unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
	unsigned long end = addr + LC_PAGES * PAGE_SIZE;

	while (addr < end) {
		vmem_unmap_4k_page(addr);
		addr += PAGE_SIZE;
	}
}