um: rework userspace stubs to not hard-code stub location

The userspace stubs mostly run on a stack that already points into
the stub data page (and in the case of the syscall stub we can simply
set the stack pointer so that it does).

Rework the stubs to use the stack pointer to derive the start of
the data page, rather than requiring it to be hard-coded.
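
For illustration, a minimal sketch of that derivation (not part of the
patch; the helper name is hypothetical, and it assumes <stub-data.h> is
in scope and UM_KERN_PAGE_SIZE is a power of two). It mirrors what the
reworked stub_segv_handler() does in C and what the stub asm does with
an and-mask:

/*
 * Sketch only: any local variable lives on the stub stack, which sits
 * inside the stub data page, so rounding its address down to the page
 * boundary yields the start of struct stub_data without a hard-coded
 * STUB_DATA constant.
 */
static inline struct stub_data *stub_data_from_sp(void)
{
        int on_stack;

        return (void *)((unsigned long)&on_stack &
                        ~(UM_KERN_PAGE_SIZE - 1));
}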

In the clone stub, also integrate the int3 into the stack remap,
since we really must not use the stack while we remap it.
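
In C-like terms, the combined remap-and-trap is roughly the following
(a control-flow sketch only, reusing the hypothetical stub_data_from_sp()
helper sketched above and assuming the usual stub headers; the real code
must stay in a single asm sequence, because nothing may touch the stack,
not even a function call or return, while its backing page is being
replaced):

/* Sketch, not the patch code itself. */
static void remap_stack_and_trap_sketch(void)
{
        struct stub_data *data = stub_data_from_sp();

        /*
         * Re-map the shared stub data page in place; the raw return
         * value goes into child_err so the parent can check whether
         * the child's remap succeeded.
         */
        data->child_err = (long)mmap(data, UM_KERN_PAGE_SIZE,
                                     PROT_READ | PROT_WRITE,
                                     MAP_FIXED | MAP_SHARED,
                                     data->fd, data->offset);

        /* int3: hand control back to the tracer, never return to C */
        trap_myself();
}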

This prepares for putting the stub at a variable location that's
not part of the normal address space of the userspace processes
running inside the UML machine.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Johannes Berg 2021-01-13 22:09:43 +01:00 committed by Richard Weinberger
parent 84b2789d61
commit 9f0b4807a4
9 changed files with 75 additions and 48 deletions

View file

@@ -20,18 +20,10 @@
* 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*/
#ifdef __ASSEMBLY__
#define _UML_AC(X, Y) (Y)
#else
#define __UML_AC(X, Y) (X(Y))
#define _UML_AC(X, Y) __UML_AC(X, Y)
#endif
#define STUB_START _UML_AC(, 0x100000)
#define STUB_CODE _UML_AC((unsigned long), STUB_START)
#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
#define STUB_START 0x100000UL
#define STUB_CODE STUB_START
#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
#ifndef __ASSEMBLY__

View file

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* for use by sys-$SUBARCH/kernel-offsets.c */
#include <stub-data.h>
DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
@@ -43,3 +44,8 @@ DEFINE(UML_CONFIG_64BIT, CONFIG_64BIT);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT);
#endif
/* for stub */
DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset));
DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err));
DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd));

View file

@@ -41,8 +41,7 @@ stub_clone_handler(void)
goto done;
}
remap_stack(data->fd, data->offset);
goto done;
remap_stack_and_trap();
done:
trap_myself();

View file

@@ -40,6 +40,8 @@ static int __init init_syscall_regs(void)
syscall_regs[REGS_IP_INDEX] = STUB_CODE +
((unsigned long) batch_syscall_stub -
(unsigned long) __syscall_stub_start);
syscall_regs[REGS_SP_INDEX] = STUB_DATA;
return 0;
}

View file

@@ -7,8 +7,8 @@
#define __SYSDEP_STUB_H
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>
#define STUB_SYSCALL_RET EAX
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
@@ -77,17 +77,28 @@ static inline void trap_myself(void)
__asm("int3");
}
static inline void remap_stack(int fd, unsigned long offset)
static inline void remap_stack_and_trap(void)
{
__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
"movl %7, %%ebx ; movl %%eax, (%%ebx)"
: : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
"c" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"S" (MAP_FIXED | MAP_SHARED), "D" (fd),
"a" (offset),
"i" (&((struct stub_data *) STUB_DATA)->child_err)
: "memory");
__asm__ volatile (
"movl %%esp,%%ebx ;"
"andl %0,%%ebx ;"
"movl %1,%%eax ;"
"movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
"movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
"int $0x80 ;"
"addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
"int $3"
: :
"g" (~(UM_KERN_PAGE_SIZE - 1)),
"g" (STUB_MMAP_NR),
"g" (UML_STUB_FIELD_FD),
"g" (UML_STUB_FIELD_OFFSET),
"g" (UML_STUB_FIELD_CHILD_ERR),
"c" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"S" (MAP_FIXED | MAP_SHARED)
:
"memory");
}
#endif

View file

@@ -7,8 +7,8 @@
#define __SYSDEP_STUB_H
#include <sysdep/ptrace_user.h>
#include <generated/asm-offsets.h>
#define STUB_SYSCALL_RET PT_INDEX(RAX)
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)
@@ -82,18 +82,30 @@ static inline void trap_myself(void)
__asm("int3");
}
static inline void remap_stack(long fd, unsigned long offset)
static inline void remap_stack_and_trap(void)
{
__asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
"movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
"movq %%rax, (%%rbx)":
: "a" (STUB_MMAP_NR), "D" (STUB_DATA),
"S" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"g" (MAP_FIXED | MAP_SHARED), "g" (fd),
"g" (offset),
"i" (&((struct stub_data *) STUB_DATA)->child_err)
: __syscall_clobber, "r10", "r8", "r9" );
__asm__ volatile (
"movq %0,%%rax ;"
"movq %%rsp,%%rdi ;"
"andq %1,%%rdi ;"
"movq %2,%%r10 ;"
"movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
"movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
__syscall ";"
"movq %%rsp,%%rdi ; andq %1,%%rdi ;"
"addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
"int3"
: :
"g" (STUB_MMAP_NR),
"g" (~(UM_KERN_PAGE_SIZE - 1)),
"g" (MAP_FIXED | MAP_SHARED),
"g" (UML_STUB_FIELD_FD),
"g" (UML_STUB_FIELD_OFFSET),
"g" (UML_STUB_FIELD_CHILD_ERR),
"S" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE)
:
__syscall_clobber, "r10", "r8", "r9");
}
#endif

View file

@@ -5,21 +5,22 @@
.globl batch_syscall_stub
batch_syscall_stub:
/* load pointer to first operation */
mov $(STUB_DATA+8), %esp
/* %esp comes in as "top of page" */
mov %esp, %ecx
/* %esp has pointer to first operation */
add $8, %esp
again:
/* load length of additional data */
mov 0x0(%esp), %eax
/* if(length == 0) : end of list */
/* write possible 0 to header */
mov %eax, STUB_DATA+4
mov %eax, 0x4(%ecx)
cmpl $0, %eax
jz done
/* save current pointer */
mov %esp, STUB_DATA+4
mov %esp, 0x4(%ecx)
/* skip additional data */
add %eax, %esp
@@ -38,6 +39,10 @@ again:
/* execute syscall */
int $0x80
/* restore top of page pointer in %ecx */
mov %esp, %ecx
andl $(~UM_KERN_PAGE_SIZE) + 1, %ecx
/* check return value */
pop %ebx
cmp %ebx, %eax
@@ -45,7 +50,7 @@ again:
done:
/* save return value */
mov %eax, STUB_DATA
mov %eax, (%ecx)
/* stop */
int3

View file

@@ -4,9 +4,8 @@
.section .__syscall_stub, "ax"
.globl batch_syscall_stub
batch_syscall_stub:
mov $(STUB_DATA), %rbx
/* load pointer to first operation */
mov %rbx, %rsp
/* %rsp has the pointer to first operation */
mov %rsp, %rbx
add $0x10, %rsp
again:
/* load length of additional data */

View file

@@ -11,10 +11,11 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
int stack;
ucontext_t *uc = p;
struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
trap_myself();
}