linux/mm/usercopy.c


// SPDX-License-Identifier: GPL-2.0-only
/*
* This implements the various checks for CONFIG_HARDENED_USERCOPY*,
* which are designed to protect kernel memory from needless exposure
* and overwrite under many unintended conditions. This code is based
* on PAX_USERCOPY, which is:
*
* Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
* Security Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
/*
* Checks if a given pointer and length is contained by the current
* stack frame (if possible).
*
* Returns:
* NOT_STACK: not at all on the stack
* GOOD_FRAME: fully within a valid stack frame
* GOOD_STACK: fully on the stack (when frame-checking isn't possible)
* BAD_STACK: error condition (invalid stack position or bad stack frame)
*/
static noinline int check_stack_object(const void *obj, unsigned long len)
{
const void * const stack = task_stack_page(current);
const void * const stackend = stack + THREAD_SIZE;
int ret;
/* Object is not on the stack at all. */
if (obj + len <= stack || stackend <= obj)
return NOT_STACK;
/*
* Reject: object partially overlaps the stack (passing the
* check above means at least one end is within the stack,
* so if this check fails, the other end is outside the stack).
*/
if (obj < stack || stackend < obj + len)
return BAD_STACK;
/* Check if object is safely within a valid frame. */
ret = arch_within_stack_frames(stack, stackend, obj, len);
if (ret)
return ret;
return GOOD_STACK;
}
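/*
* Worked example (illustrative only): with an 8 KiB stack occupying
* [stack, stack + 0x2000), an object at stack + 0x1ff0 with len 0x20
* crosses stackend and is rejected as BAD_STACK, while the same object
* with len 0x10 ends exactly at stackend and passes on to the frame
* check.
*/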
/*
* If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
* an unexpected state during a copy_from_user() or copy_to_user() call.
* There are several checks being performed on the buffer by the
* __check_object_size() function. Normal stack buffer usage should never
* trip the checks, and kernel text addressing will always trip the check.
* For cache objects, it is checking that only the whitelisted range of
* bytes for a given cache is being accessed (via the cache's usersize and
* useroffset fields). To adjust a cache whitelist, use the usercopy-aware
* kmem_cache_create_usercopy() function to create the cache (and
* carefully audit the whitelist range).
*/
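/*
* A minimal sketch (not part of this file) of such whitelisting,
* assuming a hypothetical struct foo: only the 'data' member may be
* copied across the user/kernel boundary, and a copy touching 'lock'
* trips the checks described above.
*
*	struct foo {
*		spinlock_t lock;
*		char data[64];
*	};
*
*	foo_cache = kmem_cache_create_usercopy("foo",
*			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
*			offsetof(struct foo, data),     // useroffset
*			FIELD_SIZEOF(struct foo, data), // usersize
*			NULL);
*/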
void usercopy_warn(const char *name, const char *detail, bool to_user,
unsigned long offset, unsigned long len)
{
WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
to_user ? "exposure" : "overwrite",
to_user ? "from" : "to",
name ? : "unknown?!",
detail ? " '" : "", detail ? : "", detail ? "'" : "",
offset, len);
}
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
unsigned long len)
{
pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
to_user ? "exposure" : "overwrite",
to_user ? "from" : "to",
name ? : "unknown?!",
detail ? " '" : "", detail ? : "", detail ? "'" : "",
offset, len);
/*
* For greater effect, it would be nice to do do_group_exit(),
* but BUG() actually hooks all the lock-breaking and per-arch
* Oops code, so that is used here instead.
*/
BUG();
}
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
unsigned long low, unsigned long high)
{
const unsigned long check_low = ptr;
unsigned long check_high = check_low + n;
/* Does not overlap if entirely above or entirely below. */
if (check_low >= high || check_high <= low)
return false;
return true;
}
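/*
* Example of the half-open interval: with [low,high) = [0x1000,0x2000),
* an object [0xf00,0x1000) has check_high == low and does not overlap,
* while [0xfff,0x100f) does. Treating the object's end as inclusive
* here once caused false positives for buffers ending exactly at the
* start of the kernel text.
*/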
/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
unsigned long n, bool to_user)
{
unsigned long textlow = (unsigned long)_stext;
unsigned long texthigh = (unsigned long)_etext;
unsigned long textlow_linear, texthigh_linear;
if (overlaps(ptr, n, textlow, texthigh))
usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
/*
* Some architectures have virtual memory mappings with a secondary
* mapping of the kernel text, i.e. there is more than one virtual
* kernel address that points to the kernel image. This usually
* happens when there is a separate linear physical memory mapping,
* in which case __pa() is not just the reverse of __va(). This can
* be detected and checked:
*/
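/* lm_alias() is __va(__pa_symbol(x)): the linear-map alias of an image address. */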
textlow_linear = (unsigned long)lm_alias(textlow);
/* No different mapping: we're done. */
if (textlow_linear == textlow)
return;
/* Check the secondary mapping... */
texthigh_linear = (unsigned long)lm_alias(texthigh);
if (overlaps(ptr, n, textlow_linear, texthigh_linear))
usercopy_abort("linear kernel text", NULL, to_user,
ptr - textlow_linear, n);
}
static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
bool to_user)
{
/* Reject if object wraps past end of memory. */
if (ptr + n < ptr)
usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
/* Reject if NULL or ZERO-allocation. */
if (ZERO_OR_NULL_PTR(ptr))
usercopy_abort("null address", NULL, to_user, ptr, n);
}
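/*
* Example: on a 64-bit arch, ptr == 0xfffffffffffffff0 with n == 0x20
* gives ptr + n == 0x10, which is < ptr, so a copy that would wrap
* past the end of the address space is rejected above.
*/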
/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
const void *end = ptr + n - 1;
struct page *endpage;
bool is_reserved, is_cma;
/*
* Sometimes the kernel data regions are not marked Reserved (see
* check below). And sometimes [_sdata,_edata) does not cover
* rodata and/or bss, so check each range explicitly.
*/
/* Allow reads of kernel rodata region (if not marked as Reserved). */
if (ptr >= (const void *)__start_rodata &&
end <= (const void *)__end_rodata) {
if (!to_user)
usercopy_abort("rodata", NULL, to_user, 0, n);
return;
}
/* Allow kernel data region (if not marked as Reserved). */
if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
return;
/* Allow kernel bss region (if not marked as Reserved). */
if (ptr >= (const void *)__bss_start &&
end <= (const void *)__bss_stop)
return;
/* Is the object wholly within one base page? */
if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
((unsigned long)end & (unsigned long)PAGE_MASK)))
return;
/* Allow if fully inside the same compound (__GFP_COMP) page. */
endpage = virt_to_head_page(end);
if (likely(endpage == page))
return;
/*
* Reject if range is entirely either Reserved (i.e. special or
* device memory), or CMA. Otherwise, reject since the object spans
* several independently allocated pages.
*/
is_reserved = PageReserved(page);
is_cma = is_migrate_cma_page(page);
if (!is_reserved && !is_cma)
usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
page = virt_to_head_page(ptr);
if (is_reserved && !PageReserved(page))
usercopy_abort("spans Reserved and non-Reserved pages",
NULL, to_user, 0, n);
if (is_cma && !is_migrate_cma_page(page))
usercopy_abort("spans CMA and non-CMA pages", NULL,
to_user, 0, n);
}
#endif
}
static inline void check_heap_object(const void *ptr, unsigned long n,
bool to_user)
{
struct page *page;
if (!virt_addr_valid(ptr))
return;
page = virt_to_head_page(ptr);
if (PageSlab(page)) {
/* Check slab allocator for flags and size. */
__check_heap_object(ptr, n, page, to_user);
} else {
/* Verify object does not incorrectly span multiple pages. */
check_page_span(ptr, n, page, to_user);
}
}
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
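/*
* When hardening is disabled on the command line (see
* parse_hardened_usercopy() below), this static key is flipped and the
* branch in __check_object_size() is patched out at boot, so the
* disabled checks cost little more than a no-op.
*/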
/*
* Validates that the given object is:
* - not bogus address
* - fully contained by stack (or stack frame, when available)
* - fully within SLAB object (or object whitelist area, when available)
* - not in kernel text
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
if (static_branch_unlikely(&bypass_usercopy_checks))
return;
/* Skip all tests if size is zero. */
if (!n)
return;
/* Check for invalid addresses. */
check_bogus_address((const unsigned long)ptr, n, to_user);
/* Check for bad stack object. */
switch (check_stack_object(ptr, n)) {
case NOT_STACK:
/* Object is not touching the current process stack. */
break;
case GOOD_FRAME:
case GOOD_STACK:
/*
* Object is either in the correct frame (when it
* is possible to check) or just generally on the
* process stack (when frame checking not available).
*/
return;
default:
usercopy_abort("process stack", NULL, to_user, 0, n);
}
/* Check for bad heap object. */
check_heap_object(ptr, n, to_user);
/* Check for object in kernel to avoid text exposure. */
check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
static bool enable_checks __initdata = true;
static int __init parse_hardened_usercopy(char *str)
{
return strtobool(str, &enable_checks);
}
__setup("hardened_usercopy=", parse_hardened_usercopy);
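/* e.g. booting with "hardened_usercopy=off" disables all checks above. */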
static int __init set_hardened_usercopy(void)
{
if (!enable_checks)
static_branch_enable(&bypass_usercopy_checks);
return 1;
}
late_initcall(set_hardened_usercopy);