linux/mm/kasan/kasan.c
Josh Poimboeuf b53f40db59 x86/suspend: fix false positive KASAN warning on suspend/resume
Resuming from a suspend operation is showing a KASAN false positive
warning:

  BUG: KASAN: stack-out-of-bounds in unwind_get_return_address+0x11d/0x130 at addr ffff8803867d7878
  Read of size 8 by task pm-suspend/7774
  page:ffffea000e19f5c0 count:0 mapcount:0 mapping:          (null) index:0x0
  flags: 0x2ffff0000000000()
  page dumped because: kasan: bad access detected
  CPU: 0 PID: 7774 Comm: pm-suspend Tainted: G    B           4.9.0-rc7+ #8
  Hardware name: Gigabyte Technology Co., Ltd. Z170X-UD5/Z170X-UD5-CF, BIOS F5 03/07/2016
  Call Trace:
    dump_stack+0x63/0x82
    kasan_report_error+0x4b4/0x4e0
    ? acpi_hw_read_port+0xd0/0x1ea
    ? kfree_const+0x22/0x30
    ? acpi_hw_validate_io_request+0x1a6/0x1a6
    __asan_report_load8_noabort+0x61/0x70
    ? unwind_get_return_address+0x11d/0x130
    unwind_get_return_address+0x11d/0x130
    ? unwind_next_frame+0x97/0xf0
    __save_stack_trace+0x92/0x100
    save_stack_trace+0x1b/0x20
    save_stack+0x46/0xd0
    ? save_stack_trace+0x1b/0x20
    ? save_stack+0x46/0xd0
    ? kasan_kmalloc+0xad/0xe0
    ? kasan_slab_alloc+0x12/0x20
    ? acpi_hw_read+0x2b6/0x3aa
    ? acpi_hw_validate_register+0x20b/0x20b
    ? acpi_hw_write_port+0x72/0xc7
    ? acpi_hw_write+0x11f/0x15f
    ? acpi_hw_read_multiple+0x19f/0x19f
    ? memcpy+0x45/0x50
    ? acpi_hw_write_port+0x72/0xc7
    ? acpi_hw_write+0x11f/0x15f
    ? acpi_hw_read_multiple+0x19f/0x19f
    ? kasan_unpoison_shadow+0x36/0x50
    kasan_kmalloc+0xad/0xe0
    kasan_slab_alloc+0x12/0x20
    kmem_cache_alloc_trace+0xbc/0x1e0
    ? acpi_get_sleep_type_data+0x9a/0x578
    acpi_get_sleep_type_data+0x9a/0x578
    acpi_hw_legacy_wake_prep+0x88/0x22c
    ? acpi_hw_legacy_sleep+0x3c7/0x3c7
    ? acpi_write_bit_register+0x28d/0x2d3
    ? acpi_read_bit_register+0x19b/0x19b
    acpi_hw_sleep_dispatch+0xb5/0xba
    acpi_leave_sleep_state_prep+0x17/0x19
    acpi_suspend_enter+0x154/0x1e0
    ? trace_suspend_resume+0xe8/0xe8
    suspend_devices_and_enter+0xb09/0xdb0
    ? printk+0xa8/0xd8
    ? arch_suspend_enable_irqs+0x20/0x20
    ? try_to_freeze_tasks+0x295/0x600
    pm_suspend+0x6c9/0x780
    ? finish_wait+0x1f0/0x1f0
    ? suspend_devices_and_enter+0xdb0/0xdb0
    state_store+0xa2/0x120
    ? kobj_attr_show+0x60/0x60
    kobj_attr_store+0x36/0x70
    sysfs_kf_write+0x131/0x200
    kernfs_fop_write+0x295/0x3f0
    __vfs_write+0xef/0x760
    ? handle_mm_fault+0x1346/0x35e0
    ? do_iter_readv_writev+0x660/0x660
    ? __pmd_alloc+0x310/0x310
    ? do_lock_file_wait+0x1e0/0x1e0
    ? apparmor_file_permission+0x18/0x20
    ? security_file_permission+0x73/0x1c0
    ? rw_verify_area+0xbd/0x2b0
    vfs_write+0x149/0x4a0
    SyS_write+0xd9/0x1c0
    ? SyS_read+0x1c0/0x1c0
    entry_SYSCALL_64_fastpath+0x1e/0xad
  Memory state around the buggy address:
   ffff8803867d7700: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
   ffff8803867d7780: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
  >ffff8803867d7800: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 f4
                                                                  ^
   ffff8803867d7880: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00
   ffff8803867d7900: 00 00 00 f1 f1 f1 f1 04 f4 f4 f4 f3 f3 f3 f3 00

KASAN instrumentation poisons the stack when entering a function and
unpoisons it when exiting the function.  However, in the suspend path,
some functions never return, so their stack never gets unpoisoned,
resulting in stale KASAN shadow data which can cause later false
positive warnings like the one above.
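
The kasan_unpoison_task_stack_below() helper defined in mm/kasan/kasan.c is
intended for exactly this situation: it is callable by early resume code
(before the percpu register is set up) to clear stale stack shadow below a
watermark. A minimal sketch of that kind of call site follows; the wrapper
function and the exact point at which it runs are illustrative assumptions,
not the actual patched code:

  /* Illustrative only: clear stale stack shadow below the current
   * stack pointer before the resumed task starts unwinding again. */
  static void example_resume_kasan_fixup(void)
  {
          kasan_unpoison_task_stack_below(__builtin_frame_address(0));
  }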

Reported-by: Scott Bauer <scott.bauer@intel.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-12-06 02:22:44 +01:00

/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
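
/*
 * Worked example (not part of the original file, assuming 'p' is
 * granule-aligned): unpoisoning 10 bytes with KASAN_SHADOW_SCALE_SIZE == 8
 * writes one full shadow byte of 0 for the first 8 bytes, then stores the
 * remainder (10 & KASAN_SHADOW_MASK == 2) in the next shadow byte, meaning
 * "only the first 2 bytes of this granule are accessible":
 *
 *	kasan_unpoison_shadow(p, 10);
 *	// shadow[0] == 0x00	-> p[0..7] accessible
 *	// shadow[1] == 0x02	-> only p[8..9] accessible
 */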
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}
/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}
/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more.  Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
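
/*
 * Worked example (not in the original file): in __asan_load8() further
 * down, 'size' is the compile-time constant 8, so __builtin_constant_p()
 * is true and the switch above collapses at compile time into a direct
 * call to memory_is_poisoned_8(), with no runtime dispatch.  Only the
 * variable-sized __asan_loadN()/__asan_storeN() paths reach
 * memory_is_poisoned_n().
 */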
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations, larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
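
/*
 * Illustrative values (worked from the table above, not in the original
 * file):
 *
 *	optimal_redzone(40)  == 16;	// 40  <= 64 - 16
 *	optimal_redzone(100) == 64;	// 100 >  128 - 32, <= 512 - 64
 *	optimal_redzone(600) == 128;	// 600 >  512 - 64, <= 4096 - 128
 */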
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;
	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
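
/*
 * Rough picture of the per-object layout kasan_cache_create() produces
 * (a sketch, not taken from the original file); the free meta area is
 * only added when the freed object itself can't hold it (RCU caches,
 * caches with constructors, or objects smaller than kasan_free_meta):
 *
 *	| object | kasan_alloc_meta | [kasan_free_meta] | redzone padding |
 *
 * redzone_adjust pads the total so at least optimal_redzone(object_size)
 * bytes of metadata/redzone follow the object, capped at KMALLOC_MAX_SIZE.
 */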
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}
void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
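
/*
 * Worked example (illustrative, not in the original file): a
 * kmalloc(100, GFP_KERNEL) served from a 128-byte cache reaches
 * kasan_kmalloc() with size == 100.  Offsets 0..99 are unpoisoned, the
 * shadow byte for the partial granule 96..103 becomes
 * 100 & KASAN_SHADOW_MASK == 4 ("first 4 bytes of this granule are
 * accessible"), and offsets 104..127 are poisoned with
 * KASAN_KMALLOC_REDZONE.  A read of p[100] therefore already triggers a
 * slab-out-of-bounds report.
 */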
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
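
/*
 * Worked example (illustrative sizes, assuming 4 KiB pages and
 * KASAN_SHADOW_SCALE_SHIFT == 3): a module allocation of 20000 bytes
 * needs 20000 >> 3 == 2500 bytes of shadow, which round_up(..., PAGE_SIZE)
 * turns into a single 4096-byte shadow mapping created by
 * __vmalloc_node_range() above.
 */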
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
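
/*
 * Rough sketch (illustrative, compiler-dependent) of what outline
 * instrumentation turns an 8-byte read of 'x' into; the _noabort name
 * resolves to __asan_load8() through the alias defined above:
 *
 *	__asan_load8_noabort((unsigned long)&x);
 *	val = x;
 *
 * With inline instrumentation the compiler emits the shadow comparison
 * directly and only calls a report function when the check fails.
 */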
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif