commit dfa377c35d
Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "7 patches.

  Subsystems affected by this patch series: mm (kasan, mm/slub, mm/madvise,
  and memcg), and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib: use PFN_PHYS() in devmem_is_allowed()
  mm/memcg: fix incorrect flushing of lruvec data in obj_stock
  mm/madvise: report SIGBUS as -EFAULT for MADV_POPULATE_(READ|WRITE)
  mm: slub: fix slub_debug disabling for list of slabs
  slub: fix kmalloc_pagealloc_invalid_free unit test
  kasan, slub: reset tag when printing address
  kasan, kmemleak: reset tags when scanning block

6 changed files with 30 additions and 20 deletions
lib/devmem_is_allowed.c

@@ -19,7 +19,7 @@
  */
 int devmem_is_allowed(unsigned long pfn)
 {
-	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+	if (iomem_is_exclusive(PFN_PHYS(pfn)))
 		return 0;
 	if (!page_is_ram(pfn))
 		return 1;
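Note: PFN_PHYS() casts the PFN to phys_addr_t before shifting, whereas pfn << PAGE_SHIFT is evaluated in unsigned long and can truncate on 32-bit configurations where physical addresses are wider than a long (PAE/LPAE). A minimal userspace sketch of that difference; the type names, macro names and values below are illustrative, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mimic a 32-bit kernel where unsigned long is 32 bits
 * wide but physical addresses (phys_addr_t) are 64 bits wide. */
typedef uint32_t kernel_ulong;   /* stand-in for unsigned long on 32-bit */
typedef uint64_t phys_addr;      /* stand-in for phys_addr_t with PAE/LPAE */

#define PAGE_SHIFT 12
/* Shift in unsigned long, as the old code effectively did: */
#define BAD_PFN_PHYS(pfn)  ((kernel_ulong)((pfn) << PAGE_SHIFT))
/* Cast first, then shift, as PFN_PHYS() does: */
#define GOOD_PFN_PHYS(pfn) ((phys_addr)(pfn) << PAGE_SHIFT)

int main(void)
{
	kernel_ulong pfn = 0x500000;	/* a page above the 4 GiB boundary */

	printf("shift in unsigned long: %#llx\n",
	       (unsigned long long)BAD_PFN_PHYS(pfn));   /* truncated to 0 */
	printf("PFN_PHYS-style:         %#llx\n",
	       (unsigned long long)GOOD_PFN_PHYS(pfn));  /* 0x500000000 */
	return 0;
}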
mm/gup.c (7 changes)

@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 		gup_flags |= FOLL_WRITE;
 
 	/*
-	 * See check_vma_flags(): Will return -EFAULT on incompatible mappings
-	 * or with insufficient permissions.
+	 * We want to report -EINVAL instead of -EFAULT for any permission
+	 * problems or incompatible mappings.
 	 */
+	if (check_vma_flags(vma, gup_flags))
+		return -EINVAL;
+
 	return __get_user_pages(mm, start, nr_pages, gup_flags,
 				NULL, NULL, locked);
 }
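Note: the ordering here is what makes the madvise change below possible. Incompatible mappings and permission problems are rejected with -EINVAL before faulting, so any -EFAULT that __get_user_pages() later returns can only mean the fault itself failed (the SIGBUS/SIGSEGV case). A hedged, self-contained sketch of that "validate first so the late error code is unambiguous" pattern; the helper names and flag checks are made up for illustration and are not kernel APIs:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for check_vma_flags() / __get_user_pages(); illustrative only. */
static bool mapping_compatible(unsigned int vma_flags, unsigned int want)
{
	return (vma_flags & want) == want;
}

static long do_fault_in(unsigned long nr_pages)
{
	(void)nr_pages;
	return -EFAULT;	/* pretend the fault hit a hole: the SIGBUS case */
}

static long fault_in_range(unsigned int vma_flags, unsigned int want,
			   unsigned long nr_pages)
{
	/* Reject permission/mapping problems up front with -EINVAL ... */
	if (!mapping_compatible(vma_flags, want))
		return -EINVAL;
	/* ... so -EFAULT from here on unambiguously means a failed fault. */
	return do_fault_in(nr_pages);
}

int main(void)
{
	printf("incompatible mapping: %ld\n", fault_in_range(0x1, 0x3, 1)); /* -EINVAL */
	printf("failed fault:         %ld\n", fault_in_range(0x3, 0x3, 1)); /* -EFAULT */
	return 0;
}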
mm/kmemleak.c

@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
 	warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
 	kasan_disable_current();
 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
 	kasan_enable_current();
 }

@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
 	kasan_disable_current();
 	kcsan_disable_current();
-	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
 	kasan_enable_current();
 	kcsan_enable_current();
 

@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
 			break;
 
 		kasan_disable_current();
-		pointer = *ptr;
+		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
 		kasan_enable_current();
 
 		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
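Note: all three hunks have the same shape. Kmemleak reads memory it does not own, so the KASAN tag in the pointer's top byte may not match the tag assigned to that memory, and the tag is reset before the access. A simplified illustration of what resetting a tag means for an arm64-style tagged kernel pointer; the mask, tag value and helper below are illustrative, not the kernel's kasan_reset_tag() implementation:

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT	56
#define TAG_MASK	(0xffULL << TAG_SHIFT)

/* Put the top byte back to 0xff, the value an untagged arm64 kernel
 * virtual address carries (illustrative; the real helper is per-arch). */
static uint64_t reset_tag(uint64_t tagged)
{
	return (tagged & ~TAG_MASK) | (0xffULL << TAG_SHIFT);
}

int main(void)
{
	uint64_t untagged = 0xffff000012345678ULL;
	uint64_t tagged   = (untagged & ~TAG_MASK) | (0x3eULL << TAG_SHIFT);

	printf("tagged pointer:  %#llx\n", (unsigned long long)tagged);
	printf("after tag reset: %#llx\n", (unsigned long long)reset_tag(tagged));
	return 0;
}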
mm/madvise.c

@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
 		switch (pages) {
 		case -EINTR:
 			return -EINTR;
-		case -EFAULT: /* Incompatible mappings / permissions. */
+		case -EINVAL: /* Incompatible mappings / permissions. */
 			return -EINVAL;
 		case -EHWPOISON:
 			return -EHWPOISON;
+		case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+			return -EFAULT;
 		default:
 			pr_warn_once("%s: unhandled return value: %ld\n",
 				     __func__, pages);
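Note: from userspace, the visible effect is that madvise(MADV_POPULATE_READ/WRITE) is expected to report EFAULT when populating would raise SIGBUS (for example a file hole past EOF) while keeping EINVAL for incompatible mappings. A hedged sketch of how one might probe the two cases, assuming a kernel and libc that expose MADV_POPULATE_READ (Linux 5.14+); error handling is trimmed for brevity:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_POPULATE_READ
#define MADV_POPULATE_READ 22	/* fallback; value from the asm-generic UAPI headers */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Case 1: PROT_NONE mapping -> populating is a permission problem,
	 * so madvise() is expected to fail with EINVAL. */
	void *none = mmap(NULL, page, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (madvise(none, page, MADV_POPULATE_READ) != 0)
		printf("PROT_NONE mapping: %s\n", strerror(errno));

	/* Case 2: mapping past EOF of an empty temp file -> the fault would
	 * raise SIGBUS, so madvise() is expected to fail with EFAULT. */
	FILE *f = tmpfile();
	void *hole = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fileno(f), 0);
	if (madvise(hole, page, MADV_POPULATE_READ) != 0)
		printf("mapping past EOF:  %s\n", strerror(errno));

	return 0;
}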
mm/memcontrol.c

@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		stock->cached_pgdat = pgdat;
 	} else if (stock->cached_pgdat != pgdat) {
 		/* Flush the existing cached vmstat data */
+		struct pglist_data *oldpg = stock->cached_pgdat;
+
 		if (stock->nr_slab_reclaimable_b) {
-			mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
 			stock->nr_slab_reclaimable_b = 0;
 		}
 		if (stock->nr_slab_unreclaimable_b) {
-			mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
 			stock->nr_slab_unreclaimable_b = 0;
 		}
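Note: the bug pattern is generic. A per-CPU cache accumulates deltas keyed to one node, and when the key changes the pending deltas must be flushed to the node they were accumulated for, not to the new one. A small hedged sketch of the pattern with made-up names (nothing here is the memcg API):

#include <stdio.h>

#define MAX_NODES 4

static long node_stat[MAX_NODES];	/* the "real" counters being updated */

struct stat_stock {
	int  cached_node;		/* node the pending delta belongs to */
	long pending;			/* not yet flushed to node_stat[] */
};

static void flush(struct stat_stock *stock)
{
	node_stat[stock->cached_node] += stock->pending;
	stock->pending = 0;
}

static void mod_stat_cached(struct stat_stock *stock, int node, long delta)
{
	if (node != stock->cached_node) {
		/* Flush to the node the delta was accumulated for (the fix);
		 * flushing to 'node' here would charge the wrong node. */
		flush(stock);
		stock->cached_node = node;
	}
	stock->pending += delta;
}

int main(void)
{
	struct stat_stock stock = { .cached_node = 0 };

	mod_stat_cached(&stock, 0, 5);
	mod_stat_cached(&stock, 1, 3);	/* triggers a flush of node 0 */
	flush(&stock);

	printf("node 0: %ld, node 1: %ld\n", node_stat[0], node_stat[1]);
	return 0;
}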
mm/slub.c (25 changes)

@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
 		unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
-			16, 1, addr, length, 1);
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+			16, 1, kasan_reset_tag((void *)addr), length, 1);
 	metadata_access_disable();
 }
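Note: the earlier code reset the tag on text, the label that is only printed, while print_hex_dump() dereferences addr; the fix resets the tag on the argument that is actually read. A trivial hedged sketch of that distinction (userspace, so untag() is a no-op placeholder rather than the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Placeholder for stripping a pointer tag; in userspace it is a no-op.
 * The point is which argument it is applied to. */
static const void *untag(const void *p)
{
	return p;
}

/* Minimal hex dump: 'label' is only printed, 'buf' is dereferenced, so it
 * is 'buf' that must have its tag handled before the access. */
static void hex_dump(const char *label, const void *buf, size_t len)
{
	const uint8_t *p = untag(buf);

	printf("%s:", label);
	for (size_t i = 0; i < len; i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	uint8_t data[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };

	hex_dump("object bytes", data, sizeof(data));
	return 0;
}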
@@ -1400,12 +1400,13 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
 static int __init setup_slub_debug(char *str)
 {
 	slab_flags_t flags;
+	slab_flags_t global_flags;
 	char *saved_str;
 	char *slab_list;
 	bool global_slub_debug_changed = false;
 	bool slab_list_specified = false;
 
-	slub_debug = DEBUG_DEFAULT_FLAGS;
+	global_flags = DEBUG_DEFAULT_FLAGS;
 	if (*str++ != '=' || !*str)
 		/*
 		 * No options specified. Switch on full debugging.

@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
 
 		if (!slab_list) {
-			slub_debug = flags;
+			global_flags = flags;
 			global_slub_debug_changed = true;
 		} else {
 			slab_list_specified = true;

@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
 
 	/*
 	 * For backwards compatibility, a single list of flags with list of
-	 * slabs means debugging is only enabled for those slabs, so the global
-	 * slub_debug should be 0. We can extended that to multiple lists as
+	 * slabs means debugging is only changed for those slabs, so the global
+	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
 	 * long as there is no option specifying flags without a slab list.
 	 */
 	if (slab_list_specified) {
 		if (!global_slub_debug_changed)
-			slub_debug = 0;
+			global_flags = slub_debug;
 		slub_debug_string = saved_str;
 	}
 out:
+	slub_debug = global_flags;
 	if (slub_debug != 0 || slub_debug_string)
 		static_branch_enable(&slub_debug_enabled);
 	else
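Note: the structure of the fix is to parse everything into a local (global_flags) and assign the global slub_debug once, at out:, instead of mutating the global mid-parse and then trying to undo it. A hedged, generic sketch of that "stage locally, commit once" pattern with invented names (this is not the SLUB parser):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static unsigned int debug_flags;	/* the global being configured */
#define DEBUG_DEFAULT 0x7u

/* Parse an option string into flags; illustrative, not the SLUB syntax. */
static unsigned int parse_flags(const char *str)
{
	return strcmp(str, "off") == 0 ? 0 : DEBUG_DEFAULT;
}

static void setup_debug(const char *str, bool per_slab_only)
{
	/* Stage the result locally ... */
	unsigned int staged = DEBUG_DEFAULT;

	if (str)
		staged = parse_flags(str);
	if (per_slab_only)
		staged = debug_flags;	/* leave the global as it was */

	/* ... and commit the global exactly once at the end. */
	debug_flags = staged;
}

int main(void)
{
	setup_debug("off", false);
	printf("flags after 'off':           %#x\n", debug_flags);
	setup_debug("fzp", true);
	printf("flags after per-slab option: %#x\n", debug_flags);
	return 0;
}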
@@ -3236,12 +3239,12 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(struct page *page, void *object)
 {
 	unsigned int order = compound_order(page);
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
-	kfree_hook(page_address(page));
+	kfree_hook(object);
 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
 	__free_pages(page, order);
 }

@@ -3282,7 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page);
+			free_nonslab_page(page, object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}

@@ -4258,7 +4261,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page);
+		free_nonslab_page(page, object);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
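Note: passing the caller's object pointer to kfree_hook() instead of the recomputed page_address(page) matters because the two differ for an invalid free (a pointer into the middle of a page-allocated buffer), which is what the kmalloc_pagealloc_invalid_free KASAN test exercises; the hook should see, and be able to report, the pointer the caller actually freed. A hedged userspace sketch of the same idea with made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* Stand-in for a sanitizer/free hook: report frees that do not point at
 * the start of the allocation (illustrative, not kfree_hook()/KASAN). */
static void free_hook(void *object, void *base)
{
	if (object != base)
		fprintf(stderr, "invalid free of %p (allocation starts at %p)\n",
			object, base);
}

static void free_large(void *object)
{
	/* Recover the allocation base roughly the way free_nonslab_page()
	 * recovers page_address(page): by rounding the pointer down. */
	void *base = (void *)((uintptr_t)object & ~(uintptr_t)(PAGE_SIZE - 1));

	/* The fix: give the hook the pointer the caller freed, not 'base',
	 * so an offset (invalid) free is still visible to the check above. */
	free_hook(object, base);
	free(base);
}

int main(void)
{
	void *buf = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	free_large((char *)buf + 16);	/* an invalid, offset free */
	return 0;
}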