Merge tag 'mm-hotfixes-stable-2022-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "Mainly MM fixes. About half for issues which were introduced after
  5.18 and the remainder for longer-term issues"

* tag 'mm-hotfixes-stable-2022-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: split huge PUD on wp_huge_pud fallback
  nilfs2: fix incorrect masking of permission flags for symlinks
  mm/rmap: fix dereferencing invalid subpage pointer in try_to_migrate_one()
  riscv/mm: fix build error while PAGE_TABLE_CHECK enabled without MMU
  Documentation: highmem: use literal block for code example in highmem.h comment
  mm: sparsemem: fix missing higher order allocation splitting
  mm/damon: use set_huge_pte_at() to make huge pte old
  sh: convert nommu io{re,un}map() to static inline functions
  mm: userfaultfd: fix UFFDIO_CONTINUE on fallocated shmem pages

Linus Torvalds, 2022-07-11 12:49:56 -07:00
commit 8e59a6a7a4
9 changed files with 63 additions and 38 deletions

@@ -38,7 +38,7 @@ config RISCV
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU

@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
#endif /* CONFIG_HAVE_IOREMAP_PROT */
#else /* CONFIG_MMU */
#define iounmap(addr) do { } while (0)
#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset))
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
return (void __iomem *)(unsigned long)offset;
}
static inline void iounmap(volatile void __iomem *addr) { }
#endif /* CONFIG_MMU */
#define ioremap_uc ioremap

@@ -198,6 +198,9 @@ static inline int nilfs_acl_chmod(struct inode *inode)
static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
{
if (S_ISLNK(inode->i_mode))
return 0;
inode->i_mode &= ~current_umask();
return 0;
}

@@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
* It is used in atomic context when code wants to access the contents of a
* page that might be allocated from high memory (see __GFP_HIGHMEM), for
* example a page in the pagecache. The API has two functions, and they
* can be used in a manner similar to the following:
* can be used in a manner similar to the following::
*
* -- Find the page of interest. --
* struct page *page = find_get_page(mapping, offset);
* // Find the page of interest.
* struct page *page = find_get_page(mapping, offset);
*
* -- Gain access to the contents of that page. --
* void *vaddr = kmap_atomic(page);
* // Gain access to the contents of that page.
* void *vaddr = kmap_atomic(page);
*
* -- Do something to the contents of that page. --
* memset(vaddr, 0, PAGE_SIZE);
* // Do something to the contents of that page.
* memset(vaddr, 0, PAGE_SIZE);
*
* -- Unmap that page. --
* kunmap_atomic(vaddr);
* // Unmap that page.
* kunmap_atomic(vaddr);
*
* Note that the kunmap_atomic() call takes the result of the kmap_atomic()
* call, not the argument.

@@ -336,8 +336,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
if (pte_young(entry)) {
referenced = true;
entry = pte_mkold(entry);
huge_ptep_set_access_flags(vma, addr, pte, entry,
vma->vm_flags & VM_WRITE);
set_huge_pte_at(mm, addr, pte, entry);
}
#ifdef CONFIG_MMU_NOTIFIER

@@ -4798,6 +4798,19 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
static vm_fault_t create_huge_pud(struct vm_fault *vmf)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vmf->vma))
return VM_FAULT_FALLBACK;
if (vmf->vma->vm_ops->huge_fault)
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
return VM_FAULT_FALLBACK;
}
static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
/* No support for anonymous transparent PUD pages yet */
@@ -4812,19 +4825,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
split:
/* COW or write-notify not handled on PUD level: split pud.*/
__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
return VM_FAULT_FALLBACK;
}
static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vmf->vma))
return VM_FAULT_FALLBACK;
if (vmf->vma->vm_ops->huge_fault)
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
return VM_FAULT_FALLBACK;
}

@@ -1899,8 +1899,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/* Unexpected PMD-mapped THP? */
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
subpage = folio_page(folio,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
if (folio_is_zone_device(folio)) {
/*
* Our PTE is a non-present device exclusive entry and
* calculating the subpage as for the common case would
* result in an invalid pointer.
*
* Since only PAGE_SIZE pages can currently be
* migrated, just set it to page. This will need to be
* changed when hugepage migrations to device private
* memory are supported.
*/
VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
subpage = &folio->page;
} else {
subpage = folio_page(folio,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
}
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
PageAnonExclusive(subpage);
@@ -1993,15 +2008,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/*
* No need to invalidate here it will synchronize on
* against the special swap migration pte.
*
* The assignment to subpage above was computed from a
* swap PTE which results in an invalid pointer.
* Since only PAGE_SIZE pages can currently be
* migrated, just set it to page. This will need to be
* changed when hugepage migrations to device private
* memory are supported.
*/
subpage = &folio->page;
} else if (PageHWPoison(subpage)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {

@@ -78,6 +78,14 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_leaf(*pmd))) {
/*
* Higher order allocations from buddy allocator must be able to
* be treated as independent small pages (as they can be freed
* individually).
*/
if (!PageReserved(page))
split_page(page, get_order(PMD_SIZE));
/* Make pte visible before pmd. See comment in pmd_install(). */
smp_wmb();
pmd_populate_kernel(&init_mm, pmd, pgtable);

@@ -246,7 +246,10 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
struct page *page;
int ret;
ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);
/* Our caller expects us to return -EFAULT if we failed to find page. */
if (ret == -ENOENT)
ret = -EFAULT;
if (ret)
goto out;
if (!page) {