mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()

These are the folio equivalents of lock_page_memcg() and
unlock_page_memcg().

lock_page_memcg() and unlock_page_memcg() have too many callers to be
easily replaced in a single patch, so reimplement them as wrappers for
now to be cleaned up later when enough callers have been converted to
use folios.
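
As an illustration, a page-based caller can then be migrated one at a
time (the snippet below is a hypothetical example, not a caller
converted by this patch):

	/* Before: page API; the wrapper hides a compound_head() lookup. */
	lock_page_memcg(page);
	/* ... update state that relies on a stable memcg binding ... */
	unlock_page_memcg(page);

	/* After: resolve the folio once, then use the folio API directly. */
	struct folio *folio = page_folio(page);
	folio_memcg_lock(folio);
	/* ... same update ... */
	folio_memcg_unlock(folio);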

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit f70ad44874
parent 9d8053fc7a
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2021-06-28 17:26:00 -04:00

2 changed files with 39 additions and 16 deletions

include/linux/memcontrol.h

@@ -978,6 +978,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 extern bool cgroup_memory_noswap;
 #endif
 
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
 void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
@@ -1397,6 +1399,14 @@ static inline void unlock_page_memcg(struct page *page)
 {
 }
 
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
 static inline void mem_cgroup_handle_over_high(void)
 {
 }
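
The empty stubs in the second hunk mirror the existing !CONFIG_MEMCG
versions of lock_page_memcg()/unlock_page_memcg(): when the memory
controller is compiled out, the pair compiles to nothing and callers
need no #ifdef guards. A minimal sketch of a hypothetical caller (not
part of this patch) that builds either way:

	/* Builds with or without CONFIG_MEMCG; with it disabled, the
	 * lock/unlock pair is an empty inline and vanishes entirely. */
	static void update_folio_state(struct folio *folio)
	{
		folio_memcg_lock(folio);
		/* ... modify state that memcg accounting also reads ... */
		folio_memcg_unlock(folio);
	}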

mm/memcontrol.c

@@ -1933,18 +1933,17 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 /**
- * lock_page_memcg - lock a page and memcg binding
- * @page: the page
+ * folio_memcg_lock - Bind a folio to its memcg.
+ * @folio: The folio.
  *
- * This function protects unlocked LRU pages from being moved to
+ * This function prevents unlocked LRU folios from being moved to
  * another cgroup.
  *
- * It ensures lifetime of the locked memcg. Caller is responsible
- * for the lifetime of the page.
+ * It ensures lifetime of the bound memcg. The caller is responsible
+ * for the lifetime of the folio.
  */
-void lock_page_memcg(struct page *page)
+void folio_memcg_lock(struct folio *folio)
 {
-	struct page *head = compound_head(page); /* rmap on tail pages */
 	struct mem_cgroup *memcg;
 	unsigned long flags;
 
@@ -1958,7 +1957,7 @@ void lock_page_memcg(struct page *page)
 	if (mem_cgroup_disabled())
 		return;
 again:
-	memcg = page_memcg(head);
+	memcg = folio_memcg(folio);
 	if (unlikely(!memcg))
 		return;
 
@@ -1972,7 +1971,7 @@ void lock_page_memcg(struct page *page)
 		return;
 
 	spin_lock_irqsave(&memcg->move_lock, flags);
-	if (memcg != page_memcg(head)) {
+	if (memcg != folio_memcg(folio)) {
 		spin_unlock_irqrestore(&memcg->move_lock, flags);
 		goto again;
 	}
@@ -1986,9 +1985,15 @@ void lock_page_memcg(struct page *page)
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 }
+EXPORT_SYMBOL(folio_memcg_lock);
+
+void lock_page_memcg(struct page *page)
+{
+	folio_memcg_lock(page_folio(page));
+}
 EXPORT_SYMBOL(lock_page_memcg);
 
-static void __unlock_page_memcg(struct mem_cgroup *memcg)
+static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
@@ -2003,14 +2008,22 @@ static void __unlock_page_memcg(struct mem_cgroup *memcg)
 }
 
 /**
- * unlock_page_memcg - unlock a page and memcg binding
- * @page: the page
+ * folio_memcg_unlock - Release the binding between a folio and its memcg.
+ * @folio: The folio.
+ *
+ * This releases the binding created by folio_memcg_lock(). This does
+ * not change the accounting of this folio to its memcg, but it does
+ * permit others to change it.
  */
+void folio_memcg_unlock(struct folio *folio)
+{
+	__folio_memcg_unlock(folio_memcg(folio));
+}
+EXPORT_SYMBOL(folio_memcg_unlock);
+
 void unlock_page_memcg(struct page *page)
 {
-	struct page *head = compound_head(page);
-
-	__unlock_page_memcg(page_memcg(head));
+	folio_memcg_unlock(page_folio(page));
 }
 EXPORT_SYMBOL(unlock_page_memcg);
 
@@ -5643,7 +5656,7 @@ static int mem_cgroup_move_account(struct page *page,
 	/* caller should have done css_get */
 	page->memcg_data = (unsigned long)to;
 
-	__unlock_page_memcg(from);
+	__folio_memcg_unlock(from);
 
 	ret = 0;
 	nid = page_to_nid(page);
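
Taken together, the pattern a converted caller follows is: bind the
folio, read or update memcg-visible state, release. A hedged sketch
(the function name is illustrative, not part of this patch):

	static void example_update_stats(struct folio *folio)
	{
		struct mem_cgroup *memcg;

		folio_memcg_lock(folio);
		memcg = folio_memcg(folio);	/* stable until unlock */
		/* ... update per-memcg statistics for this folio ... */
		folio_memcg_unlock(folio);
	}

Note that folio_memcg() can return NULL (folio_memcg_lock() itself
checks for this), so a real caller would test the result before
dereferencing it.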