Mirror of https://github.com/torvalds/linux
mm/mglru: remove CONFIG_MEMCG

Remove CONFIG_MEMCG in a refactoring to improve code readability at the
cost of a few bytes in struct lru_gen_folio per node when CONFIG_MEMCG=n.

Link: https://lkml.kernel.org/r/20231227141205.2200125-4-kinseyho@google.com
Signed-off-by: Kinsey Ho <kinseyho@google.com>
Co-developed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tested-by: Donet Tom <donettom@linux.vnet.ibm.com>
Acked-by: Yu Zhao <yuzhao@google.com>
Cc: kernel test robot <lkp@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 61dd3f246b
commit 745b13e647

3 changed files with 23 additions and 74 deletions
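The per-node cost noted in the changelog comes from the three members of struct lru_gen_folio that were previously compiled out when CONFIG_MEMCG=n. As a rough, abridged sketch of the result (the full definition lives in include/linux/mmzone.h in the diff below and has many more members), each node now always carries the memcg-LRU bookkeeping: roughly two bytes plus one struct hlist_nulls_node (two pointers) before padding.

struct lru_gen_folio {
	/* ... generation counters, folio lists, refault stats ... */
	/* whether the multi-gen LRU is enabled */
	bool enabled;
	/* the memcg generation this lru_gen_folio belongs to */
	u8 gen;
	/* the list segment this lru_gen_folio belongs to */
	u8 seg;
	/* per-node lru_gen_folio list for global reclaim */
	struct hlist_nulls_node list;
};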
include/linux/mm_types.h

@@ -1017,9 +1017,7 @@ struct lru_gen_mm_list {
 
 void lru_gen_add_mm(struct mm_struct *mm);
 void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
 void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
 
 static inline void lru_gen_init_mm(struct mm_struct *mm)
 {
@@ -1050,11 +1048,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm)
 {
 }
 
-#ifdef CONFIG_MEMCG
 static inline void lru_gen_migrate_mm(struct mm_struct *mm)
 {
 }
-#endif
 
 static inline void lru_gen_init_mm(struct mm_struct *mm)
 {
include/linux/mmzone.h

@@ -440,14 +440,12 @@ struct lru_gen_folio {
 	atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
 	/* whether the multi-gen LRU is enabled */
 	bool enabled;
-#ifdef CONFIG_MEMCG
 	/* the memcg generation this lru_gen_folio belongs to */
 	u8 gen;
 	/* the list segment this lru_gen_folio belongs to */
 	u8 seg;
 	/* per-node lru_gen_folio list for global reclaim */
 	struct hlist_nulls_node list;
-#endif
 };
 
 enum {
@@ -493,11 +491,6 @@ struct lru_gen_mm_walk {
 	bool force_scan;
 };
 
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
 /*
  * For each node, memcgs are divided into two generations: the old and the
  * young. For each generation, memcgs are randomly sharded into multiple bins
@@ -555,6 +548,8 @@ struct lru_gen_memcg {
 };
 
 void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
 
 void lru_gen_init_memcg(struct mem_cgroup *memcg);
 void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -563,19 +558,6 @@ void lru_gen_offline_memcg(struct mem_cgroup *memcg);
 void lru_gen_release_memcg(struct mem_cgroup *memcg);
 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
 
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS	1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
-
 #else /* !CONFIG_LRU_GEN */
 
 static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
@@ -590,8 +572,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
 }
 
-#ifdef CONFIG_MEMCG
-
 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
 }
@@ -616,8 +596,6 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
 {
 }
 
-#endif /* CONFIG_MEMCG */
-
 #endif /* CONFIG_LRU_GEN */
 
 struct lruvec {
mm/vmscan.c (67 lines changed)
@@ -4097,13 +4097,6 @@ enum {
 	MEMCG_LRU_YOUNG,
 };
 
-#ifdef CONFIG_MEMCG
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
-	return READ_ONCE(lruvec->lrugen.seg);
-}
-
 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
 {
 	int seg;
@@ -4150,6 +4143,8 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
 	spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
 }
 
+#ifdef CONFIG_MEMCG
+
 void lru_gen_online_memcg(struct mem_cgroup *memcg)
 {
 	int gen;
@@ -4217,18 +4212,11 @@ void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
 	struct lruvec *lruvec = get_lruvec(memcg, nid);
 
 	/* see the comment on MEMCG_NR_GENS */
-	if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
+	if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
 		lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
 }
 
-#else /* !CONFIG_MEMCG */
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
-	return 0;
-}
-
-#endif
+#endif /* CONFIG_MEMCG */
 
 /******************************************************************************
  *                          the eviction
@@ -4776,7 +4764,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 
 	if (mem_cgroup_below_low(NULL, memcg)) {
 		/* see the comment on MEMCG_NR_GENS */
-		if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
+		if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
 			return MEMCG_LRU_TAIL;
 
 		memcg_memory_event(memcg, MEMCG_LOW);
@@ -4799,12 +4787,10 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 		return 0;
 
 	/* one retry if offlined or too small */
-	return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+	return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
 	       MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
 }
 
-#ifdef CONFIG_MEMCG
-
 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	int op;
@@ -4896,20 +4882,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 	blk_finish_plug(&plug);
 }
 
-#else /* !CONFIG_MEMCG */
-
-static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
-{
-	BUILD_BUG();
-}
-
-static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-{
-	BUILD_BUG();
-}
-
-#endif
-
 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	int priority;
@@ -5560,6 +5532,18 @@ static const struct file_operations lru_gen_ro_fops = {
 *                          initialization
 ******************************************************************************/
 
+void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+	int i, j;
+
+	spin_lock_init(&pgdat->memcg_lru.lock);
+
+	for (i = 0; i < MEMCG_NR_GENS; i++) {
+		for (j = 0; j < MEMCG_NR_BINS; j++)
+			INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
+	}
+}
+
 void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
 	int i;
@@ -5582,18 +5566,6 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
 
 #ifdef CONFIG_MEMCG
 
-void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-	int i, j;
-
-	spin_lock_init(&pgdat->memcg_lru.lock);
-
-	for (i = 0; i < MEMCG_NR_GENS; i++) {
-		for (j = 0; j < MEMCG_NR_BINS; j++)
-			INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
-	}
-}
-
 void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
@@ -5653,14 +5625,17 @@ late_initcall(init_lru_gen);
 
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	BUILD_BUG();
 }
 
 #endif /* CONFIG_LRU_GEN */
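Taken together, the vmscan.c changes follow one pattern: because lrugen.seg is now present in every build, the lru_gen_memcg_seg() wrapper and its !CONFIG_MEMCG stub can be dropped, and callers read the field directly. A minimal sketch of that before/after, lifted from the hunks above rather than the complete functions:

/* before: built only with CONFIG_MEMCG, with a stub returning 0 otherwise */
static int lru_gen_memcg_seg(struct lruvec *lruvec)
{
	return READ_ONCE(lruvec->lrugen.seg);
}

/* after: callers such as shrink_one() and lru_gen_soft_reclaim() read the
 * field directly; READ_ONCE() still pairs with the writer in
 * lru_gen_rotate_memcg(), which updates seg under pgdat->memcg_lru.lock.
 */
if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
	return MEMCG_LRU_TAIL;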