mm: memcg: rename and document global_reclaim()
Evidently, global_reclaim() can be a confusing name, especially since a
function by that name used to exist with a subtly different definition
(removed by commit b5ead35e7e ("mm: vmscan: naming fixes:
global_reclaim() and sane_reclaim()")).  The name suggests non-cgroup
reclaim, even though the function returns true for cgroup reclaim on the
root memcg (through memory.reclaim).

Rename it to root_reclaim() in an attempt to make it less ambiguous, and
add documentation to it as well as to cgroup_reclaim().
Link: https://lkml.kernel.org/r/20230621023053.432374-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reported-by: Johannes Weiner <hannes@cmpxchg.org>
Closes: https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/
Acked-by: Yu Zhao <yuzhao@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7a704474b3
parent 7302338a14
committed by Andrew Morton

 mm/vmscan.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -429,12 +429,17 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
 	up_read(&shrinker_rwsem);
 }
 
+/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
 static bool cgroup_reclaim(struct scan_control *sc)
 {
 	return sc->target_mem_cgroup;
 }
 
-static bool global_reclaim(struct scan_control *sc)
+/*
+ * Returns true for reclaim on the root cgroup. This is true for direct
+ * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
+ */
+static bool root_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
 }
@@ -489,7 +494,7 @@ static bool cgroup_reclaim(struct scan_control *sc)
 	return false;
 }
 
-static bool global_reclaim(struct scan_control *sc)
+static bool root_reclaim(struct scan_control *sc)
 {
 	return true;
 }
@@ -546,7 +551,7 @@ static void flush_reclaim_state(struct scan_control *sc)
 	 * memcg reclaim, to make reporting more accurate and reduce
 	 * underestimation, but it's probably not worth the complexity for now.
 	 */
-	if (current->reclaim_state && global_reclaim(sc)) {
+	if (current->reclaim_state && root_reclaim(sc)) {
 		sc->nr_reclaimed += current->reclaim_state->reclaimed;
 		current->reclaim_state->reclaimed = 0;
 	}
@@ -5325,7 +5330,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
 static unsigned long get_nr_to_reclaim(struct scan_control *sc)
 {
 	/* don't abort memcg reclaim to ensure fairness */
-	if (!global_reclaim(sc))
+	if (!root_reclaim(sc))
 		return -1;
 
 	return max(sc->nr_to_reclaim, compact_gap(sc->order));
@@ -5477,7 +5482,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 {
 	struct blk_plug plug;
 
-	VM_WARN_ON_ONCE(global_reclaim(sc));
+	VM_WARN_ON_ONCE(root_reclaim(sc));
 	VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
 
 	lru_add_drain();
@@ -5538,7 +5543,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
 	struct blk_plug plug;
 	unsigned long reclaimed = sc->nr_reclaimed;
 
-	VM_WARN_ON_ONCE(!global_reclaim(sc));
+	VM_WARN_ON_ONCE(!root_reclaim(sc));
 
 	/*
 	 * Unmapped clean folios are already prioritized. Scanning for more of
@@ -6260,7 +6265,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	bool proportional_reclaim;
 	struct blk_plug plug;
 
-	if (lru_gen_enabled() && !global_reclaim(sc)) {
+	if (lru_gen_enabled() && !root_reclaim(sc)) {
 		lru_gen_shrink_lruvec(lruvec, sc);
 		return;
 	}
@@ -6501,7 +6506,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
 
-	if (lru_gen_enabled() && global_reclaim(sc)) {
+	if (lru_gen_enabled() && root_reclaim(sc)) {
 		lru_gen_shrink_node(pgdat, sc);
 		return;
 	}
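As an aside for readers skimming the diff: the behavioral point in the
commit message, that root_reclaim() is true not only for global
(direct/kswapd) reclaim but also for cgroup reclaim targeting the root
memcg, can be checked with a small standalone sketch. The following is
ordinary userspace C, not kernel code: struct mem_cgroup and struct
scan_control are stubbed down to the single field each predicate reads,
and the two predicates simply mirror the post-patch logic.

/*
 * Userspace sketch only: stubbed stand-ins for the kernel structures,
 * modeling just the fields the two predicates actually read.
 */
#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup { bool is_root; };
struct scan_control { struct mem_cgroup *target_mem_cgroup; };

static bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return memcg->is_root;
}

/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/* Returns true for reclaim on the root cgroup: direct allocator reclaim,
 * or reclaim through cgroup interfaces on the root cgroup. */
static bool root_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}

int main(void)
{
	struct mem_cgroup root = { .is_root = true };
	struct mem_cgroup child = { .is_root = false };
	struct {
		const char *desc;
		struct scan_control sc;
	} cases[] = {
		{ "global (direct/kswapd) reclaim", { NULL } },
		{ "memory.reclaim on root cgroup",  { &root } },
		{ "limit reclaim on child memcg",   { &child } },
	};

	for (int i = 0; i < 3; i++)
		printf("%-32s cgroup_reclaim=%d root_reclaim=%d\n",
		       cases[i].desc,
		       cgroup_reclaim(&cases[i].sc),
		       root_reclaim(&cases[i].sc));
	return 0;
}

Running it prints cgroup_reclaim=0 root_reclaim=1 for global reclaim,
1/1 for memory.reclaim on the root cgroup, and 1/0 for limit reclaim on
a child memcg, which is exactly the ambiguity the rename is meant to
resolve.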