mm: compaction: remove unnecessary is_via_compact_memory() checks
Remove from all paths not reachable via /proc/sys/vm/compact_memory.

Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent e8606320e9
commit f98a497e1f
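For context (not part of the diff below): is_via_compact_memory() is the small helper the removed branches relied on, and an order of -1 only reaches these paths when compaction is requested explicitly through /proc/sys/vm/compact_memory. A minimal sketch of the helper, paraphrased from mm/compaction.c around this series rather than quoted from this commit:

/*
 * order == -1 is the sentinel passed down when compaction is triggered
 * explicitly by writing to /proc/sys/vm/compact_memory, rather than to
 * satisfy a specific high-order allocation.
 */
static inline bool is_via_compact_memory(int order)
{
        return order == -1;
}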
mm/compaction.c

@@ -2280,9 +2280,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 		unsigned long available;
 		unsigned long watermark;
 
-		if (is_via_compact_memory(order))
-			return true;
-
 		/* Allocation can already succeed, nothing to do */
 		watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 		if (zone_watermark_ok(zone, order, watermark,
@@ -2848,9 +2845,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 		if (!populated_zone(zone))
 			continue;
 
-		if (is_via_compact_memory(pgdat->kcompactd_max_order))
-			return true;
-
 		/* Allocation can already succeed, check other zones */
 		if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
 				      min_wmark_pages(zone),
@@ -2895,9 +2889,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_deferred(zone, cc.order))
 			continue;
 
-		if (is_via_compact_memory(cc.order))
-			goto compact;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, cc.order,
 				      min_wmark_pages(zone), zoneid, 0))
@@ -2906,7 +2897,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_suitable(zone, cc.order,
 					zoneid) != COMPACT_CONTINUE)
 			continue;
-compact:
+
 		if (kthread_should_stop())
 			return;
 
mm/vmscan.c

@@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 		if (!managed_zone(zone))
 			continue;
 
-		if (sc->order == -1) /* is_via_compact_memory() */
-			return false;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 				      sc->reclaim_idx, 0))
@@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
 
-	if (sc->order == -1) /* is_via_compact_memory() */
-		goto suitable;
-
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 			      sc->reclaim_idx, 0))
@@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	if (compaction_suitable(zone, sc->order,
 				sc->reclaim_idx) == COMPACT_SKIPPED)
 		return false;
-suitable:
+
 	/*
	 * Compaction is already possible, but it takes time to run and there
	 * are potentially other callers using the pages just freed. So proceed
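For reference, the only way order == -1 enters the compaction core is the explicit trigger (echo 1 > /proc/sys/vm/compact_memory); the call sites patched above are only reached with a real allocation order, which is why the removed checks were dead there. A rough, simplified sketch of that trigger path, with the field selection and the compact_zone() call paraphrased rather than copied from upstream:

/* The /proc/sys/vm/compact_memory handler ends up here once per node. */
static void compact_node(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        int zoneid;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct compact_control cc = {
                        .order = -1,            /* the "compact everything" marker */
                        .whole_zone = true,
                        .gfp_mask = GFP_KERNEL,
                        .zone = &pgdat->node_zones[zoneid],
                };

                if (!populated_zone(cc.zone))
                        continue;

                compact_zone(&cc, NULL);        /* exact signature varies by kernel version */
        }
}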