mm: kill lock|unlock_page_memcg()
Since commit c7c3dec1c9 ("mm: rmap: remove lock_page_memcg()"), there are
no more users, so kill lock_page_memcg() and unlock_page_memcg().
Link: https://lkml.kernel.org/r/20230614143612.62575-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 6c77b607ee
parent 399fd496c4
committed by Andrew Morton
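
The removed helpers were one-line wrappers around the folio API, as the hunks
below show, so any remaining caller converts mechanically. A minimal sketch of
that conversion (not from the patch; example_dirty_page() is a made-up caller,
while folio_memcg_lock(), folio_memcg_unlock() and page_folio() are the kernel
interfaces the wrappers forwarded to):

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    /* Hypothetical caller, shown only to illustrate the conversion. */
    static void example_dirty_page(struct page *page)
    {
            struct folio *folio = page_folio(page);

            /* was: lock_page_memcg(page); */
            folio_memcg_lock(folio);

            /* ... touch memcg-accounted state of the folio ... */

            /* was: unlock_page_memcg(page); */
            folio_memcg_unlock(folio);
    }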
@@ -297,7 +297,7 @@ Lock order is as follows::
 
   Page lock (PG_locked bit of page->flags)
     mm->page_table_lock or split pte_lock
-      lock_page_memcg (memcg->move_lock)
+      folio_memcg_lock (memcg->move_lock)
        mapping->i_pages lock
          lruvec->lru_lock.
 
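
As a reading aid for the order above, a minimal sketch of taking the two
innermost locks in the documented order, memcg->move_lock before the
mapping->i_pages lock (example_account_dirty() is a made-up helper; the
locking calls themselves are existing kernel APIs):

    #include <linux/fs.h>
    #include <linux/memcontrol.h>
    #include <linux/xarray.h>

    /* Illustrative only: respects the nesting documented above. */
    static void example_account_dirty(struct folio *folio,
                                      struct address_space *mapping)
    {
            unsigned long flags;

            folio_memcg_lock(folio);                    /* memcg->move_lock */
            xa_lock_irqsave(&mapping->i_pages, flags);  /* mapping->i_pages lock */
            /* ... dirty accounting that needs both locks ... */
            xa_unlock_irqrestore(&mapping->i_pages, flags);
            folio_memcg_unlock(folio);
    }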
@@ -419,7 +419,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
  *
  * - the folio lock
  * - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
  * - exclusive reference
  * - mem_cgroup_trylock_pages()
  *
@@ -949,8 +949,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 
 void folio_memcg_lock(struct folio *folio);
 void folio_memcg_unlock(struct folio *folio);
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);
 
 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 
@@ -1438,14 +1436,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
 static inline void folio_memcg_lock(struct folio *folio)
 {
 }
@@ -117,7 +117,7 @@
  *    ->i_pages lock            (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock        (page_remove_rmap->lock_page_memcg)
+ *    ->memcg->move_lock        (page_remove_rmap->folio_memcg_lock)
  *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->block_dirty_folio)
@@ -2148,17 +2148,12 @@ again:
          * When charge migration first begins, we can have multiple
          * critical sections holding the fast-path RCU lock and one
          * holding the slowpath move_lock. Track the task who has the
-         * move_lock for unlock_page_memcg().
+         * move_lock for folio_memcg_unlock().
          */
         memcg->move_lock_task = current;
         memcg->move_lock_flags = flags;
 }
 
-void lock_page_memcg(struct page *page)
-{
-        folio_memcg_lock(page_folio(page));
-}
-
 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
         if (memcg && memcg->move_lock_task == current) {
@@ -2186,11 +2181,6 @@ void folio_memcg_unlock(struct folio *folio)
         __folio_memcg_unlock(folio_memcg(folio));
 }
 
-void unlock_page_memcg(struct page *page)
-{
-        folio_memcg_unlock(page_folio(page));
-}
-
 struct memcg_stock_pcp {
         local_lock_t stock_lock;
         struct mem_cgroup *cached; /* this never be root cgroup */
@@ -2866,7 +2856,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
          *
          * - the page lock
          * - LRU isolation
-         * - lock_page_memcg()
+         * - folio_memcg_lock()
          * - exclusive reference
          * - mem_cgroup_trylock_pages()
          */
@@ -5829,7 +5819,7 @@ static int mem_cgroup_move_account(struct page *page,
          * with (un)charging, migration, LRU putback, or anything else
          * that would rely on a stable page's memory cgroup.
          *
-         * Note that lock_page_memcg is a memcg lock, not a page lock,
+         * Note that folio_memcg_lock is a memcg lock, not a page lock,
          * to save space. As soon as we switch page's memory cgroup to a
          * new memcg that isn't locked, the above state can change
          * concurrently again. Make sure we're truly done with it.
@@ -6320,7 +6310,7 @@ static void mem_cgroup_move_charge(void)
 {
         lru_add_drain_all();
         /*
-         * Signal lock_page_memcg() to take the memcg's move_lock
+         * Signal folio_memcg_lock() to take the memcg's move_lock
          * while we're moving its pages to another memcg. Then wait
          * for already started RCU-only updates to finish.
          */
@@ -2597,7 +2597,7 @@ EXPORT_SYMBOL(noop_dirty_folio);
 /*
  * Helper function for set_page_dirty family.
  *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
@@ -2631,7 +2631,7 @@ static void folio_account_dirtied(struct folio *folio,
 /*
  * Helper function for deaccounting dirty page without writeback.
  *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
  */
 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 {
@@ -2650,7 +2650,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
  * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
- * The caller must hold lock_page_memcg().  Most callers have the folio
+ * The caller must hold folio_memcg_lock().  Most callers have the folio
  * locked.  A few have the folio blocked from truncation through other
  * means (eg zap_vma_pages() has it mapped and is holding the page table
  * lock).  This can also be called from mark_buffer_dirty(), which I