mm/writeback: Add folio_cancel_dirty()

Turn __cancel_dirty_page() into __folio_cancel_dirty() and add wrappers.
Move the prototypes into pagemap.h since this is page cache functionality.
Saves 44 bytes of kernel text in total; 33 bytes from __folio_cancel_dirty
and 11 from two callers of cancel_dirty_page().
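The new wrappers themselves are not visible in the hunk below; as a rough sketch (assumed shape of the pagemap.h additions, following the usual folio wrapper pattern, not the literal hunk):

/* Sketch only: assumed shape of the prototypes/wrappers moved into pagemap.h. */
void __folio_cancel_dirty(struct folio *folio);

/* Avoid locking and accounting work when the folio is not dirty at all. */
static inline void folio_cancel_dirty(struct folio *folio)
{
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}

/* Legacy page-based entry point: look up the containing folio and delegate. */
static inline void cancel_dirty_page(struct page *page)
{
	folio_cancel_dirty(page_folio(page));
}

The dirty check in folio_cancel_dirty() preserves the old cancel_dirty_page() fast path: no locking or writeback accounting is done when the folio is already clean.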

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Author: Matthew Wilcox (Oracle)
Date:   2021-03-08 16:43:04 -05:00
Commit: fdaf532a23 (parent: fc9b6a538b)
3 changed files with 19 additions and 15 deletions

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2657,28 +2657,28 @@ EXPORT_SYMBOL(set_page_dirty_lock);
  * page without actually doing it through the VM. Can you say "ext3 is
  * horribly ugly"? Thought you could.
  */
-void __cancel_dirty_page(struct page *page)
+void __folio_cancel_dirty(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (mapping_can_writeback(mapping)) {
 		struct inode *inode = mapping->host;
 		struct bdi_writeback *wb;
 		struct wb_lock_cookie cookie = {};
 
-		lock_page_memcg(page);
+		folio_memcg_lock(folio);
 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
-		if (TestClearPageDirty(page))
-			account_page_cleaned(page, mapping, wb);
+		if (folio_test_clear_dirty(folio))
+			folio_account_cleaned(folio, mapping, wb);
 
 		unlocked_inode_to_wb_end(inode, &cookie);
-		unlock_page_memcg(page);
+		folio_memcg_unlock(folio);
 	} else {
-		ClearPageDirty(page);
+		folio_clear_dirty(folio);
 	}
 }
-EXPORT_SYMBOL(__cancel_dirty_page);
+EXPORT_SYMBOL(__folio_cancel_dirty);
 
 /*
  * Clear a page's dirty flag, while caring for dirty memory accounting.
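Existing callers of cancel_dirty_page() keep working through the wrapper; converting a caller to the folio API directly would be mechanical. A hypothetical before/after, for illustration only:

	/* Before: page-based API */
	cancel_dirty_page(page);

	/* After: folio-based API; page_folio() resolves the containing folio */
	folio_cancel_dirty(page_folio(page));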