Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache

Pull page cache updates from Matthew Wilcox:

 - Appoint myself page cache maintainer

 - Fix how scsicam uses the page cache

 - Use the memalloc_nofs_save() API to replace AOP_FLAG_NOFS

 - Remove the AOP flags entirely

 - Remove pagecache_write_begin() and pagecache_write_end()

 - Documentation updates

 - Convert several address_space operations to use folios:
     - is_dirty_writeback
     - readpage becomes read_folio
     - releasepage becomes release_folio
     - freepage becomes free_folio

 - Change filler_t to require a struct file pointer be the first
   argument like ->read_folio

* tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache: (107 commits)
  nilfs2: Fix some kernel-doc comments
  Appoint myself page cache maintainer
  fs: Remove aops->freepage
  secretmem: Convert to free_folio
  nfs: Convert to free_folio
  orangefs: Convert to free_folio
  fs: Add free_folio address space operation
  fs: Convert drop_buffers() to use a folio
  fs: Change try_to_free_buffers() to take a folio
  jbd2: Convert release_buffer_page() to use a folio
  jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio
  reiserfs: Convert release_buffer_page() to use a folio
  fs: Remove last vestiges of releasepage
  ubifs: Convert to release_folio
  reiserfs: Convert to release_folio
  orangefs: Convert to release_folio
  ocfs2: Convert to release_folio
  nilfs2: Remove comment about releasepage
  nfs: Convert to release_folio
  jfs: Convert to release_folio
  ...
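To make the shape of the conversion concrete, here is a minimal sketch, not taken from this tree, of a filesystem's address_space_operations after these changes; the myfs_* names are hypothetical:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical filesystem: read_folio replaces ->readpage,
 * release_folio replaces ->releasepage, free_folio replaces ->freepage. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	/* Fill the folio from backing store here ... */
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* Drop private state; return true if the folio can now be freed. */
	return true;
}

static void myfs_free_folio(struct folio *folio)
{
	/* Final teardown of per-folio private data. */
}

static const struct address_space_operations myfs_aops = {
	.read_folio	= myfs_read_folio,
	.release_folio	= myfs_release_folio,
	.free_folio	= myfs_free_folio,
};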
@@ -492,7 +492,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
 	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
 }
 
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
 
 pgoff_t page_cache_next_miss(struct address_space *mapping,
 		pgoff_t index, unsigned long max_scan);
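A filler now has the same shape as ->read_folio. A hedged sketch of a private filler under the new typedef (myfs_filler is an invented name):

static int myfs_filler(struct file *file, struct folio *folio)
{
	/* Populate the folio, then mark it uptodate and unlock it,
	 * exactly as a ->read_folio implementation would. */
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}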
@@ -735,7 +735,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
 }
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
-			pgoff_t index, unsigned flags);
+			pgoff_t index);
 
 /*
  * Returns locked page at given index in given cache, creating it if needed.
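With the flags argument gone, callers that relied on AOP_FLAG_NOFS switch to the scoped memalloc_nofs_save() API the merge message mentions. A sketch, assuming a hypothetical myfs helper:

#include <linux/pagemap.h>
#include <linux/sched/mm.h>

static struct page *myfs_get_write_page(struct address_space *mapping,
					 pgoff_t index)
{
	unsigned int nofs = memalloc_nofs_save();
	struct page *page;

	/* Allocations inside this scope implicitly behave as GFP_NOFS. */
	page = grab_cache_page_write_begin(mapping, index);
	memalloc_nofs_restore(nofs);
	return page;	/* locked page, or NULL on failure */
}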
@@ -747,9 +747,9 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
 }
 
 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
-		filler_t *filler, void *data);
+		filler_t *filler, struct file *file);
 struct page *read_cache_page(struct address_space *, pgoff_t index,
-		filler_t *filler, void *data);
+		filler_t *filler, struct file *file);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
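Callers now pass the file through to the filler rather than an opaque data cookie; passing a NULL filler falls back to mapping->a_ops->read_folio(). A hedged usage sketch:

	struct folio *folio = read_cache_folio(mapping, index, NULL, file);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* The folio is uptodate and referenced (not locked) here. */
	folio_put(folio);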
@@ -888,6 +888,18 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
 static inline bool folio_trylock(struct folio *folio)
 {
 	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
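Usage along the lines the new kernel-doc suggests, sketched for a batch walk where sleeping on one locked folio is not worth it:

	/* Skip folios we cannot lock immediately; revisit them later. */
	if (!folio_trylock(folio))
		continue;
	/* ... operate on the locked folio ... */
	folio_unlock(folio);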
@@ -901,6 +913,28 @@ static inline int trylock_page(struct page *page)
 	return folio_trylock(page_folio(page));
 }
 
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
 static inline void folio_lock(struct folio *folio)
 {
 	might_sleep();
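The documented ordering rule can be packaged as below; lock_two_folios() is a hypothetical helper, not an API added by this merge:

/* Lock two folios of the same address_space in ascending index
 * order, per the folio_lock() kernel-doc, to avoid ABBA deadlock. */
static void lock_two_folios(struct folio *a, struct folio *b)
{
	if (a->index > b->index)
		swap(a, b);
	folio_lock(a);
	folio_lock(b);
}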
@@ -908,8 +942,16 @@ static inline void folio_lock(struct folio *folio)
 	__folio_lock(folio);
 }
 
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
  */
 static inline void lock_page(struct page *page)
 {
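As a sketch of what "legacy" means here, both spellings below take the same underlying lock bit, since all pages of a folio share it:

	lock_page(page);			/* legacy spelling */
	unlock_page(page);

	folio_lock(page_folio(page));		/* folio-native equivalent */
	folio_unlock(page_folio(page));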
@@ -921,6 +963,16 @@ static inline void lock_page(struct page *page)
 	__folio_lock(folio);
 }
 
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
+ */
 static inline int folio_lock_killable(struct folio *folio)
 {
 	might_sleep();
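A hedged usage sketch; the only extra obligation over folio_lock() is checking for -EINTR:

	int err = folio_lock_killable(folio);

	if (err)
		return err;	/* -EINTR: fatal signal, folio NOT locked */
	/* ... folio is locked here ... */
	folio_unlock(folio);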
@@ -967,8 +1019,8 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr);
  * Wait for a folio to be unlocked.
  *
  * This must be called with the caller "holding" the folio,
- * ie with increased "page->count" so that the folio won't
- * go away during the wait..
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
  */
 static inline void folio_wait_locked(struct folio *folio)
 {
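The reworded comment still encodes the same contract; a minimal sketch of honouring it:

	/* Hold a reference across the wait so the folio cannot be freed. */
	folio_get(folio);
	folio_wait_locked(folio);	/* waits for unlock; does not acquire */
	folio_put(folio);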
@@ -1015,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
 	if (folio_test_dirty(folio))
 		__folio_cancel_dirty(folio);
 }
-static inline void cancel_dirty_page(struct page *page)
-{
-	folio_cancel_dirty(page_folio(page));
-}
 bool folio_clear_dirty_for_io(struct folio *folio);
 bool clear_page_dirty_for_io(struct page *page);
 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
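With the wrapper gone, any remaining page-based caller open-codes the conversion, roughly:

	folio_cancel_dirty(page_folio(page));	/* was cancel_dirty_page(page) */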
@@ -1191,7 +1239,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
  * @mapping: address_space which holds the pagecache and I/O vectors
  * @ra: file_ra_state which holds the readahead state
  * @file: Used by the filesystem for authentication.
- * @page: The page at @index which triggered the readahead call.
+ * @folio: The folio at @index which triggered the readahead call.
  * @index: Index of first page to be read.
  * @req_count: Total number of pages being read by the caller.
  *
@@ -1203,10 +1251,10 @@ void page_cache_sync_readahead(struct address_space *mapping,
 static inline
 void page_cache_async_readahead(struct address_space *mapping,
 		struct file_ra_state *ra, struct file *file,
-		struct page *page, pgoff_t index, unsigned long req_count)
+		struct folio *folio, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page_folio(page), req_count);
+	page_cache_async_ra(&ractl, folio, req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
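A caller-side sketch of the new signature, assuming the caller already has the folio that carries the readahead flag:

	/* The triggering folio is now passed directly instead of a page. */
	if (folio_test_readahead(folio))
		page_cache_async_readahead(mapping, ra, file, folio,
					   folio->index, req_count);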