mm: Cleanup faultaround and finish_fault() codepaths
alloc_set_pte() has two users with different requirements: in the faultaround code, it is called from an atomic context and the PTE page table has to be preallocated. finish_fault() can sleep and allocate a page table as needed.

PTL locking rules are also strange, hard to follow and overkill for finish_fault().

Let's untangle the mess. alloc_set_pte() has gone now. All locking is explicit.

The price is some code duplication to handle huge pages in the faultaround path, but it should be fine, having an overall improvement in readability.

Link: https://lore.kernel.org/r/20201229132819.najtavneutnf7ajp@box
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
[will: s/from from/from/ in comment; spotted by willy]
Signed-off-by: Will Deacon <will@kernel.org>
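To make "all locking is explicit" concrete, here is a rough sketch of the sleeping-caller pattern the message describes, written against the do_set_pte() prototype added in the hunks below. It is only an illustration under those assumptions, not the commit's actual finish_fault(), and map_new_page_explicitly() is an invented name.

#include <linux/mm.h>

/*
 * Invented name, illustration only: map @page for a caller that is allowed
 * to sleep, with page-table allocation and PTL handling done explicitly
 * here rather than hidden inside a helper.
 */
static vm_fault_t map_new_page_explicitly(struct vm_fault *vmf,
					  struct page *page)
{
	vm_fault_t ret = 0;

	/* Sleeping context: the PTE page table may be allocated on demand. */
	if (pmd_none(*vmf->pmd) &&
	    unlikely(pte_alloc(vmf->vma->vm_mm, vmf->pmd)))
		return VM_FAULT_OOM;

	/* Locking is explicit: take the PTL ourselves ... */
	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);

	/* ... and re-check the entry under the lock before installing. */
	if (likely(pte_none(*vmf->pte)))
		do_set_pte(vmf, page);	/* new helper declared below */
	else
		ret = VM_FAULT_NOPAGE;

	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return ret;
}

The point of the pattern is that the caller, not a shared helper, decides when it may sleep to allocate the page table and where the PTL is taken and dropped.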
Committed by: Will Deacon
Parent: 19c329f680
Commit: f9ce0be71d
include/linux/mm.h
@@ -542,8 +542,8 @@ struct vm_fault {
 					 * is not NULL, otherwise pmd.
 					 */
 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
-					 * vm_ops->map_pages() calls
-					 * alloc_set_pte() from atomic context.
+					 * vm_ops->map_pages() sets up a page
+					 * table from atomic context.
 					 * do_fault_around() pre-allocates
 					 * page table to avoid allocation from
 					 * atomic context.
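The rewritten comment encodes the rule this commit relies on: vm_ops->map_pages() runs in atomic context, so any PTE page table it needs must come from vmf->prealloc_pte, which do_fault_around() filled in while sleeping was still allowed. An illustrative-only sketch of that consume-the-preallocation step follows; install_prealloc_pte() is a hypothetical helper name, not code from this commit.

#include <linux/mm.h>

/* Hypothetical sketch: make sure a PTE page table is present without
 * allocating, using only what do_fault_around() preallocated. */
static bool install_prealloc_pte(struct vm_fault *vmf)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	if (!pmd_none(*vmf->pmd))
		return true;		/* a page table is already in place */
	if (!vmf->prealloc_pte)
		return false;		/* nothing preallocated: bail out, no sleeping here */

	/* Populate the PMD with the preallocated table under the PMD lock. */
	vmf->ptl = pmd_lock(mm, vmf->pmd);
	if (likely(pmd_none(*vmf->pmd))) {
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
		vmf->prealloc_pte = NULL;
	}
	spin_unlock(vmf->ptl);
	return true;
}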
@@ -578,7 +578,7 @@ struct vm_operations_struct {
 	vm_fault_t (*fault)(struct vm_fault *vmf);
 	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
 			enum page_entry_size pe_size);
-	void (*map_pages)(struct vm_fault *vmf,
+	vm_fault_t (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 	unsigned long (*pagesize)(struct vm_area_struct * area);
 
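A hedged illustration of what this prototype change means for code that installs its own vm_operations_struct: ->map_pages() now reports a vm_fault_t instead of returning void. The example_* names below are hypothetical; most in-tree users simply point .map_pages at filemap_map_pages(), whose own return type changes in the last hunk of this diff, so plain delegation keeps working.

#include <linux/mm.h>

/* Hypothetical adaptation of a filesystem's fault callbacks to the new
 * ->map_pages() signature; the handler just forwards to the generic helper. */
static vm_fault_t example_map_pages(struct vm_fault *vmf,
				    pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	return filemap_map_pages(vmf, start_pgoff, end_pgoff);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= example_map_pages,	/* returned void before this change */
	.page_mkwrite	= filemap_page_mkwrite,
};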
@@ -988,7 +988,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page);
+
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
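alloc_set_pte() is replaced by two helpers with a narrower job each: do_set_pmd() for mapping a compound page with a huge PMD, and do_set_pte() for installing a single PTE, with page table allocation and locking left to the caller. The sketch below shows how a caller might try the huge path first; it assumes (not shown in this hunk) that do_set_pmd() returns VM_FAULT_FALLBACK when a huge mapping is not possible, and try_map_compound_page() is an invented name.

#include <linux/mm.h>

/*
 * Invented name, simplified for illustration: attempt the huge-page path and
 * tell the caller whether it still needs to fall back to a PTE mapping
 * (e.g. via the do_set_pte() pattern sketched after the commit message).
 */
static bool try_map_compound_page(struct vm_fault *vmf, struct page *page,
				  vm_fault_t *ret)
{
	if (!pmd_none(*vmf->pmd) || !PageTransCompound(page))
		return false;			/* no PMD mapping possible, use a PTE */

	*ret = do_set_pmd(vmf, page);
	return *ret != VM_FAULT_FALLBACK;	/* mapped huge, or failed outright */
}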
@@ -2622,7 +2624,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 