mm/hugetlb: convert hugetlb_add_to_page_cache to take in a folio
Every caller of hugetlb_add_to_page_cache() is now passing in &folio->page; change the function to take in a folio directly and clean up the call sites.

Link: https://lkml.kernel.org/r/20230125170537.96973-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Commit: 9b91c0e277
Parent: d2d7bb44bf
Committed by: Andrew Morton
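For context on why this conversion is mechanical: a folio's first member overlays a struct page, so a caller holding a folio was already passing its head page via &folio->page, and the callee immediately undid that with page_folio(). The sketch below is illustrative only and not part of this commit (the helper name folio_page_round_trip is hypothetical); it assumes a kernel tree of this era where page_folio() and VM_BUG_ON_FOLIO() are available. The full diff follows.

	/*
	 * Illustrative sketch, not from this commit: for any folio,
	 * &folio->page yields its head page, and page_folio() maps that
	 * page back to the same folio. Passing the folio through
	 * directly therefore removes a redundant round trip.
	 */
	#include <linux/mm.h>
	#include <linux/mmdebug.h>

	static void folio_page_round_trip(struct folio *folio)
	{
		struct page *page = &folio->page;	/* folio -> head page */

		/* page_folio() recovers the folio containing the page. */
		VM_BUG_ON_FOLIO(page_folio(page) != folio, folio);
	}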
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -871,7 +871,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		}
 		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
 		__folio_mark_uptodate(folio);
-		error = hugetlb_add_to_page_cache(&folio->page, mapping, index);
+		error = hugetlb_add_to_page_cache(folio, mapping, index);
 		if (unlikely(error)) {
 			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
 			folio_put(folio);
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -723,7 +723,7 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			pgoff_t idx);
 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 			unsigned long address, struct folio *folio);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5662,10 +5662,9 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 	return present;
 }
 
-int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 			   pgoff_t idx)
 {
-	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 	struct hstate *h = hstate_inode(inode);
 	int err;
@@ -5677,7 +5676,7 @@ int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
 		__folio_clear_locked(folio);
 		return err;
 	}
-	ClearHPageRestoreReserve(page);
+	folio_clear_hugetlb_restore_reserve(folio);
 
 	/*
 	 * mark folio dirty so that it will not be removed from cache/file
@@ -5836,7 +5835,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		new_folio = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
-			int err = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
 			if (err) {
 				/*
 				 * err can't be -EEXIST which implies someone
@@ -6269,7 +6268,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		 * hugetlb_fault_mutex_table that here must be hold by
 		 * the caller.
 		 */
-		ret = hugetlb_add_to_page_cache(&folio->page, mapping, idx);
+		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
 		folio_in_pagecache = true;