mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls.  No functional changes with this patch.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>

From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

Fix a build warning in migrate.c when CONFIG_MMU_NOTIFIER=n.

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 5d6527a784
commit ac46d4f3c4

mm/rmap.c | 30
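
The diff below converts both call sites in mm/rmap.c to the new calling
convention. As a rough sketch of the conversion pattern (not part of the
patch; the mmu_notifier_range_init() signature is taken from how the hunks
below use it):

	/* Before: mm, start and end were passed to the notifier calls directly. */
	unsigned long start = address;
	unsigned long end = min(vma->vm_end,
				address + (PAGE_SIZE << compound_order(page)));

	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
	/* ... walk and update the page tables ... */
	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);

	/* After: the same parameters are grouped in one struct mmu_notifier_range. */
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, vma->vm_mm, address,
				min(vma->vm_end,
				    address + (PAGE_SIZE << compound_order(page))));
	mmu_notifier_invalidate_range_start(&range);
	/* ... walk and update the page tables ... */
	mmu_notifier_invalidate_range_end(&range);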
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -889,15 +889,17 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
-	unsigned long start = address, end;
+	struct mmu_notifier_range range;
 	int *cleaned = arg;
 
 	/*
 	 * We have to assume the worse case ie pmd for invalidation. Note that
 	 * the page can not be free from this function.
 	 */
-	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
-	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+	mmu_notifier_range_init(&range, vma->vm_mm, address,
+				min(vma->vm_end, address +
+				    (PAGE_SIZE << compound_order(page))));
+	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		unsigned long cstart;
@@ -949,7 +951,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			(*cleaned)++;
 	}
 
-	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+	mmu_notifier_invalidate_range_end(&range);
 
 	return true;
 }
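
The structure and its init helper are added elsewhere in this patch series
(they are not part of the mm/rmap.c hunks shown here). A minimal sketch of
what the code above relies on, inferred only from its use in this file; the
real definition in include/linux/mmu_notifier.h may carry additional fields:

	struct mmu_notifier_range {
		struct mm_struct *mm;	/* address space being invalidated */
		unsigned long start;	/* first address of the range */
		unsigned long end;	/* one past the last address */
	};

	static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
	{
		range->mm = mm;
		range->start = start;
		range->end = end;
	}

Callers may still adjust range.start/range.end after the init call and before
mmu_notifier_invalidate_range_start(), as the try_to_unmap_one() hunk below
does for shared huge-page PMDs.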
@@ -1345,7 +1347,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	pte_t pteval;
 	struct page *subpage;
 	bool ret = true;
-	unsigned long start = address, end;
+	struct mmu_notifier_range range;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
@@ -1369,15 +1371,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start,
+				min(vma->vm_end, vma->vm_start +
+				    (PAGE_SIZE << compound_order(page))));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
 		 */
-		adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+		adjust_range_if_pmd_sharing_possible(vma, &range.start,
+						     &range.end);
 	}
-	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
@@ -1428,9 +1433,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				 * we must flush them all. start/end were
 				 * already adjusted above to cover this range.
 				 */
-				flush_cache_range(vma, start, end);
-				flush_tlb_range(vma, start, end);
-				mmu_notifier_invalidate_range(mm, start, end);
+				flush_cache_range(vma, range.start, range.end);
+				flush_tlb_range(vma, range.start, range.end);
+				mmu_notifier_invalidate_range(mm, range.start,
+							      range.end);
 
 				/*
 				 * The ref count of the PMD page was dropped
@@ -1650,7 +1656,7 @@ discard:
 		put_page(page);
 	}
 
-	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+	mmu_notifier_invalidate_range_end(&range);
 
 	return ret;
 }