Merge branch 'akpm' (patches from Andrew)

Merge misc updates from Andrew Morton:

 - a few random little subsystems

 - almost all of the MM patches which are staged ahead of linux-next
   material. I'll trickle to post-linux-next work in as the dependents
   get merged up.

Subsystems affected by this patch series: kthread, kbuild, ide, ntfs,
ocfs2, arch, and mm (slab-generic, slab, slub, dax, debug, pagecache,
gup, swap, shmem, memcg, pagemap, mremap, hmm, vmalloc, documentation,
kasan, pagealloc, memory-failure, hugetlb, vmscan, z3fold, compaction,
oom-kill, migration, cma, page-poison, userfaultfd, zswap, zsmalloc,
uaccess, zram, and cleanups).

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (200 commits)
  mm: cleanup kstrto*() usage
  mm: fix fall-through warnings for Clang
  mm: slub: convert sysfs sprintf family to sysfs_emit/sysfs_emit_at
  mm: shmem: convert shmem_enabled_show to use sysfs_emit_at
  mm:backing-dev: use sysfs_emit in macro defining functions
  mm: huge_memory: convert remaining use of sprintf to sysfs_emit and neatening
  mm: use sysfs_emit for struct kobject * uses
  mm: fix kernel-doc markups
  zram: break the strict dependency from lzo
  zram: add stat to gather incompressible pages since zram set up
  zram: support page writeback
  mm/process_vm_access: remove redundant initialization of iov_r
  mm/zsmalloc.c: rework the list_add code in insert_zspage()
  mm/zswap: move to use crypto_acomp API for hardware acceleration
  mm/zswap: fix passing zero to 'PTR_ERR' warning
  mm/zswap: make struct kernel_param_ops definitions const
  userfaultfd/selftests: hint the test runner on required privilege
  userfaultfd/selftests: fix retval check for userfaultfd_open()
  userfaultfd/selftests: always dump something in modes
  userfaultfd: selftests: make __{s,u}64 format specifiers portable
  ...
@@ -557,8 +557,9 @@ enum page_entry_size {
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
-	int (*split)(struct vm_area_struct * area, unsigned long addr);
-	int (*mremap)(struct vm_area_struct * area);
+	/* Called any time before splitting to check if it's allowed */
+	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+	int (*mremap)(struct vm_area_struct *area, unsigned long flags);
 	/*
 	 * Called by mprotect() to make driver-specific permission
 	 * checks before mprotect() is finalised. The VMA must not
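This hunk renames the ->split hook to ->may_split (now documented as a
pre-split permission check) and adds a flags argument to ->mremap. As a
quick illustration of the new hook shapes, here is a small standalone C
sketch; the struct definitions are mocks for illustration only, not the
kernel's, and a real driver would return -EINVAL rather than -1:

#include <stdio.h>

/* mock stand-ins for the kernel types; illustrative only */
struct vm_area_struct { unsigned long vm_start, vm_end; };

struct vm_operations_struct {
	void (*open)(struct vm_area_struct *area);
	void (*close)(struct vm_area_struct *area);
	/* hook names/signatures as introduced by the hunk above */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area, unsigned long flags);
};

/* a mapping that refuses to be split or moved */
static int demo_may_split(struct vm_area_struct *area, unsigned long addr)
{
	(void)area; (void)addr;
	return -1;
}

static int demo_mremap(struct vm_area_struct *area, unsigned long flags)
{
	(void)area; (void)flags;
	return -1;
}

static const struct vm_operations_struct demo_vm_ops = {
	.may_split = demo_may_split,
	.mremap    = demo_mremap,
};

int main(void)
{
	struct vm_area_struct vma = { 0x1000, 0x2000 };
	printf("may_split -> %d\n", demo_vm_ops.may_split(&vma, 0x1800));
	return 0;
}

Splitting the "is this allowed?" question out of ->split lets the core mm
ask the driver before committing to any VMA surgery.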
@@ -1723,8 +1724,8 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
 
 long get_user_pages_remote(struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
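Here __access_remote_vm() drops its task_struct argument, leaving the mm
alone to identify the target address space, matching what already happened
to get_user_pages_remote(). From userspace the easiest way to exercise
this remote-access path is /proc/<pid>/mem, whose reads the kernel
services via access_remote_vm(); a minimal self-contained demo, reading
the current process's own memory so no ptrace setup is needed:

#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	char secret[] = "read back via /proc/self/mem";
	char buf[sizeof(secret)];

	int fd = open("/proc/self/mem", O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }

	/* pread() at the virtual address of `secret`; in-kernel this
	 * read goes through the remote-VM access path changed above */
	if (pread(fd, buf, sizeof(buf), (off_t)(uintptr_t)secret) !=
	    (ssize_t)sizeof(buf)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("%s\n", buf);
	close(fd);
	return 0;
}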
@@ -2210,7 +2211,7 @@ static inline bool pgtable_pte_page_ctor(struct page *page)
 	if (!ptlock_init(page))
 		return false;
 	__SetPageTable(page);
-	inc_zone_page_state(page, NR_PAGETABLE);
+	inc_lruvec_page_state(page, NR_PAGETABLE);
 	return true;
 }
 
@@ -2218,7 +2219,7 @@ static inline void pgtable_pte_page_dtor(struct page *page)
 {
 	ptlock_free(page);
 	__ClearPageTable(page);
-	dec_zone_page_state(page, NR_PAGETABLE);
+	dec_lruvec_page_state(page, NR_PAGETABLE);
 }
 
 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
@@ -2305,7 +2306,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 	if (!pmd_ptlock_init(page))
 		return false;
 	__SetPageTable(page);
-	inc_zone_page_state(page, NR_PAGETABLE);
+	inc_lruvec_page_state(page, NR_PAGETABLE);
 	return true;
 }
 
@@ -2313,7 +2314,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 	pmd_ptlock_free(page);
 	__ClearPageTable(page);
-	dec_zone_page_state(page, NR_PAGETABLE);
+	dec_lruvec_page_state(page, NR_PAGETABLE);
 }
 
 /*
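All four hunks above move page-table page accounting from per-zone
counters to lruvec counters, which are memcg-aware, so page-table memory
becomes visible per cgroup as well as globally. The global figure is the
PageTables: field of /proc/meminfo; a short reader as a sanity check (in
kernels that carry this series the same counter should also surface as
the pagetables entry of cgroup v2's memory.stat, though that file name is
an assumption worth verifying on your kernel):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) { perror("fopen"); return 1; }
	/* NR_PAGETABLE, incremented by the ctors above, feeds this field */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "PageTables:", 11) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}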
@@ -2440,9 +2441,6 @@ static inline int early_pfn_to_nid(unsigned long pfn)
 #else
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
-					struct mminit_pfnnid_cache *state);
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -2881,44 +2879,56 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
 				   unsigned long address, unsigned long size,
 				   pte_fn_t fn, void *data);
 
+extern void init_mem_debugging_and_hardening(void);
 #ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
-extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+	return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+	return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+	if (page_poisoning_enabled_static())
+		__kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+	if (page_poisoning_enabled_static())
+		__kernel_unpoison_pages(page, numpages);
+}
 #else
 static inline bool page_poisoning_enabled(void) { return false; }
-static inline void kernel_poison_pages(struct page *page, int numpages,
-					int enable) { }
+static inline bool page_poisoning_enabled_static(void) { return false; }
+static inline void __kernel_poison_pages(struct page *page, int nunmpages) { }
+static inline void kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
 #endif
 
 #ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
 DECLARE_STATIC_KEY_TRUE(init_on_alloc);
 #else
 DECLARE_STATIC_KEY_FALSE(init_on_alloc);
 #endif
 static inline bool want_init_on_alloc(gfp_t flags)
 {
-	if (static_branch_unlikely(&init_on_alloc) &&
-	    !page_poisoning_enabled())
+	if (static_branch_unlikely(&init_on_alloc))
 		return true;
 	return flags & __GFP_ZERO;
 }
 
 #ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
 DECLARE_STATIC_KEY_TRUE(init_on_free);
 #else
 DECLARE_STATIC_KEY_FALSE(init_on_free);
 #endif
 static inline bool want_init_on_free(void)
 {
-	return static_branch_unlikely(&init_on_free) &&
-	       !page_poisoning_enabled();
+	return static_branch_unlikely(&init_on_free);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void init_debug_pagealloc(void);
-#else
-static inline void init_debug_pagealloc(void) {}
-#endif
 extern bool _debug_pagealloc_enabled_early;
 DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 
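The page-poisoning rework replaces the old runtime check with a boot-early
flag (_page_poisoning_enabled_early) plus a static key for fast paths;
want_init_on_alloc()/want_init_on_free() can then drop their
!page_poisoning_enabled() exclusions, since the poison/init interaction is
handled at the call sites instead. A userspace model of the two-phase
pattern, with plain bools standing in for the jump-label machinery (names
mirror the hunk, but nothing here is kernel code):

#include <stdbool.h>
#include <stdio.h>

/* the point is the two-phase pattern: an early flag answers queries
 * before jump labels can be patched, then init copies it into the
 * fast-path key */
static bool poisoning_enabled_early;	/* set while parsing boot options */
static bool poisoning_enabled_key;	/* stands in for the static key */

static bool page_poisoning_enabled(void)
{
	return poisoning_enabled_early;
}

static bool page_poisoning_enabled_static(void)
{
	return poisoning_enabled_key;
}

static void init_mem_debugging_model(void)
{
	/* in the kernel, static_branch_enable() happens here */
	poisoning_enabled_key = poisoning_enabled_early;
}

int main(void)
{
	poisoning_enabled_early = true;	/* "page_poison=1" on the cmdline */
	printf("early check:  %d\n", page_poisoning_enabled());
	printf("static check: %d\n", page_poisoning_enabled_static());	/* still 0 */
	init_mem_debugging_model();
	printf("static check: %d\n", page_poisoning_enabled_static());	/* now 1 */
	return 0;
}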
@@ -2940,28 +2950,28 @@ static inline bool debug_pagealloc_enabled_static(void)
 	return static_branch_unlikely(&_debug_pagealloc_enabled);
 }
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/*
+ * To support DEBUG_PAGEALLOC architecture must ensure that
+ * __kernel_map_pages() never fails
+ */
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
-/*
- * When called in DEBUG_PAGEALLOC context, the call should most likely be
- * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
- */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
 {
-	__kernel_map_pages(page, numpages, enable);
+	if (debug_pagealloc_enabled_static())
+		__kernel_map_pages(page, numpages, 1);
 }
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif	/* CONFIG_HIBERNATION */
-#else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif	/* CONFIG_HIBERNATION */
-#endif	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
+{
+	if (debug_pagealloc_enabled_static())
+		__kernel_map_pages(page, numpages, 0);
+}
+#else	/* CONFIG_DEBUG_PAGEALLOC */
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+#endif	/* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
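Here kernel_map_pages(page, n, enable) gives way to
debug_pagealloc_map_pages()/debug_pagealloc_unmap_pages(), which fold the
debug_pagealloc_enabled_static() test inside, so allocator call sites no
longer guard each call themselves. A compile-and-run model of that
call-shape change (a plain bool in place of the static key; names mirror
the hunk, but this is not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool debug_pagealloc_on;	/* stands in for the static key */

static void kernel_map_pages_model(void *page, int numpages, int enable)
{
	printf("%smap %d page(s) at %p\n", enable ? "" : "un", numpages, page);
}

static void debug_pagealloc_map_pages(void *page, int numpages)
{
	if (debug_pagealloc_on)		/* check folded into the helper */
		kernel_map_pages_model(page, numpages, 1);
}

static void debug_pagealloc_unmap_pages(void *page, int numpages)
{
	if (debug_pagealloc_on)
		kernel_map_pages_model(page, numpages, 0);
}

int main(void)
{
	char page[4096];

	debug_pagealloc_unmap_pages(page, 1);	/* no-op while disabled */
	debug_pagealloc_on = true;		/* "debug_pagealloc=on" */
	debug_pagealloc_unmap_pages(page, 1);	/* "free": unmap */
	debug_pagealloc_map_pages(page, 1);	/* "alloc": map back */
	return 0;
}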