Merge tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Dave Hansen:

 "New Feature:

   - Randomize the per-cpu entry areas

  Cleanups:

   - Have CR3_ADDR_MASK use PHYSICAL_PAGE_MASK instead of open coding it

   - Move to "native" set_memory_rox() helper

   - Clean up pmd_get_atomic() and i386-PAE

   - Remove some unused page table size macros"

* tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  x86/mm: Ensure forced page table splitting
  x86/kasan: Populate shadow for shared chunk of the CPU entry area
  x86/kasan: Add helpers to align shadow addresses up and down
  x86/kasan: Rename local CPU_ENTRY_AREA variables to shorten names
  x86/mm: Populate KASAN shadow for entire per-CPU range of CPU entry area
  x86/mm: Recompute physical address for every page of per-CPU CEA mapping
  x86/mm: Rename __change_page_attr_set_clr(.checkalias)
  x86/mm: Inhibit _PAGE_NX changes from cpa_process_alias()
  x86/mm: Untangle __change_page_attr_set_clr(.checkalias)
  x86/mm: Add a few comments
  x86/mm: Fix CR3_ADDR_MASK
  x86/mm: Remove P*D_PAGE_MASK and P*D_PAGE_SIZE macros
  mm: Convert __HAVE_ARCH_P..P_GET to the new style
  mm: Remove pointless barrier() after pmdp_get_lockless()
  x86/mm/pae: Get rid of set_64bit()
  x86_64: Remove pointless set_64bit() usage
  x86/mm/pae: Be consistent with pXXp_get_and_clear()
  x86/mm/pae: Use WRITE_ONCE()
  x86/mm/pae: Don't (ab)use atomic64
  mm/gup: Fix the lockless PMD access
  ...
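One of the cleanups listed above, the "native" set_memory_rox() helper, collapses the common set_memory_ro() + set_memory_x() pairing into a single call. A minimal sketch of that intent follows; the signatures match include/linux/set_memory.h, but treat the body as an illustration of the generic helper rather than the exact mainline implementation.

static inline int set_memory_rox(unsigned long addr, int numpages)
{
        /* Make the range read-only first, then mark it executable. */
        int ret = set_memory_ro(addr, numpages);

        if (ret)
                return ret;
        return set_memory_x(addr, numpages);
}

Callers that previously issued the two calls back to back can use the single helper, and an architecture is free to provide a more efficient native version, which is what the cleanup refers to.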
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2607,11 +2607,6 @@ struct task_struct * __init fork_idle(int cpu)
 	return task;
 }
 
-struct mm_struct *copy_init_mm(void)
-{
-	return dup_mm(NULL, &init_mm);
-}
-
 /*
  * This is like kernel_clone(), but shaved down and tailored to just
  * creating io_uring workers. It returns a created task, or an error pointer.
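The hunk above drops copy_init_mm(), which handed out a duplicate of init_mm via dup_mm(). With it gone, code that needs a kernel mm allocates a fresh, empty one with mm_alloc() rather than copying init_mm. A hedged sketch of that replacement pattern is below; example_make_mm() is an invented name for illustration, while mm_alloc() is the real allocator declared in include/linux/sched/mm.h.

/* Illustration only: obtain an empty mm instead of copying init_mm. */
static int example_make_mm(struct mm_struct **out)
{
        struct mm_struct *mm = mm_alloc();      /* fresh, empty mm */

        if (!mm)
                return -ENOMEM;
        *out = mm;
        return 0;
}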
@@ -3030,10 +3025,27 @@ static void sighand_ctor(void *data)
 	init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
 	unsigned int mm_size;
 
+	/*
+	 * The mm_cpumask is located at the end of mm_struct, and is
+	 * dynamically sized based on the maximum CPU number this system
+	 * can have, taking hotplug into account (nr_cpu_ids).
+	 */
+	mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
+			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+			offsetof(struct mm_struct, saved_auxv),
+			sizeof_field(struct mm_struct, saved_auxv),
+			NULL);
+}
+
+void __init proc_caches_init(void)
+{
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -3051,19 +3063,6 @@ void __init proc_caches_init(void)
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 
-	/*
-	 * The mm_cpumask is located at the end of mm_struct, and is
-	 * dynamically sized based on the maximum CPU number this system
-	 * can have, taking hotplug into account (nr_cpu_ids).
-	 */
-	mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-	mm_cachep = kmem_cache_create_usercopy("mm_struct",
-			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-			offsetof(struct mm_struct, saved_auxv),
-			sizeof_field(struct mm_struct, saved_auxv),
-			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
 	nsproxy_cache_init();
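The mm_size arithmetic in the hunks above, sizeof(struct mm_struct) + cpumask_size(), exists because mm_cpumask lives at the tail of mm_struct and its width depends on nr_cpu_ids, which is only known at boot. The standalone user-space sketch below mirrors only that sizing idea; struct demo_mm and its fields are invented for illustration and are not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct mm_struct: fixed fields followed by a bitmap
 * whose length is decided at runtime (the kernel's mm_cpumask). */
struct demo_mm {
        long fixed_state;               /* placeholder for the fixed part */
        unsigned long cpu_bitmap[];     /* flexible array member at the tail */
};

int main(void)
{
        unsigned int nr_cpu_ids = 8;    /* runtime CPU count, illustrative */

        /* Round the bitmap up to whole longs, as cpumask_size() does. */
        size_t mask_bytes = ((nr_cpu_ids + 8 * sizeof(long) - 1) /
                             (8 * sizeof(long))) * sizeof(long);
        size_t obj_size = sizeof(struct demo_mm) + mask_bytes;

        /* One allocation covers the struct plus its trailing mask,
         * mirroring mm_size = sizeof(struct mm_struct) + cpumask_size(). */
        struct demo_mm *mm = calloc(1, obj_size);

        if (!mm)
                return 1;
        printf("object size: %zu bytes (%zu fixed + %zu mask)\n",
               obj_size, sizeof(struct demo_mm), mask_bytes);
        free(mm);
        return 0;
}

In the diff, that computed mm_size is then passed to kmem_cache_create_usercopy(), so every mm_struct slab object carries room for the mask, with the saved_auxv region whitelisted for user copies via the offsetof()/sizeof_field() arguments.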