Merge tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Prevent an infinite loop in the MCE recovery on return to user
   space, which was caused by a second MCE queueing work for the same
   page and thereby creating a circular work list.

 - Make kern_addr_valid() handle existing PMD entries, which are marked
   not present in the higher level page table, correctly instead of
   blindly dereferencing them.

 - Pass a valid address to sanitize_phys(). This was caused by the
   mixture of inclusive and exclusive ranges: memtype_reserve() expects
   'end' to be exclusive, but sanitize_phys() wants it inclusive. This
   worked so far, but with 'end' being the end of the physical address
   space the failure is exposed.

 - Increase the maximum supported number of GPIOs for 64-bit. Newer
   SoCs exceed the previous maximum.

* tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Avoid infinite loop for copy from user recovery
  x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
  x86/platform: Increase maximum GPIO number for X86_64
  x86/pat: Pass valid address to sanitize_phys()
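Editor's note on the first fix: task_work items are chained through their callback_head in a singly linked per-task list, so adding the same callback_head twice makes the node point at itself. A minimal sketch of the failure mode, assuming the simplified push logic of kernel/task_work.c (the real task_work_add() uses a cmpxchg() loop and a work_exited sentinel; this model omits both):

#include <stddef.h>

struct callback_head {
        struct callback_head *next;
        void (*func)(struct callback_head *);
};

static struct callback_head *task_works;        /* models current->task_works */

static void task_work_add_sketch(struct callback_head *work)
{
        work->next = task_works;        /* push onto the singly linked list */
        task_works = work;
}

int main(void)
{
        static struct callback_head mce_kill_me;        /* models current->mce_kill_me */

        task_work_add_sketch(&mce_kill_me);     /* first MCE: next = NULL */
        task_work_add_sketch(&mce_kill_me);     /* second MCE, same node: */
                                                /* mce_kill_me.next == &mce_kill_me */
        /* The list walk on return to user space now never reaches NULL. */
        return mce_kill_me.next == &mce_kill_me;        /* 1: the cycle exists */
}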
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
         def_bool y
 
+config ARCH_NR_GPIO
+        int
+        default 1024 if X86_64
+        default 512
+
 config ARCH_SUSPEND_POSSIBLE
         def_bool y
 
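Editor's note: the new symbol matters because gpiolib sizes its static GPIO number space from it. A sketch of how the value is consumed, recalled from the include/asm-generic/gpio.h of this era (an assumption to verify against the tree; CONFIG_ARCH_NR_GPIO is defined inline here only so the sketch compiles standalone):

#include <stdio.h>

#define CONFIG_ARCH_NR_GPIO 1024        /* what "default 1024 if X86_64" yields */

#ifndef ARCH_NR_GPIOS
#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
#define ARCH_NR_GPIOS   CONFIG_ARCH_NR_GPIO
#else
#define ARCH_NR_GPIOS   512             /* fallback the 512 default mirrors */
#endif
#endif

int main(void)
{
        printf("ARCH_NR_GPIOS = %d\n", ARCH_NR_GPIOS);  /* prints 1024 */
        return 0;
}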
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+        struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+        p->mce_count = 0;
         force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
         int flags = MF_ACTION_REQUIRED;
         int ret;
 
+        p->mce_count = 0;
         pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
         if (!p->mce_ripv)
@@ -1290,8 +1294,12 @@ static void kill_me_maybe(struct callback_head *cb)
         }
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-        current->mce_addr = m->addr;
-        current->mce_kflags = m->kflags;
-        current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+        int count = ++current->mce_count;
+
+        /* First call, save all the details */
+        if (count == 1) {
+                current->mce_addr = m->addr;
+                current->mce_kflags = m->kflags;
+                current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
@@ -1301,6 +1309,19 @@ static void queue_task_work(struct mce *m, int kill_current_task)
-                current->mce_kill_me.func = kill_me_now;
-        else
-                current->mce_kill_me.func = kill_me_maybe;
+                        current->mce_kill_me.func = kill_me_now;
+                else
+                        current->mce_kill_me.func = kill_me_maybe;
+        }
+
+        /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+        if (count > 10)
+                mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+        /* Second or later call, make sure page address matches the one from first call */
+        if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+                mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+        /* Do not call task_work_add() more than once */
+        if (count > 1)
+                return;
 
         task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                 /* If this triggers there is no way to recover. Die hard. */
                 BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-                queue_task_work(&m, kill_current_task);
+                queue_task_work(&m, msg, kill_current_task);
 
         } else {
                 /*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                 }
 
                 if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                        queue_task_work(&m, kill_current_task);
+                        queue_task_work(&m, msg, kill_current_task);
         }
 out:
         mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
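Editor's note: the net effect of the rewritten queue_task_work() is idempotent queueing. However many machine checks hit before the task returns to user space, mce_kill_me enters the task_work list exactly once, and the handlers reset mce_count so the next fault starts a fresh sequence. A minimal user-space model of the pattern (names and structure are illustrative only, not kernel API):

#include <stdio.h>

static int mce_count;   /* models current->mce_count */
static int work_queued; /* models membership in the task_work list */

static void queue_task_work_model(void)
{
        int count = ++mce_count;

        /* Second or later call: details already saved, work already queued. */
        if (count > 1)
                return;

        work_queued = 1;        /* models task_work_add(..., TWA_RESUME) */
}

static void task_work_run_model(void)   /* models return to user space */
{
        if (work_queued) {
                work_queued = 0;
                mce_count = 0;  /* models the reset in kill_me_now()/kill_me_maybe() */
                puts("SIGBUS delivered exactly once");
        }
}

int main(void)
{
        queue_task_work_model();        /* first MCE */
        queue_task_work_model();        /* second MCE on the same page */
        task_work_run_model();
        return 0;
}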
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
                 return 0;
 
         p4d = p4d_offset(pgd, addr);
-        if (p4d_none(*p4d))
+        if (!p4d_present(*p4d))
                 return 0;
 
         pud = pud_offset(p4d, addr);
-        if (pud_none(*pud))
+        if (!pud_present(*pud))
                 return 0;
 
         if (pud_large(*pud))
                 return pfn_valid(pud_pfn(*pud));
 
         pmd = pmd_offset(pud, addr);
-        if (pmd_none(*pmd))
+        if (!pmd_present(*pmd))
                 return 0;
 
         if (pmd_large(*pmd))
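Editor's note: the fix hinges on the difference between the two predicates. p*_none() is true only for a completely empty entry, while an entry can be populated yet have its PRESENT bit clear, in which case walking to the next level dereferences a table that is not mapped. A simplified user-space model (the real predicates in arch/x86/include/asm/pgtable.h also handle PSE and PROT_NONE cases, which this omits):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT 0x1ULL

static int pmd_none_model(uint64_t e)    { return e == 0; }
static int pmd_present_model(uint64_t e) { return (e & _PAGE_PRESENT) != 0; }

int main(void)
{
        uint64_t pmd = 0x1000;  /* entry is set up, but PRESENT bit is clear */

        printf("pmd_none:    %d\n", pmd_none_model(pmd));    /* 0: old check falls through */
        printf("pmd_present: %d\n", pmd_present_model(pmd)); /* 0: new check rejects it */
        return 0;
}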
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
         int err = 0;
 
         start = sanitize_phys(start);
-        end = sanitize_phys(end);
+
+        /*
+         * The end address passed into this function is exclusive, but
+         * sanitize_phys() expects an inclusive address.
+         */
+        end = sanitize_phys(end - 1) + 1;
         if (start >= end) {
                 WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                         start, end - 1, cattr_name(req_type));
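Editor's note: on 64-bit, sanitize_phys() masks its argument with __PHYSICAL_MASK, so passing the exclusive end of the whole physical address space wraps it to 0 and trips the start >= end warning above. A worked user-space model, assuming 46 physical address bits (an assumption for illustration only):

#include <stdint.h>
#include <stdio.h>

#define PHYS_MASK ((1ULL << 46) - 1)    /* models __PHYSICAL_MASK */

static uint64_t sanitize_phys_model(uint64_t addr)
{
        return addr & PHYS_MASK;
}

int main(void)
{
        uint64_t end = 1ULL << 46;      /* exclusive end of the address space */

        /* Old code: the mask wraps the out-of-range input to 0 ... */
        printf("old: %#llx\n", (unsigned long long)sanitize_phys_model(end));
        /* ... so start >= end and memtype_reserve() hits the WARN. */

        /* New code: sanitize the inclusive last byte, then convert back. */
        printf("new: %#llx\n",
               (unsigned long long)(sanitize_phys_model(end - 1) + 1));
        return 0;
}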
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1471,6 +1471,7 @@ struct task_struct {
                         mce_whole_page : 1,
                         __mce_reserved : 62;
         struct callback_head            mce_kill_me;
+        int                             mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES