|
|
|
@@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Write @len bytes from @val to the unaligned target address @ifa.
 *
 * When @kernel_mode is set the faulting access originated in the kernel,
 * so the non-faulting kernel copy helper is used; otherwise the target is
 * a user address and the normal user-copy path applies.
 *
 * Returns 0 on success, non-zero on failure (same convention as the
 * underlying copy helpers).
 */
static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
{
	if (!kernel_mode)
		return copy_to_user((void __user *)ifa, val, len);

	return copy_to_kernel_nofault((void *)ifa, val, len);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Read @len bytes from the unaligned source address @ifa into @val.
 *
 * When @kernel_mode is set the faulting access originated in the kernel,
 * so the non-faulting kernel copy helper is used; otherwise the source is
 * a user address and the normal user-copy path applies.
 *
 * Returns 0 on success, non-zero on failure (same convention as the
 * underlying copy helpers).
 */
static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
{
	if (!kernel_mode)
		return copy_from_user(val, (void __user *)ifa, len);

	return copy_from_kernel_nofault(val, (void *)ifa, len);
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
|
|
|
|
|
bool kernel_mode)
|
|
|
|
|
{
|
|
|
|
|
unsigned int len = 1 << ld.x6_sz;
|
|
|
|
|
unsigned long val = 0;
|
|
|
|
@@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
/* this assumes little-endian byte-order: */
|
|
|
|
|
if (copy_from_user(&val, (void __user *) ifa, len))
|
|
|
|
|
if (emulate_load(&val, ifa, len, kernel_mode))
|
|
|
|
|
return -1;
|
|
|
|
|
setreg(ld.r1, val, 0, regs);
|
|
|
|
|
|
|
|
|
@@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
|
|
|
|
|
bool kernel_mode)
|
|
|
|
|
{
|
|
|
|
|
unsigned long r2;
|
|
|
|
|
unsigned int len = 1 << ld.x6_sz;
|
|
|
|
@@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* this assumes little-endian byte-order: */
|
|
|
|
|
if (copy_to_user((void __user *) ifa, &r2, len))
|
|
|
|
|
if (emulate_store(ifa, &r2, len, kernel_mode))
|
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
|
|
|
|
|
{
|
|
|
|
|
struct ia64_fpreg fpr_init[2];
|
|
|
|
|
struct ia64_fpreg fpr_final[2];
|
|
|
|
@@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
|
|
|
|
|
* This assumes little-endian byte-order. Note that there is no "ldfpe"
|
|
|
|
|
* instruction:
|
|
|
|
|
*/
|
|
|
|
|
if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
|
|
|
|
|
|| copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
|
|
|
|
|
if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
|
|
|
|
|
|| emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
|
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
|
|
DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
|
|
|
|
@@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
|
|
|
|
|
bool kernel_mode)
|
|
|
|
|
{
|
|
|
|
|
struct ia64_fpreg fpr_init;
|
|
|
|
|
struct ia64_fpreg fpr_final;
|
|
|
|
@@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
* See comments in ldX for descriptions on how the various loads are handled.
|
|
|
|
|
*/
|
|
|
|
|
if (ld.x6_op != 0x2) {
|
|
|
|
|
if (copy_from_user(&fpr_init, (void __user *) ifa, len))
|
|
|
|
|
if (emulate_load(&fpr_init, ifa, len, kernel_mode))
|
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
|
|
DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
|
|
|
|
@@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
|
|
|
|
|
bool kernel_mode)
|
|
|
|
|
{
|
|
|
|
|
struct ia64_fpreg fpr_init;
|
|
|
|
|
struct ia64_fpreg fpr_final;
|
|
|
|
@@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
|
|
|
|
|
DDUMP("fpr_init =", &fpr_init, len);
|
|
|
|
|
DDUMP("fpr_final =", &fpr_final, len);
|
|
|
|
|
|
|
|
|
|
if (copy_to_user((void __user *) ifa, &fpr_final, len))
|
|
|
|
|
if (emulate_store(ifa, &fpr_final, len, kernel_mode))
|
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -1295,7 +1314,6 @@ void
|
|
|
|
|
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
{
|
|
|
|
|
struct ia64_psr *ipsr = ia64_psr(regs);
|
|
|
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
|
unsigned long bundle[2];
|
|
|
|
|
unsigned long opcode;
|
|
|
|
|
const struct exception_table_entry *eh = NULL;
|
|
|
|
@@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
load_store_t insn;
|
|
|
|
|
} u;
|
|
|
|
|
int ret = -1;
|
|
|
|
|
bool kernel_mode = false;
|
|
|
|
|
|
|
|
|
|
if (ia64_psr(regs)->be) {
|
|
|
|
|
/* we don't support big-endian accesses */
|
|
|
|
@@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
if (unaligned_dump_stack)
|
|
|
|
|
dump_stack();
|
|
|
|
|
}
|
|
|
|
|
set_fs(KERNEL_DS);
|
|
|
|
|
kernel_mode = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
|
|
|
|
|
regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
|
|
|
|
|
|
|
|
|
|
if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
|
|
|
|
|
if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
|
|
|
|
|
goto failure;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
case LDCCLR_IMM_OP:
|
|
|
|
|
case LDCNC_IMM_OP:
|
|
|
|
|
case LDCCLRACQ_IMM_OP:
|
|
|
|
|
ret = emulate_load_int(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case ST_OP:
|
|
|
|
@@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
fallthrough;
|
|
|
|
|
case ST_IMM_OP:
|
|
|
|
|
case STREL_IMM_OP:
|
|
|
|
|
ret = emulate_store_int(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case LDF_OP:
|
|
|
|
@@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
case LDFCCLR_OP:
|
|
|
|
|
case LDFCNC_OP:
|
|
|
|
|
if (u.insn.x)
|
|
|
|
|
ret = emulate_load_floatpair(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
else
|
|
|
|
|
ret = emulate_load_float(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case LDF_IMM_OP:
|
|
|
|
|
case LDFA_IMM_OP:
|
|
|
|
|
case LDFCCLR_IMM_OP:
|
|
|
|
|
case LDFCNC_IMM_OP:
|
|
|
|
|
ret = emulate_load_float(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case STF_OP:
|
|
|
|
|
case STF_IMM_OP:
|
|
|
|
|
ret = emulate_store_float(ifa, u.insn, regs);
|
|
|
|
|
ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
@@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
|
|
|
|
|
|
|
|
|
|
DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
|
|
|
|
|
done:
|
|
|
|
|
set_fs(old_fs); /* restore original address limit */
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
failure:
|
|
|
|
|