Merge branch kvm-arm64/lock-inversion into kvmarm-master/next

* kvm-arm64/lock-inversion:
  : .
  : vm/vcpu lock inversion fixes, courtesy of Oliver Upton, plus a few
  : extra fixes from both Oliver and Reiji Watanabe.
  :
  : From the initial cover letter:
  :
  : As it so happens, lock ordering in KVM/arm64 is completely backwards.
  : There's a significant amount of VM-wide state that needs to be accessed
  : from the context of a vCPU. Until now, this was accomplished by
  : acquiring the kvm->lock, but that cannot be nested within vcpu->mutex.
  :
  : This series fixes the issue with some fine-grained locking for MP state
  : and a new, dedicated mutex that can nest with both kvm->lock and
  : vcpu->mutex.
  : .
  KVM: arm64: Have kvm_psci_vcpu_on() use WRITE_ONCE() to update mp_state
  KVM: arm64: Acquire mp_state_lock in kvm_arch_vcpu_ioctl_vcpu_init()
  KVM: arm64: vgic: Don't acquire its_lock before config_lock
  KVM: arm64: Use config_lock to protect vgic state
  KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
  KVM: arm64: Avoid lock inversion when setting the VM register width
  KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON

Signed-off-by: Marc Zyngier <maz@kernel.org>
Author: Marc Zyngier <maz@kernel.org>
Date:   2023-04-21 09:30:46 +01:00
15 changed files with 186 additions and 114 deletions
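
The recurring trick in this series (see kvm_arch_init_vm(), kvm_arch_vcpu_create() and vgic_its_create() below) is that lockdep learns a locking hierarchy from the acquisitions it actually observes, so each init path takes the new lock once, nested in the only legal order, and immediately drops it. A minimal sketch of the idiom, with illustrative names (demo/outer/inner are not from the patches):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct demo {
        struct mutex outer;    /* plays the role of kvm->lock */
        struct mutex inner;    /* plays the role of kvm->arch.config_lock */
    };

    static void demo_init(struct demo *d)
    {
        mutex_init(&d->outer);
        mutex_init(&d->inner);

        /*
         * Take the locks once, nested in the only legal order, so that
         * lockdep records "inner nests inside outer". Any later attempt
         * to acquire them the other way round is then reported, even if
         * the two paths never actually race at runtime.
         */
        mutex_lock(&d->outer);
        mutex_lock(&d->inner);
        mutex_unlock(&d->inner);
        mutex_unlock(&d->outer);
    }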

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h

@@ -199,6 +199,9 @@ struct kvm_arch {
 	/* Mandated version of PSCI */
 	u32 psci_version;
 
+	/* Protects VM-scoped configuration data */
+	struct mutex config_lock;
+
 	/*
 	 * If we encounter a data abort without valid instruction syndrome
 	 * information, report this to user space. User space can (and
@@ -522,6 +525,7 @@ struct kvm_vcpu_arch {
 	/* vcpu power state */
 	struct kvm_mp_state mp_state;
+	spinlock_t mp_state_lock;
 
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c

@@ -128,6 +128,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int ret;
 
+	mutex_init(&kvm->arch.config_lock);
+
+#ifdef CONFIG_LOCKDEP
+	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
+	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->lock);
+#endif
+
 	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
 		return ret;
@@ -326,6 +336,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int err;
 
+	spin_lock_init(&vcpu->arch.mp_state_lock);
+
+#ifdef CONFIG_LOCKDEP
+	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
+	mutex_lock(&vcpu->mutex);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
+	mutex_unlock(&vcpu->mutex);
+#endif
+
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -443,34 +463,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 }
 
-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
 	kvm_vcpu_kick(vcpu);
 }
 
+void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.mp_state_lock);
+	__kvm_arm_vcpu_power_off(vcpu);
+	spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
+	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
 }
 
 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
 	kvm_vcpu_kick(vcpu);
 }
 
 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	*mp_state = vcpu->arch.mp_state;
+	*mp_state = READ_ONCE(vcpu->arch.mp_state);
 
 	return 0;
 }
@@ -480,12 +507,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
+	spin_lock(&vcpu->arch.mp_state_lock);
+
 	switch (mp_state->mp_state) {
 	case KVM_MP_STATE_RUNNABLE:
-		vcpu->arch.mp_state = *mp_state;
+		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
 		break;
 	case KVM_MP_STATE_STOPPED:
-		kvm_arm_vcpu_power_off(vcpu);
+		__kvm_arm_vcpu_power_off(vcpu);
 		break;
 	case KVM_MP_STATE_SUSPENDED:
 		kvm_arm_vcpu_suspend(vcpu);
@@ -494,6 +523,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		ret = -EINVAL;
 	}
 
+	spin_unlock(&vcpu->arch.mp_state_lock);
+
 	return ret;
 }
@@ -593,9 +624,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (kvm_vm_is_protected(kvm))
 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	return ret;
 }
@@ -1210,10 +1241,14 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	/*
 	 * Handle the "start in power-off" case.
 	 */
+	spin_lock(&vcpu->arch.mp_state_lock);
+
 	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-		kvm_arm_vcpu_power_off(vcpu);
+		__kvm_arm_vcpu_power_off(vcpu);
 	else
-		vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+
+	spin_unlock(&vcpu->arch.mp_state_lock);
 
 	return 0;
 }
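
The mp_state handling above separates a locked write side from a lockless read side: writers hold mp_state_lock (the __-prefixed helper assumes its caller already took it), and stores still go through WRITE_ONCE() so that readers like kvm_arm_vcpu_stopped(), which use READ_ONCE() without the lock, cannot observe a torn value. A condensed sketch of that split, with hypothetical names:

    #include <linux/compiler.h>
    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    enum demo_mp_state { DEMO_STOPPED, DEMO_RUNNABLE };

    struct demo_vcpu {
        spinlock_t mp_state_lock;
        enum demo_mp_state mp_state;
    };

    /* Write side: the caller must already hold mp_state_lock. */
    static void __demo_power_off(struct demo_vcpu *v)
    {
        lockdep_assert_held(&v->mp_state_lock);
        /* WRITE_ONCE() keeps the store untorn for lockless readers. */
        WRITE_ONCE(v->mp_state, DEMO_STOPPED);
    }

    static void demo_power_off(struct demo_vcpu *v)
    {
        spin_lock(&v->mp_state_lock);
        __demo_power_off(v);
        spin_unlock(&v->mp_state_lock);
    }

    /* Read side: legal without the lock; returns one consistent snapshot. */
    static bool demo_stopped(struct demo_vcpu *v)
    {
        return READ_ONCE(v->mp_state) == DEMO_STOPPED;
    }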

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c

@@ -957,7 +957,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 	switch (attr->group) {
 	case KVM_ARM_VCPU_PMU_V3_CTRL:
+		mutex_lock(&vcpu->kvm->arch.config_lock);
 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
 		break;
 	case KVM_ARM_VCPU_TIMER_CTRL:
 		ret = kvm_arm_timer_set_attr(vcpu, attr);

diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c

@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 	if (val & ~fw_reg_features)
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 
 	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
 	    val != *fw_reg_bmap) {
@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 	WRITE_ONCE(*fw_reg_bmap, val);
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 	return ret;
 }

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c

@@ -874,7 +874,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 	struct arm_pmu *arm_pmu;
 	int ret = -ENXIO;
 
-	mutex_lock(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.config_lock);
 	mutex_lock(&arm_pmus_lock);
 
 	list_for_each_entry(entry, &arm_pmus, entry) {
@@ -894,7 +894,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 	}
 
 	mutex_unlock(&arm_pmus_lock);
-	mutex_unlock(&kvm->lock);
 
 	return ret;
 }
@@ -902,22 +901,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
 	struct kvm *kvm = vcpu->kvm;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return -ENODEV;
 
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	mutex_lock(&kvm->lock);
 	if (!kvm->arch.arm_pmu) {
 		/* No PMU set, get the default one */
 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-		if (!kvm->arch.arm_pmu) {
-			mutex_unlock(&kvm->lock);
+		if (!kvm->arch.arm_pmu)
 			return -ENODEV;
-		}
 	}
-	mutex_unlock(&kvm->lock);
 
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
@@ -961,19 +958,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 			     filter.action != KVM_PMU_EVENT_DENY))
 			return -EINVAL;
 
-		mutex_lock(&kvm->lock);
-
-		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
-			mutex_unlock(&kvm->lock);
+		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
 			return -EBUSY;
-		}
 
 		if (!kvm->arch.pmu_filter) {
 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
-			if (!kvm->arch.pmu_filter) {
-				mutex_unlock(&kvm->lock);
+			if (!kvm->arch.pmu_filter)
 				return -ENOMEM;
-			}
 
 			/*
 			 * The default depends on the first applied filter.
@@ -992,8 +983,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		else
 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
 
-		mutex_unlock(&kvm->lock);
-
 		return 0;
 	}
 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
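
The pmu-emul.c hunks flip the helpers from taking a lock themselves to requiring that the caller (kvm_arm_vcpu_arch_set_attr() in guest.c above) holds config_lock, which is what lets every error path shed its unlock boilerplate. The shape of that refactor, sketched with placeholder names:

    #include <linux/errno.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct demo_dev {
        struct mutex cfg_lock;
        bool created;
        int value;
    };

    /*
     * Contract: the caller wraps the whole attribute update in cfg_lock,
     * so the helper only asserts the lock and can use plain early returns.
     */
    static int demo_set_value(struct demo_dev *d, int val)
    {
        lockdep_assert_held(&d->cfg_lock);

        if (d->created)
            return -EBUSY;

        d->value = val;
        return 0;
    }

    static int demo_ioctl_set_value(struct demo_dev *d, int val)
    {
        int ret;

        mutex_lock(&d->cfg_lock);
        ret = demo_set_value(d, val);
        mutex_unlock(&d->cfg_lock);
        return ret;
    }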

diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c

@@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	struct vcpu_reset_state *reset_state;
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL;
+	int ret = PSCI_RET_SUCCESS;
 	unsigned long cpu_id;
 
 	cpu_id = smccc_get_arg1(source_vcpu);
@@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 */
 	if (!vcpu)
 		return PSCI_RET_INVALID_PARAMS;
+
+	spin_lock(&vcpu->arch.mp_state_lock);
 	if (!kvm_arm_vcpu_stopped(vcpu)) {
 		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
-			return PSCI_RET_ALREADY_ON;
+			ret = PSCI_RET_ALREADY_ON;
 		else
-			return PSCI_RET_INVALID_PARAMS;
+			ret = PSCI_RET_INVALID_PARAMS;
+
+		goto out_unlock;
 	}
 
 	reset_state = &vcpu->arch.reset_state;
@@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 */
 	reset_state->r0 = smccc_get_arg3(source_vcpu);
 
-	WRITE_ONCE(reset_state->reset, true);
+	reset_state->reset = true;
 	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 
 	/*
@@ -105,10 +110,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 */
 	smp_wmb();
 
-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
 	kvm_vcpu_wake_up(vcpu);
 
-	return PSCI_RET_SUCCESS;
+out_unlock:
+	spin_unlock(&vcpu->arch.mp_state_lock);
+	return ret;
 }
 
 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
@@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
 	 * after this call is handled and before the VCPUs have been
 	 * re-initialized.
 	 */
-	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
-		tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		spin_lock(&tmp->arch.mp_state_lock);
+		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+		spin_unlock(&tmp->arch.mp_state_lock);
+	}
 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
 
 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
@@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	u32 psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 	int ret = 1;
@@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		kvm_psci_narrow_to_32bit(vcpu);
 		fallthrough;
 	case PSCI_0_2_FN64_CPU_ON:
-		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
-		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 		kvm_psci_narrow_to_32bit(vcpu);
@@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	u32 psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 
@@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
-		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
-		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;

diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c

@@ -205,7 +205,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.config_lock);
 
 	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
 		/*
@@ -262,17 +262,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	bool loaded;
 	u32 pstate;
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	ret = kvm_set_vm_width(vcpu);
-	if (!ret) {
-		reset_state = vcpu->arch.reset_state;
-		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
-	}
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 
 	if (ret)
 		return ret;
 
+	spin_lock(&vcpu->arch.mp_state_lock);
+	reset_state = vcpu->arch.reset_state;
+	vcpu->arch.reset_state.reset = false;
+	spin_unlock(&vcpu->arch.mp_state_lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);

diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c

@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	struct kvm *kvm = s->private;
 	struct vgic_state_iter *iter;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	iter = kvm->arch.vgic.iter;
 	if (iter) {
 		iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	if (end_of_vgic(iter))
 		iter = NULL;
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 	return iter;
 }
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
 	if (IS_ERR(v))
 		return;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	iter = kvm->arch.vgic.iter;
 	kfree(iter->lpi_array);
 	kfree(iter);
 	kvm->arch.vgic.iter = NULL;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 }
 
 static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)

diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c

@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	unsigned long i;
 	int ret;
 
-	if (irqchip_in_kernel(kvm))
-		return -EEXIST;
-
 	/*
 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
 	 * which had no chance yet to check the availability of the GICv2
@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	    !kvm_vgic_global_state.can_emulate_gicv2)
 		return -ENODEV;
 
+	/* Must be held to avoid race with vCPU creation */
+	lockdep_assert_held(&kvm->lock);
+
 	ret = -EBUSY;
 	if (!lock_all_vcpus(kvm))
 		return ret;
 
+	mutex_lock(&kvm->arch.config_lock);
+
+	if (irqchip_in_kernel(kvm)) {
+		ret = -EEXIST;
+		goto out_unlock;
+	}
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
+	mutex_unlock(&kvm->arch.config_lock);
 	unlock_all_vcpus(kvm);
 	return ret;
 }
@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.config_lock);
 		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
 	}
 
 	return ret;
 }
@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
  * The function is generally called when nr_spis has been explicitly set
  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
  * vgic_initialized() returns true when this function has succeeded.
- * Must be called with kvm->lock held!
  */
 int vgic_init(struct kvm *kvm)
 {
@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
 	int ret = 0, i;
 	unsigned long idx;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (vgic_initialized(kvm))
 		return 0;
@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 }
 
-/* To be called with kvm->lock held */
 static void __kvm_vgic_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	vgic_debug_destroy(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
 void kvm_vgic_destroy(struct kvm *kvm)
 {
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	__kvm_vgic_destroy(kvm);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 }
 
 /**
@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
 			return -EBUSY;
 
-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.config_lock);
 		ret = vgic_init(kvm);
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.config_lock);
 	}
 
 	return ret;
@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (likely(vgic_ready(kvm)))
 		return 0;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	if (vgic_ready(kvm))
 		goto out;
@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 	return ret;
 }
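
Note that kvm_vgic_create() used to test irqchip_in_kernel() before taking any lock; the first hunk above moves the test under the locks so two concurrent callers can no longer both pass it. The general check-under-lock pattern, reduced to a toy example:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct demo_vm {
        struct mutex config_lock;
        bool irqchip_created;
    };

    static int demo_create_irqchip(struct demo_vm *vm)
    {
        int ret = 0;

        mutex_lock(&vm->config_lock);

        /*
         * The existence check must sit under the lock: checking first
         * and locking afterwards leaves a window where two callers both
         * see "not created" and proceed.
         */
        if (vm->irqchip_created) {
            ret = -EEXIST;
            goto out_unlock;
        }

        vm->irqchip_created = true;
    out_unlock:
        mutex_unlock(&vm->config_lock);
        return ret;
    }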

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c

@@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	mutex_init(&its->its_lock);
 	mutex_init(&its->cmd_lock);
 
+	/* Yep, even more trickery for lock ordering... */
+#ifdef CONFIG_LOCKDEP
+	mutex_lock(&dev->kvm->arch.config_lock);
+	mutex_lock(&its->cmd_lock);
+	mutex_lock(&its->its_lock);
+	mutex_unlock(&its->its_lock);
+	mutex_unlock(&its->cmd_lock);
+	mutex_unlock(&dev->kvm->arch.config_lock);
+#endif
+
 	its->vgic_its_base = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&its->device_list);
@@ -2045,6 +2055,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 	mutex_lock(&dev->kvm->lock);
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
+	}
+
+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
 		ret = -ENXIO;
 		goto out;
@@ -2058,11 +2075,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 		goto out;
 	}
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	addr = its->vgic_its_base + offset;
 
 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2076,8 +2088,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 	} else {
 		*reg = region->its_read(dev->kvm, its, addr, len);
 	}
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
@@ -2749,14 +2762,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		return 0;
 
 	mutex_lock(&kvm->lock);
-	mutex_lock(&its->its_lock);
 
 	if (!lock_all_vcpus(kvm)) {
-		mutex_unlock(&its->its_lock);
 		mutex_unlock(&kvm->lock);
 		return -EBUSY;
 	}
 
+	mutex_lock(&kvm->arch.config_lock);
+	mutex_lock(&its->its_lock);
+
 	switch (attr) {
 	case KVM_DEV_ARM_ITS_CTRL_RESET:
 		vgic_its_reset(kvm, its);
@@ -2769,8 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		break;
 	}
 
-	unlock_all_vcpus(kvm);
 	mutex_unlock(&its->its_lock);
+	mutex_unlock(&kvm->arch.config_lock);
+	unlock_all_vcpus(kvm);
 	mutex_unlock(&kvm->lock);
 	return ret;
 }
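
The vgic-its.c hunks also illustrate the release discipline the new hierarchy imposes: locks are dropped in exact reverse order of acquisition (its_lock, then config_lock, then the per-vCPU mutexes, then kvm->lock). A schematic of the bracketing in vgic_its_ctrl() above, with pared-down stand-in types and a placeholder body:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct demo_its {
        struct mutex its_lock;
    };

    struct demo_vm {
        struct mutex lock;           /* stands in for kvm->lock */
        struct mutex config_lock;    /* stands in for kvm->arch.config_lock */
    };

    /* Stand-ins for KVM's lock_all_vcpus()/unlock_all_vcpus() helpers. */
    static bool demo_lock_all_vcpus(struct demo_vm *vm) { return true; }
    static void demo_unlock_all_vcpus(struct demo_vm *vm) { }

    static int demo_its_ctrl(struct demo_vm *vm, struct demo_its *its)
    {
        mutex_lock(&vm->lock);
        if (!demo_lock_all_vcpus(vm)) {    /* every vcpu->mutex */
            mutex_unlock(&vm->lock);
            return -EBUSY;
        }
        mutex_lock(&vm->config_lock);
        mutex_lock(&its->its_lock);

        /* ... the actual ITS operation would run here ... */

        /* Release strictly in reverse order of acquisition. */
        mutex_unlock(&its->its_lock);
        mutex_unlock(&vm->config_lock);
        demo_unlock_all_vcpus(vm);
        mutex_unlock(&vm->lock);
        return 0;
    }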

diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c

@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	int r;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 		r = -ENODEV;
 	}
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	return r;
 }
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 		if (get_user(addr, uaddr))
 			return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	switch (attr->attr) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	}
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	if (!r && !write)
 		r = put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 		    (val & 31))
 			return -EINVAL;
 
-		mutex_lock(&dev->kvm->lock);
+		mutex_lock(&dev->kvm->arch.config_lock);
 
 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
 			ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 			dev->kvm->arch.vgic.nr_spis =
 				val - VGIC_NR_PRIVATE_IRQS;
 
-		mutex_unlock(&dev->kvm->lock);
+		mutex_unlock(&dev->kvm->arch.config_lock);
 
 		return ret;
 	}
 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			mutex_lock(&dev->kvm->lock);
+			mutex_lock(&dev->kvm->arch.config_lock);
 			r = vgic_init(dev->kvm);
-			mutex_unlock(&dev->kvm->lock);
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			return r;
 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
 			/*
@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 				mutex_unlock(&dev->kvm->lock);
 				return -EBUSY;
 			}
+
+			mutex_lock(&dev->kvm->arch.config_lock);
 			r = vgic_v3_save_pending_tables(dev->kvm);
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			unlock_all_vcpus(dev->kvm);
 			mutex_unlock(&dev->kvm->lock);
 			return r;
@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 	mutex_lock(&dev->kvm->lock);
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
+	}
+
+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && !is_write)
@@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 	mutex_lock(&dev->kvm->lock);
 
-	if (unlikely(!vgic_initialized(dev->kvm))) {
-		ret = -EBUSY;
-		goto out;
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
 	}
 
-	if (!lock_all_vcpus(dev->kvm)) {
+	mutex_lock(&dev->kvm->arch.config_lock);
+
+	if (unlikely(!vgic_initialized(dev->kvm))) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && uaccess && !is_write) {

diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c

@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 	case GICD_CTLR: {
 		bool was_enabled, is_hwsgi;
 
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.config_lock);
 
 		was_enabled = dist->enabled;
 		is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 		else if (!was_enabled && dist->enabled)
 			vgic_kick_vcpus(vcpu->kvm);
 
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
 		break;
 	}
 	case GICD_TYPER:

diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c

@@ -530,13 +530,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 val;
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	val = __vgic_mmio_read_active(vcpu, addr, len);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 
 	return val;
 }
@@ -625,13 +625,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -662,13 +662,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,

diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c

@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
  * @kvm:	Pointer to the VM being initialized
  *
  * We may be called each time a vITS is created, or when the
- * vgic is initialized. This relies on kvm->lock to be
- * held. In both cases, the number of vcpus should now be
- * fixed.
+ * vgic is initialized. In both cases, the number of vcpus
+ * should now be fixed.
  */
 int vgic_v4_init(struct kvm *kvm)
 {
@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
 	int nr_vcpus, ret;
 	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (!kvm_vgic_global_state.has_gicv4)
 		return 0; /* Nothing to see here... move along. */
@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
 /**
  * vgic_v4_teardown - Free the GICv4 data structures
  * @kvm:	Pointer to the VM being destroyed
- *
- * Relies on kvm->lock to be held.
 */
 void vgic_v4_teardown(struct kvm *kvm)
 {
 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
 	int i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (!its_vm->vpes)
 		return;

diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c

@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 /*
  * Locking order is always:
  * kvm->lock (mutex)
- *   its->cmd_lock (mutex)
- *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
- *         kvm->lpi_list_lock		must be taken with IRQs disabled
- *           vgic_irq->irq_lock		must be taken with IRQs disabled
+ *   vcpu->mutex (mutex)
+ *     kvm->arch.config_lock (mutex)
+ *       its->cmd_lock (mutex)
+ *         its->its_lock (mutex)
+ *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *             kvm->lpi_list_lock		must be taken with IRQs disabled
+ *               vgic_irq->irq_lock		must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower