KVM: Fix comments that refer to the non-existent install_new_memslots()
Fix stale comments that were left behind when install_new_memslots() was
replaced by kvm_swap_active_memslots() as part of the scalable memslots
rework.
Fixes: a54d806688 ("KVM: Keep memslots in tree-based structures instead of array-based ones")
Signed-off-by: Jun Miao <jun.miao@intel.com>
Link: https://lore.kernel.org/r/20230223052851.1054799-1-jun.miao@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit b0d237087c
parent 7ffc2e8951
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -21,7 +21,7 @@ The acquisition orders for mutexes are as follows:
 - kvm->mn_active_invalidate_count ensures that pairs of
   invalidate_range_start() and invalidate_range_end() callbacks
   use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
-  are taken on the waiting side in install_new_memslots, so MMU notifiers
+  are taken on the waiting side when modifying memslots, so MMU notifiers
   must not take either kvm->slots_lock or kvm->slots_arch_lock.
 
 For SRCU:
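The rule this hunk documents is a waiting-side protocol: whoever publishes a new memslots array must first wait for every in-flight invalidate_range_start()/invalidate_range_end() pair to drain, so a pair never straddles two arrays. Below is a minimal userspace model of that idea using plain pthreads and made-up names; it is not the kernel code, which uses kvm->mn_invalidate_lock plus an rcuwait rather than a condition variable.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int active_invalidate_count;   /* models kvm->mn_active_invalidate_count */
static int memslots_version;          /* stands in for the active memslots array */

static void *notifier(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	active_invalidate_count++;                 /* invalidate_range_start() */
	int seen = memslots_version;
	pthread_mutex_unlock(&lock);

	/* ... invalidation work happens here ... */

	pthread_mutex_lock(&lock);
	/* The end() side must observe the same memslots as the start() side. */
	printf("pair consistent: %s\n", seen == memslots_version ? "yes" : "no");
	active_invalidate_count--;                 /* invalidate_range_end() */
	pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, notifier, NULL);

	/* Memslot update: wait out in-flight invalidations, then swap. */
	pthread_mutex_lock(&lock);
	while (active_invalidate_count)
		pthread_cond_wait(&drained, &lock);
	memslots_version++;                        /* "install" the new memslots */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}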
|
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -58,7 +58,7 @@
 
 /*
  * Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
  * This flag effectively creates a unique generation number that is used to
  * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
  * i.e. may (or may not) have come from the previous memslots generation.
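The comment touched above describes an "update in-progress" bit that acts as a poison value for cached generations: while bit 63 is set, no previously cached generation can compare equal, so cached MMIO data has to be revalidated. A self-contained sketch of just that mechanism, with illustrative constants and names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define GEN_UPDATE_IN_PROGRESS (1ULL << 63)   /* models bit 63 described above */

static uint64_t active_gen;                   /* models slots->generation */

static int cache_is_stale(uint64_t cached_gen)
{
	/* Cached generations never have bit 63 set, so any in-progress
	 * update forces a mismatch and a fresh memslot lookup. */
	return cached_gen != active_gen;
}

int main(void)
{
	uint64_t cached = active_gen;             /* cache something at generation 0 */

	active_gen |= GEN_UPDATE_IN_PROGRESS;     /* memslot update begins */
	printf("during update: stale=%d\n", cache_is_stale(cached));   /* prints 1 */

	active_gen &= ~GEN_UPDATE_IN_PROGRESS;    /* update completes... */
	active_gen++;                             /* ...and bumps the generation */
	printf("after update:  stale=%d\n", cache_is_stale(cached));   /* prints 1 */
	return 0;
}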
@@ -713,7 +713,7 @@ struct kvm {
	 * use by the VM. To be used under the slots_lock (above) or in a
	 * kvm->srcu critical section where acquiring the slots_lock would
	 * lead to deadlock with the synchronize_srcu in
-	 * install_new_memslots.
+	 * kvm_swap_active_memslots().
	 */
	struct mutex slots_arch_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
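The constraint documented here is the usual SRCU deadlock: a kvm->srcu reader that blocks on slots_lock can prevent synchronize_srcu() from ever finishing for the writer that already holds slots_lock, which is why such paths take slots_arch_lock instead. A kernel-style sketch of the bad interleaving, illustrative only and not code from the tree:

/* Writer side: holds slots_lock across the swap and then waits for readers. */
void memslot_writer(struct kvm *kvm)
{
	mutex_lock(&kvm->slots_lock);
	/* ... install new memslots ... */
	synchronize_srcu(&kvm->srcu);   /* waits for every kvm->srcu reader */
	mutex_unlock(&kvm->slots_lock);
}

/* Reader side: blocking on slots_lock inside the SRCU read section means
 * the writer's synchronize_srcu() can never complete, i.e. deadlock. */
void bad_srcu_reader(struct kvm *kvm)
{
	int idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->slots_lock);   /* never returns: the writer owns it and is waiting on us */
	mutex_unlock(&kvm->slots_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}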
|
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1298,7 +1298,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
-	 * No threads can be waiting in install_new_memslots as the
+	 * No threads can be waiting in kvm_swap_active_memslots() as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
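For context, the "manual intervention" the comment refers to is, assuming the upstream kvm_destroy_vm() of this era, nothing more than resetting the counter before the memslots are freed:

	/* Hypothetical excerpt -- see virt/kvm/kvm_main.c for the exact statement. */
	kvm->mn_active_invalidate_count = 0;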
@@ -1742,13 +1742,13 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);
 
-	/* Was released by kvm_swap_active_memslots, reacquire. */
+	/* Was released by kvm_swap_active_memslots(), reacquire. */
	mutex_lock(&kvm->slots_arch_lock);
 
	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
-	 * slots_arch_lock in install_new_memslots() and re-acquiring the lock
+	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
	 * above. Writers are required to retrieve memslots *after* acquiring
	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
	 */
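For context, the comment rewritten in this hunk sits directly above the copy-back it describes; assuming the upstream kvm_invalidate_memslot() of this era, the guarded statement is a single assignment:

	/* Hypothetical excerpt -- the comment above documents this arch-data copy-back. */
	old->arch = invalid_slot->arch;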
@@ -1810,11 +1810,11 @@ static int kvm_set_memslot(struct kvm *kvm,
	int r;
 
	/*
-	 * Released in kvm_swap_active_memslots.
+	 * Released in kvm_swap_active_memslots().
	 *
-	 * Must be held from before the current memslots are copied until
-	 * after the new memslots are installed with rcu_assign_pointer,
-	 * then released before the synchronize srcu in kvm_swap_active_memslots.
+	 * Must be held from before the current memslots are copied until after
+	 * the new memslots are installed with rcu_assign_pointer, then
+	 * released before the synchronize srcu in kvm_swap_active_memslots().
	 *
	 * When modifying memslots outside of the slots_lock, must be held
	 * before reading the pointer to the current memslots until after all
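Reading the comment fixes together, the slots_arch_lock hold window they describe is: acquire in kvm_set_memslot() before the active memslots are duplicated, publish the new set, drop the lock, and only then wait out readers. A condensed sketch of that ordering; names follow the kernel of this era but the snippet is illustrative, not a copy of the source:

	mutex_lock(&kvm->slots_arch_lock);                    /* taken in kvm_set_memslot() */
	/* ... copy the active memslots into the inactive set ... */

	/* ... later, in kvm_swap_active_memslots(): */
	rcu_assign_pointer(kvm->memslots[as_id], new_slots);  /* install the new memslots */
	mutex_unlock(&kvm->slots_arch_lock);                  /* release before waiting... */
	synchronize_srcu(&kvm->srcu);                         /* ...then wait out kvm->srcu readers */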
|