KVM: Rename mmu_notifier_* to mmu_invalidate_*
The motivation for this renaming is to make these variables and related
helper functions less mmu_notifier-bound, so that they can also be used
for non-mmu_notifier-based page invalidation. mmu_invalidate_* was chosen
to better describe the purpose of 'invalidating' a page that those
variables are used for.

  - mmu_notifier_seq/range_start/range_end are renamed to
    mmu_invalidate_seq/range_start/range_end.

  - mmu_notifier_retry{_hva} helper functions are renamed to
    mmu_invalidate_retry{_hva}.

  - mmu_notifier_count is renamed to mmu_invalidate_in_progress to avoid
    confusion with mn_active_invalidate_count.

  - While here, also update kvm_inc/dec_notifier_count() to
    kvm_mmu_invalidate_begin/end() to match the change for
    mmu_notifier_count.

No functional change intended.

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
Message-Id: <20220816125322.1110439-3-chao.p.peng@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
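For readers unfamiliar with the pattern behind these fields, the following is a minimal sketch of the bookkeeping they implement, written with the new names. It is illustrative only: the sketch_ helper names are hypothetical stand-ins, the real logic lives in virt/kvm/kvm_main.c and handles locking assertions and corner cases omitted here.

/*
 * Simplified, assumption-labelled sketch of the invalidation-window
 * bookkeeping (NOT the code changed by this commit).  Assumes kernel
 * context: struct kvm, min()/max(), smp_wmb(), and kvm->mmu_lock held
 * by the caller.
 */
static void sketch_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
					unsigned long end)
{
	/* Open an invalidation window and record the affected HVA range. */
	kvm->mmu_invalidate_in_progress++;
	if (kvm->mmu_invalidate_in_progress == 1) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/* Overlapping invalidations: widen the tracked range. */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}

static void sketch_mmu_invalidate_end(struct kvm *kvm)
{
	/*
	 * Bump the sequence count before closing the window so that a
	 * racing fault handler sees either a non-zero
	 * mmu_invalidate_in_progress or a changed mmu_invalidate_seq
	 * (paired with the smp_rmb() in mmu_invalidate_retry()).
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	kvm->mmu_invalidate_in_progress--;
}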
@@ -765,10 +765,10 @@ struct kvm {
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	struct mmu_notifier mmu_notifier;
-	unsigned long mmu_notifier_seq;
-	long mmu_notifier_count;
-	unsigned long mmu_notifier_range_start;
-	unsigned long mmu_notifier_range_end;
+	unsigned long mmu_invalidate_seq;
+	long mmu_invalidate_in_progress;
+	unsigned long mmu_invalidate_range_start;
+	unsigned long mmu_invalidate_range_end;
 #endif
 	struct list_head devices;
 	u64 manual_dirty_log_protect;
@@ -1357,10 +1357,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
 
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
-			    unsigned long end);
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
-			    unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+			      unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+			    unsigned long end);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -1907,42 +1907,44 @@ extern const struct kvm_stats_header kvm_vcpu_stats_header;
 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_invalidate_in_progress))
 		return 1;
 	/*
-	 * Ensure the read of mmu_notifier_count happens before the read
-	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
-	 * mmu_notifier_invalidate_range_end to make sure that the caller
-	 * either sees the old (non-zero) value of mmu_notifier_count or
-	 * the new (incremented) value of mmu_notifier_seq.
-	 * PowerPC Book3s HV KVM calls this under a per-page lock
-	 * rather than under kvm->mmu_lock, for scalability, so
-	 * can't rely on kvm->mmu_lock to keep things ordered.
+	 * Ensure the read of mmu_invalidate_in_progress happens before
+	 * the read of mmu_invalidate_seq.  This interacts with the
+	 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+	 * that the caller either sees the old (non-zero) value of
+	 * mmu_invalidate_in_progress or the new (incremented) value of
+	 * mmu_invalidate_seq.
+	 *
+	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
+	 * than under kvm->mmu_lock, for scalability, so can't rely on
+	 * kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_invalidate_seq != mmu_seq)
 		return 1;
 	return 0;
 }
 
-static inline int mmu_notifier_retry_hva(struct kvm *kvm,
-					 unsigned long mmu_seq,
-					 unsigned long hva)
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+					   unsigned long mmu_seq,
+					   unsigned long hva)
 {
 	lockdep_assert_held(&kvm->mmu_lock);
 	/*
-	 * If mmu_notifier_count is non-zero, then the range maintained by
-	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
-	 * might be being invalidated. Note that it may include some false
+	 * If mmu_invalidate_in_progress is non-zero, then the range maintained
+	 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+	 * that might be being invalidated. Note that it may include some false
 	 * positives, due to shortcuts when handing concurrent invalidations.
 	 */
-	if (unlikely(kvm->mmu_notifier_count) &&
-	    hva >= kvm->mmu_notifier_range_start &&
-	    hva < kvm->mmu_notifier_range_end)
+	if (unlikely(kvm->mmu_invalidate_in_progress) &&
+	    hva >= kvm->mmu_invalidate_range_start &&
+	    hva < kvm->mmu_invalidate_range_end)
 		return 1;
-	if (kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_invalidate_seq != mmu_seq)
 		return 1;
 	return 0;
 }
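To complement the hunk above, here is a hedged sketch of the consumer side, i.e. how an architecture fault path would typically use the renamed helpers. The function name sketch_map_hva() and the -EAGAIN return convention are illustrative assumptions, not code from this commit; the pattern is: snapshot mmu_invalidate_seq, do the sleepable page lookup, then re-check under kvm->mmu_lock with mmu_invalidate_retry_hva() before installing the mapping.

/*
 * Illustrative consumer of the renamed helpers; not taken from this
 * commit.  Assumes kernel context and that kvm->mmu_lock is an rwlock
 * (some architectures use a spinlock instead).
 */
static int sketch_map_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long mmu_seq;

	mmu_seq = kvm->mmu_invalidate_seq;
	/* Pairs with the smp_wmb() in the invalidation end path. */
	smp_rmb();

	/* ... sleepable work: resolve/pin the host page backing @hva ... */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
		/* An invalidation raced with us; the caller restarts the fault. */
		write_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}

	/* ... safe to install the stage-2/shadow mapping for @hva here ... */

	write_unlock(&kvm->mmu_lock);
	return 0;
}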