Merge tag 'vfio-v6.2-rc6' of https://github.com/awilliam/linux-vfio

Pull VFIO fixes from Alex Williamson:

 - Honor reserved regions when testing for IOMMU fine grained super page
   support, avoiding a regression on s390 for a firmware device where the
   existence of the mapping, even if unused, can trigger an error state.
   (Niklas Schnelle)

 - Fix a deadlock in releasing KVM references by using the alternate
   .release() rather than .destroy() callback for the kvm-vfio device.
   (Yi Liu)

* tag 'vfio-v6.2-rc6' of https://github.com/awilliam/linux-vfio:
  kvm/vfio: Fix potential deadlock on vfio group_lock
  vfio/type1: Respect IOMMU reserved regions in vfio_test_domain_fgsp()
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1856,25 +1856,34 @@ unwind:
  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
  * hugetlbfs is in use.
  */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
 {
-	struct page *pages;
 	int ret, order = get_order(PAGE_SIZE * 2);
+	struct vfio_iova *region;
+	struct page *pages;
+	dma_addr_t start;
 
 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!pages)
 		return;
 
-	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-	if (!ret) {
-		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+	list_for_each_entry(region, regions, list) {
+		start = ALIGN(region->start, PAGE_SIZE * 2);
+		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+			continue;
 
-		if (unmapped == PAGE_SIZE)
-			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
-		else
-			domain->fgsp = true;
+		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+		if (!ret) {
+			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+			if (unmapped == PAGE_SIZE)
+				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+			else
+				domain->fgsp = true;
+		}
+		break;
 	}
 
 	__free_pages(pages, order);
 }
@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain);
+	vfio_test_domain_fgsp(domain, &iova_copy);
 
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
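The core of the type1 change above is that the fine grained super page probe no longer assumes IOVA 0 is mappable; it walks the domain's list of valid IOVA ranges, picks the first range that can hold a 2-page-aligned, 2-page test window, and skips anything carved up by reserved regions (the s390 firmware-device case from the pull message). For illustration only, here is a small self-contained user-space sketch of that selection step; struct iova_range and pick_probe_iova() are hypothetical names for this sketch, not kernel API, and the kernel's struct vfio_iova treats 'end' as inclusive rather than exclusive.

/*
 * Illustration only: a user-space model of the probe-address selection in
 * the new vfio_test_domain_fgsp().  Hypothetical names, not kernel API.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PROBE_PAGE_SIZE	4096ULL
#define PROBE_LEN	(2 * PROBE_PAGE_SIZE)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct iova_range {
	uint64_t start;
	uint64_t end;		/* exclusive here, for simplicity */
};

/* Return the first 2-page-aligned IOVA with room for a 2-page mapping. */
static uint64_t pick_probe_iova(const struct iova_range *r, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t start = ALIGN_UP(r[i].start, PROBE_LEN);

		/* Range too small (e.g. chopped up by reserved regions)? Skip it. */
		if (start >= r[i].end || r[i].end - start < PROBE_LEN)
			continue;
		return start;
	}
	return UINT64_MAX;	/* nothing suitable: skip the probe entirely */
}

int main(void)
{
	/* First range is too small to probe; the second one is chosen. */
	const struct iova_range ranges[] = {
		{ 0x0,      0x1000   },
		{ 0x100000, 0x200000 },
	};

	printf("probe IOVA: 0x%llx\n",
	       (unsigned long long)pick_probe_iova(ranges, 2));
	return 0;
}

In the kernel patch the chosen address then replaces the hard-coded IOVA 0 in the iommu_map()/iommu_unmap() probe, and the loop breaks out after the first usable range.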
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
 	return -ENXIO;
 }
 
-static void kvm_vfio_destroy(struct kvm_device *dev)
+static void kvm_vfio_release(struct kvm_device *dev)
 {
 	struct kvm_vfio *kv = dev->private;
 	struct kvm_vfio_group *kvg, *tmp;
@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 	kvm_vfio_update_coherency(dev);
 
 	kfree(kv);
-	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
+	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .release */
 }
 
 static int kvm_vfio_create(struct kvm_device *dev, u32 type);
@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
 static struct kvm_device_ops kvm_vfio_ops = {
 	.name = "kvm-vfio",
 	.create = kvm_vfio_create,
-	.destroy = kvm_vfio_destroy,
+	.release = kvm_vfio_release,
 	.set_attr = kvm_vfio_set_attr,
 	.has_attr = kvm_vfio_has_attr,
 };
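The kvm-vfio hunks above only rename the teardown callback, but the point, as described in the pull message, is lock ordering: the final KVM reference can be dropped by vfio while it holds its group lock, and a .destroy()-time teardown that calls back into vfio would then need that same lock. Moving the work to .release(), which KVM invokes when the kvm-vfio device file descriptor is closed, keeps the teardown out of that reference-drop path; that reading of the mechanism is inferred from the pull message. The sketch below is a plain pthreads illustration of the re-entrant locking pattern being avoided, with made-up names, not kernel code; trylock is used so the example terminates instead of hanging.

/*
 * Illustration only: a teardown callback that needs a lock its caller
 * already holds self-deadlocks on a non-recursive mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a teardown that calls back into the lock owner's subsystem. */
static void teardown(void)
{
	if (pthread_mutex_trylock(&group_lock) != 0) {
		puts("would deadlock: caller already holds group_lock");
		return;
	}
	puts("teardown ran with the lock free");
	pthread_mutex_unlock(&group_lock);
}

int main(void)
{
	/* Problematic ordering: teardown runs from a final put taken under the lock. */
	pthread_mutex_lock(&group_lock);
	teardown();
	pthread_mutex_unlock(&group_lock);

	/* Reordered teardown (the .release()-style path): the lock is not held. */
	teardown();
	return 0;
}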