Mirror of https://github.com/tbsdtv/linux_media.git (synced 2025-07-23 20:51:03 +02:00)
Merge tag 'for-linus-5.19-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull more xen updates from Juergen Gross:
 "Two cleanup patches for Xen related code and (more important) an
  update of MAINTAINERS for Xen, as Boris Ostrovsky decided to step
  down"

* tag 'for-linus-5.19-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: replace xen_remap() with memremap()
  MAINTAINERS: Update Xen maintainership
  xen: switch gnttab_end_foreign_access() to take a struct page pointer
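For context, the gnttab_end_foreign_access() change in this pull switches the second argument from a kernel virtual address (unsigned long, with 0UL meaning "nothing to free") to a struct page pointer (with NULL meaning "nothing to free"). A minimal sketch of the before/after calling convention, using a hypothetical frontend teardown helper that is not taken from the diff below:

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Hypothetical helper; 'ref' and 'page' are assumed to come from an earlier
 * gnttab_grant_foreign_access() / alloc_page() pair. */
static void example_teardown(grant_ref_t ref, struct page *page)
{
	/* Old calling convention (before this pull):
	 *   gnttab_end_foreign_access(ref, (unsigned long)page_address(page));
	 */
	gnttab_end_foreign_access(ref, page);	/* new: pass the struct page */
}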
MAINTAINERS: 18 lines changed
@@ -21744,23 +21744,29 @@ F: arch/arm64/include/asm/xen/
 F: arch/arm64/xen/
 
 XEN HYPERVISOR INTERFACE
-M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
 M: Juergen Gross <jgross@suse.com>
-R: Stefano Stabellini <sstabellini@kernel.org>
+M: Stefano Stabellini <sstabellini@kernel.org>
+R: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
 L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
 F: Documentation/ABI/stable/sysfs-hypervisor-xen
 F: Documentation/ABI/testing/sysfs-hypervisor-xen
-F: arch/x86/include/asm/pvclock-abi.h
-F: arch/x86/include/asm/xen/
-F: arch/x86/platform/pvh/
-F: arch/x86/xen/
 F: drivers/*/xen-*front.c
 F: drivers/xen/
 F: include/uapi/xen/
 F: include/xen/
 
+XEN HYPERVISOR X86
+M: Juergen Gross <jgross@suse.com>
+R: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+S: Supported
+F: arch/x86/include/asm/pvclock-abi.h
+F: arch/x86/include/asm/xen/
+F: arch/x86/platform/pvh/
+F: arch/x86/xen/
+
 XEN NETWORK BACKEND DRIVER
 M: Wei Liu <wei.liu@kernel.org>
 M: Paul Durrant <paul@xen.org>
@@ -347,9 +347,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
-#define xen_remap(cookie, size) ioremap((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 static inline bool xen_arch_need_swiotlb(struct device *dev,
 phys_addr_t phys,
 dma_addr_t dev_addr)
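With the xen_remap()/xen_unmap() wrappers gone, callers map the shared pages directly with memremap()/memunmap(), as the hvc_xen, grant-table and xenbus hunks below do. A minimal sketch of the replacement pattern, assuming a caller that holds a guest frame number gfn (names are illustrative, not from this patch):

#include <linux/io.h>	/* memremap(), memunmap(), MEMREMAP_WB */
#include <xen/page.h>	/* XEN_PAGE_SHIFT, XEN_PAGE_SIZE */

static void *example_map_shared_page(unsigned long gfn)
{
	/* Was: xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE), i.e. ioremap() */
	return memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
}

static void example_unmap_shared_page(void *vaddr)
{
	/* Was: xen_unmap(vaddr), i.e. iounmap() */
	memunmap(vaddr);
}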
@@ -1221,7 +1221,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 list_del(&persistent_gnt->node);
 if (persistent_gnt->gref != INVALID_GRANT_REF) {
 gnttab_end_foreign_access(persistent_gnt->gref,
-0UL);
+NULL);
 rinfo->persistent_gnts_c--;
 }
 if (info->feature_persistent)
@@ -1244,7 +1244,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 rinfo->shadow[i].req.u.rw.nr_segments;
 for (j = 0; j < segs; j++) {
 persistent_gnt = rinfo->shadow[i].grants_used[j];
-gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+gnttab_end_foreign_access(persistent_gnt->gref, NULL);
 if (info->feature_persistent)
 __free_page(persistent_gnt->page);
 kfree(persistent_gnt);
@@ -1259,7 +1259,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 
 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
-gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+gnttab_end_foreign_access(persistent_gnt->gref, NULL);
 __free_page(persistent_gnt->page);
 kfree(persistent_gnt);
 }
@@ -481,7 +481,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
 error_evtchan:
 xenbus_free_evtchn(dev, evtchn);
 error_grant:
-gnttab_end_foreign_access(info->gref, 0UL);
+gnttab_end_foreign_access(info->gref, NULL);
 info->gref = -1;
 return ret;
 }
@@ -492,7 +492,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
 unbind_from_irqhandler(info->irq, info);
 info->irq = -1;
 if (info->gref >= 0)
-gnttab_end_foreign_access(info->gref, 0UL);
+gnttab_end_foreign_access(info->gref, NULL);
 info->gref = -1;
 }
 
@@ -1386,7 +1386,7 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
 queue->tx_skbs[i] = NULL;
 get_page(queue->grant_tx_page[i]);
 gnttab_end_foreign_access(queue->grant_tx_ref[i],
-(unsigned long)page_address(queue->grant_tx_page[i]));
+queue->grant_tx_page[i]);
 queue->grant_tx_page[i] = NULL;
 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
@@ -1418,8 +1418,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 * foreign access is ended (which may be deferred).
 */
 get_page(page);
-gnttab_end_foreign_access(ref,
-(unsigned long)page_address(page));
+gnttab_end_foreign_access(ref, page);
 queue->grant_rx_ref[id] = INVALID_GRANT_REF;
 
 kfree_skb(skb);
@@ -1760,7 +1759,7 @@ static void xennet_end_access(int ref, void *page)
 {
 /* This frees the page as a side-effect */
 if (ref != INVALID_GRANT_REF)
-gnttab_end_foreign_access(ref, (unsigned long)page);
+gnttab_end_foreign_access(ref, virt_to_page(page));
 }
 
 static void xennet_disconnect_backend(struct netfront_info *info)
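A driver that only tracks a ring's kernel virtual address, as xennet_end_access() above does, converts it with virt_to_page() before handing it to the grant core. A short sketch with a hypothetical helper, valid only for directly mapped allocations such as get_free_page()/alloc_page() memory:

#include <linux/mm.h>
#include <xen/grant_table.h>

static void example_end_access_vaddr(grant_ref_t ref, void *ring_vaddr)
{
	if (ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(ref, virt_to_page(ring_vaddr));
}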
@@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
 if (r < 0 || v == 0)
 goto err;
 gfn = v;
-info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
 if (info->intf == NULL)
 goto err;
 info->vtermno = HVC_COOKIE;
@@ -175,8 +175,6 @@ undo:
 
 static void __del_gref(struct gntalloc_gref *gref)
 {
-unsigned long addr;
-
 if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 uint8_t *tmp = kmap_local_page(gref->page);
 tmp[gref->notify.pgoff] = 0;
@@ -190,10 +188,9 @@ static void __del_gref(struct gntalloc_gref *gref)
 gref->notify.flags = 0;
 
 if (gref->gref_id) {
-if (gref->page) {
-addr = (unsigned long)page_to_virt(gref->page);
-gnttab_end_foreign_access(gref->gref_id, addr);
-} else
+if (gref->page)
+gnttab_end_foreign_access(gref->gref_id, gref->page);
+else
 gnttab_free_grant_reference(gref->gref_id);
 }
 
@@ -524,7 +524,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
 for (i = 0; i < count; i++)
 if (refs[i] != INVALID_GRANT_REF)
-gnttab_end_foreign_access(refs[i], 0UL);
+gnttab_end_foreign_access(refs[i], NULL);
 }
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
@@ -430,13 +430,13 @@ int gnttab_try_end_foreign_access(grant_ref_t ref)
 }
 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
 
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
 {
 if (gnttab_try_end_foreign_access(ref)) {
-if (page != 0)
-put_page(virt_to_page(page));
+if (page)
+put_page(page);
 } else
-gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
+gnttab_add_deferred(ref, page);
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 
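The implementation above shows why passing a struct page is handier: if the remote domain still holds the grant, the page is queued via gnttab_add_deferred() and freed later, so the caller must treat ownership as transferred. A minimal sketch of a caller respecting that, with a hypothetical driver state struct (not part of this patch):

#include <linux/mm.h>
#include <xen/grant_table.h>

struct example_ring_state {		/* hypothetical driver state */
	grant_ref_t ring_ref;
	struct page *ring_page;
};

static void example_free_ring(struct example_ring_state *s)
{
	if (s->ring_ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(s->ring_ref, s->ring_page);
	/* Freeing may be deferred; do not touch the page again. */
	s->ring_ref = INVALID_GRANT_REF;
	s->ring_page = NULL;
}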
@@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 if (xen_auto_xlat_grant_frames.count)
 return -EINVAL;
 
-vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
 if (vaddr == NULL) {
 pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 &addr);
@@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 }
 pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 if (!pfn) {
-xen_unmap(vaddr);
+memunmap(vaddr);
 return -ENOMEM;
 }
 for (i = 0; i < max_nr_gframes; i++)
@@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
 if (!xen_auto_xlat_grant_frames.count)
 return;
 kfree(xen_auto_xlat_grant_frames.pfn);
-xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+memunmap(xen_auto_xlat_grant_frames.vaddr);
 
 xen_auto_xlat_grant_frames.pfn = NULL;
 xen_auto_xlat_grant_frames.count = 0;
@@ -238,8 +238,8 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
 spin_unlock(&bedata->socket_lock);
 
 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
-gnttab_end_foreign_access(map->active.ring->ref[i], 0);
-gnttab_end_foreign_access(map->active.ref, 0);
+gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+gnttab_end_foreign_access(map->active.ref, NULL);
 free_page((unsigned long)map->active.ring);
 
 kfree(map);
@@ -1117,7 +1117,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
 }
 }
 if (bedata->ref != -1)
-gnttab_end_foreign_access(bedata->ref, 0);
+gnttab_end_foreign_access(bedata->ref, NULL);
 kfree(bedata->ring.sring);
 kfree(bedata);
 xenbus_switch_state(dev, XenbusStateClosed);
@@ -135,7 +135,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
 
 for (i = 0; i < buf->num_grefs; i++)
 if (buf->grefs[i] != INVALID_GRANT_REF)
-gnttab_end_foreign_access(buf->grefs[i], 0UL);
+gnttab_end_foreign_access(buf->grefs[i], NULL);
 }
 kfree(buf->grefs);
 kfree(buf->directory);
@@ -439,7 +439,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
 
 for (i = 0; i < nr_pages; i++) {
 if (grefs[i] != INVALID_GRANT_REF) {
-gnttab_end_foreign_access(grefs[i], 0);
+gnttab_end_foreign_access(grefs[i], NULL);
 grefs[i] = INVALID_GRANT_REF;
 }
 }
@@ -752,8 +752,8 @@ static void xenbus_probe(void)
 xenstored_ready = 1;
 
 if (!xen_store_interface) {
-xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-XEN_PAGE_SIZE);
+xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+XEN_PAGE_SIZE, MEMREMAP_WB);
 /*
 * Now it is safe to free the IRQ used for xenstore late
 * initialization. No need to unbind: it is about to be
@@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
 #endif
 xen_store_gfn = (unsigned long)v;
 xen_store_interface =
-xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-XEN_PAGE_SIZE);
+memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+XEN_PAGE_SIZE, MEMREMAP_WB);
 if (xen_store_interface->connection != XENSTORE_CONNECTED)
 wait = true;
 }
@@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 bool xen_arch_need_swiotlb(struct device *dev,
 phys_addr_t phys,
 dma_addr_t dev_addr);
@@ -101,10 +101,10 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too. Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
-* some time later. page may be 0, in which case no freeing will occur.
+* some time later. page may be NULL, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
-* specified as 0 it is not allowed to just reuse the page for other
+* specified as NULL it is not allowed to just reuse the page for other
 * purposes immediately. gnttab_end_foreign_access() will take an additional
 * reference to the granted page in this case, which is dropped only after
 * the grant is no longer in use.
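The comment above describes two modes of use; a small hypothetical contrast of the two (names are illustrative, not part of this patch):

#include <linux/mm.h>
#include <linux/types.h>
#include <xen/grant_table.h>

static void example_end_access(grant_ref_t ref, struct page *page,
			       bool let_grant_core_free_it)
{
	if (let_grant_core_free_it) {
		/* The grant core frees 'page' once foreign access has ended,
		 * possibly some time later (deferred). */
		gnttab_end_foreign_access(ref, page);
	} else {
		/* No freeing by the core, but the granted page still must not
		 * be reused immediately: the other side may access it until
		 * the grant is actually released. */
		gnttab_end_foreign_access(ref, NULL);
	}
}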
@@ -112,7 +112,7 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
 * via free_pages_exact()) in order to avoid high order pages.
 */
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
 
 /*
 * End access through the given grant reference, iff the grant entry is
@@ -279,13 +279,13 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
 grant_ref_t ref;
 
 ref = priv->rings[i].intf->ref[j];
-gnttab_end_foreign_access(ref, 0);
+gnttab_end_foreign_access(ref, NULL);
 }
 free_pages_exact(priv->rings[i].data.in,
 1UL << (priv->rings[i].intf->ring_order +
 XEN_PAGE_SHIFT));
 }
-gnttab_end_foreign_access(priv->rings[i].ref, 0);
+gnttab_end_foreign_access(priv->rings[i].ref, NULL);
 free_page((unsigned long)priv->rings[i].intf);
 }
 kfree(priv->rings);
@@ -353,10 +353,10 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
 out:
 if (bytes) {
 for (i--; i >= 0; i--)
-gnttab_end_foreign_access(ring->intf->ref[i], 0);
+gnttab_end_foreign_access(ring->intf->ref[i], NULL);
 free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
 }
-gnttab_end_foreign_access(ring->ref, 0);
+gnttab_end_foreign_access(ring->ref, NULL);
 free_page((unsigned long)ring->intf);
 return ret;
 }