mirror of https://github.com/tbsdtv/linux_media.git (synced 2025-07-23 20:51:03 +02:00)
Merge tag 'for-linus-5.12b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Two fix series and a single cleanup:

   - a small cleanup patch to remove unneeded symbol exports

   - a series to cleanup Xen grant handling (avoiding allocations in
     some cases, and using common defines for "invalid" values)

   - a series to address a race issue in Xen event channel handling"

* tag 'for-linus-5.12b-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Xen/gntdev: don't needlessly use kvcalloc()
  Xen/gnttab: introduce common INVALID_GRANT_{HANDLE,REF}
  Xen/gntdev: don't needlessly allocate k{,un}map_ops[]
  Xen: drop exports of {set,clear}_foreign_p2m_mapping()
  xen/events: avoid handling the same event on two cpus at the same time
  xen/events: don't unmask an event channel when an eoi is pending
  xen/events: reset affinity of 2-level event when tearing it down
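The event channel series below replaces the old test_and_set_mask()/unmask_evtchn() pairing with a per-channel mask_reason bit set guarded by a spinlock: each caller masks the channel for a named reason, and the channel is only truly unmasked once no reason remains. As a standalone illustration of that pattern, here is a minimal userspace sketch (the struct and helper layout is ours; only the reason values and the do_mask()/do_unmask() shape mirror the patch):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reason bits mirror EVT_MASK_REASON_* from the patch below. */
    #define MASK_REASON_EXPLICIT    0x01
    #define MASK_REASON_TEMPORARY   0x02
    #define MASK_REASON_EOI_PENDING 0x04

    struct channel {
            pthread_mutex_t lock;
            uint8_t mask_reason;    /* bitwise OR of active mask reasons */
            int masked;             /* stands in for the hardware mask bit */
    };

    /* Mask for one reason; only touch the "hardware" bit on 0 -> nonzero. */
    static void do_mask(struct channel *ch, uint8_t reason)
    {
            pthread_mutex_lock(&ch->lock);
            if (!ch->mask_reason)
                    ch->masked = 1;
            ch->mask_reason |= reason;
            pthread_mutex_unlock(&ch->lock);
    }

    /* Drop one reason; unmask only when no reason remains. */
    static void do_unmask(struct channel *ch, uint8_t reason)
    {
            pthread_mutex_lock(&ch->lock);
            ch->mask_reason &= ~reason;
            if (!ch->mask_reason)
                    ch->masked = 0;
            pthread_mutex_unlock(&ch->lock);
    }

    int main(void)
    {
            struct channel ch = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

            do_mask(&ch, MASK_REASON_EXPLICIT);
            do_mask(&ch, MASK_REASON_EOI_PENDING);
            do_unmask(&ch, MASK_REASON_EXPLICIT);
            printf("masked=%d\n", ch.masked);   /* 1: EOI still pending */
            do_unmask(&ch, MASK_REASON_EOI_PENDING);
            printf("masked=%d\n", ch.masked);   /* 0: no reason left */
            return 0;
    }

This is why an unmask request no longer clobbers a pending EOI: EVT_MASK_REASON_EOI_PENDING keeps the channel masked until the lateeoi handling drops that reason.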
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -11,6 +11,7 @@
 
 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/grant_table.h>
 #include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 		map_ops[i].status = GNTST_general_error;
 		unmap.host_addr = map_ops[i].host_addr,
 		unmap.handle = map_ops[i].handle;
-		map_ops[i].handle = ~0;
+		map_ops[i].handle = INVALID_GRANT_HANDLE;
 		if (map_ops[i].flags & GNTMAP_device_map)
 			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
 		else
@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 
 bool __set_phys_to_machine_multi(unsigned long pfn,
 				 unsigned long mfn, unsigned long nr_pages)
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -741,7 +741,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 		map_ops[i].status = GNTST_general_error;
 		unmap[0].host_addr = map_ops[i].host_addr,
 		unmap[0].handle = map_ops[i].handle;
-		map_ops[i].handle = ~0;
+		map_ops[i].handle = INVALID_GRANT_HANDLE;
 		if (map_ops[i].flags & GNTMAP_device_map)
 			unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
 		else
@@ -751,7 +751,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 		kmap_ops[i].status = GNTST_general_error;
 		unmap[1].host_addr = kmap_ops[i].host_addr,
 		unmap[1].handle = kmap_ops[i].handle;
-		kmap_ops[i].handle = ~0;
+		kmap_ops[i].handle = INVALID_GRANT_HANDLE;
 		if (kmap_ops[i].flags & GNTMAP_device_map)
 			unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
 		else
@@ -776,7 +776,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -802,7 +801,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -26,7 +26,7 @@
 #include <xen/platform_pci.h>
 
 #include <asm/xen/swiotlb-xen.h>
-#define INVALID_GRANT_REF (0)
+
 #define INVALID_EVTCHN (-1)
 
 struct pci_bus_entry {
@@ -42,7 +42,7 @@ struct pcifront_device {
 	struct list_head root_buses;
 
 	int evtchn;
-	int gnt_ref;
+	grant_ref_t gnt_ref;
 
 	int irq;
 
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
 	return EVTCHN_2L_NR_CHANNELS;
 }
 
+static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
+{
+	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+}
+
 static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
 				  unsigned int old_cpu)
 {
@@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port)
 	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
 }
 
-static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
-{
-	struct shared_info *s = HYPERVISOR_shared_info;
-	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
-}
-
 static void evtchn_2l_mask(evtchn_port_t port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
@@ -355,18 +354,27 @@ static void evtchn_2l_resume(void)
 			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
 }
 
+static int evtchn_2l_percpu_deinit(unsigned int cpu)
+{
+	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
+			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+
+	return 0;
+}
+
 static const struct evtchn_ops evtchn_ops_2l = {
 	.max_channels      = evtchn_2l_max_channels,
 	.nr_channels       = evtchn_2l_max_channels,
+	.remove            = evtchn_2l_remove,
 	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
 	.clear_pending     = evtchn_2l_clear_pending,
 	.set_pending       = evtchn_2l_set_pending,
 	.is_pending        = evtchn_2l_is_pending,
-	.test_and_set_mask = evtchn_2l_test_and_set_mask,
 	.mask              = evtchn_2l_mask,
 	.unmask            = evtchn_2l_unmask,
 	.handle_events     = evtchn_2l_handle_events,
 	.resume            = evtchn_2l_resume,
+	.percpu_deinit     = evtchn_2l_percpu_deinit,
};
 
 void __init xen_evtchn_2l_init(void)
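The new .remove hook clears the torn-down channel's bit in the old CPU's selection mask, so a channel that later reuses the same port number does not inherit stale affinity, and percpu_deinit wipes the whole mask when a CPU goes away. A toy model of that bookkeeping, with plain arrays standing in for the kernel's per-CPU data (all names here are hypothetical):

    #include <limits.h>
    #include <stdio.h>

    #define NR_CPUS       4
    #define NR_CHANNELS   64
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long cpu_mask[NR_CPUS][NR_CHANNELS / BITS_PER_WORD + 1];

    static void bind_port(unsigned port, unsigned cpu)
    {
            cpu_mask[cpu][port / BITS_PER_WORD] |= 1UL << (port % BITS_PER_WORD);
    }

    /* Analogue of evtchn_2l_remove(): drop the port from its cpu on teardown. */
    static void remove_port(unsigned port, unsigned cpu)
    {
            cpu_mask[cpu][port / BITS_PER_WORD] &= ~(1UL << (port % BITS_PER_WORD));
    }

    int main(void)
    {
            bind_port(12, 1);               /* channel 12 bound to cpu 1 */
            printf("before: %lx\n", cpu_mask[1][0]);
            remove_port(12, 1);             /* without this, a new channel on
                                               port 12 would look bound here */
            printf("after:  %lx\n", cpu_mask[1][0]);
            return 0;
    }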
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -98,13 +98,19 @@ struct irq_info {
 	short refcnt;
 	u8 spurious_cnt;
 	u8 is_accounted;
-	enum xen_irq_type type;	/* type */
+	short type;		/* type: IRQT_* */
+	u8 mask_reason;		/* Why is event channel masked */
+#define EVT_MASK_REASON_EXPLICIT	0x01
+#define EVT_MASK_REASON_TEMPORARY	0x02
+#define EVT_MASK_REASON_EOI_PENDING	0x04
+	u8 is_active;		/* Is event just being handled? */
 	unsigned irq;
 	evtchn_port_t evtchn;	/* event channel */
 	unsigned short cpu;	/* cpu bound */
 	unsigned short eoi_cpu;	/* EOI must happen on this cpu-1 */
 	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
 	u64 eoi_time;		/* Time in jiffies when to EOI. */
+	spinlock_t lock;
 
 	union {
 		unsigned short virq;
@@ -154,6 +160,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
  *   evtchn_rwlock
  *     IRQ-desc lock
  *       percpu eoi_list_lock
+ *         irq_info->lock
  */
 
 static LIST_HEAD(xen_irq_list_head);
@@ -304,6 +311,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
 	info->irq = irq;
 	info->evtchn = evtchn;
 	info->cpu = cpu;
+	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
+	spin_lock_init(&info->lock);
 
 	ret = set_evtchn_to_irq(evtchn, irq);
 	if (ret < 0)
@@ -377,6 +386,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
 static void xen_irq_info_cleanup(struct irq_info *info)
 {
 	set_evtchn_to_irq(info->evtchn, -1);
+	xen_evtchn_port_remove(info->evtchn, info->cpu);
 	info->evtchn = 0;
 	channels_on_cpu_dec(info);
 }
@@ -458,6 +468,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
 	return ret;
 }
 
+static void do_mask(struct irq_info *info, u8 reason)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	if (!info->mask_reason)
+		mask_evtchn(info->evtchn);
+
+	info->mask_reason |= reason;
+
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void do_unmask(struct irq_info *info, u8 reason)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->lock, flags);
+
+	info->mask_reason &= ~reason;
+
+	if (!info->mask_reason)
+		unmask_evtchn(info->evtchn);
+
+	spin_unlock_irqrestore(&info->lock, flags);
+}
+
 #ifdef CONFIG_X86
 static bool pirq_check_eoi_map(unsigned irq)
 {
@@ -604,7 +642,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 	}
 
 	info->eoi_time = 0;
-	unmask_evtchn(evtchn);
+	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
 }
 
 static void xen_irq_lateeoi_worker(struct work_struct *work)
@@ -773,6 +811,12 @@ static void xen_evtchn_close(evtchn_port_t port)
 		BUG();
 }
 
+static void event_handler_exit(struct irq_info *info)
+{
+	smp_store_release(&info->is_active, 0);
+	clear_evtchn(info->evtchn);
+}
+
 static void pirq_query_unmask(int irq)
 {
 	struct physdev_irq_status_query irq_status;
@@ -791,14 +835,15 @@ static void pirq_query_unmask(int irq)
 
 static void eoi_pirq(struct irq_data *data)
 {
-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	clear_evtchn(evtchn);
+	event_handler_exit(info);
 
 	if (pirq_needs_eoi(data->irq)) {
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -849,7 +894,8 @@ static unsigned int __startup_pirq(unsigned int irq)
 		goto err;
 
 out:
-	unmask_evtchn(evtchn);
+	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+
 	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
@@ -876,7 +922,7 @@ static void shutdown_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	mask_evtchn(evtchn);
+	do_mask(info, EVT_MASK_REASON_EXPLICIT);
 	xen_evtchn_close(evtchn);
 	xen_irq_info_cleanup(info);
 }
@@ -1628,6 +1674,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
 	}
 
 	info = info_for_irq(irq);
+	if (xchg_acquire(&info->is_active, 1))
+		return;
 
 	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
 	if (dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Rebind an evtchn so that it gets delivered to a specific cpu */
|
/* Rebind an evtchn so that it gets delivered to a specific cpu */
|
||||||
static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
|
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
|
||||||
{
|
{
|
||||||
struct evtchn_bind_vcpu bind_vcpu;
|
struct evtchn_bind_vcpu bind_vcpu;
|
||||||
int masked;
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (!VALID_EVTCHN(evtchn))
|
if (!VALID_EVTCHN(evtchn))
|
||||||
return -1;
|
return -1;
|
||||||
@@ -1739,7 +1787,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
|
|||||||
* Mask the event while changing the VCPU binding to prevent
|
* Mask the event while changing the VCPU binding to prevent
|
||||||
* it being delivered on an unexpected VCPU.
|
* it being delivered on an unexpected VCPU.
|
||||||
*/
|
*/
|
||||||
masked = test_and_set_mask(evtchn);
|
do_mask(info, EVT_MASK_REASON_TEMPORARY);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If this fails, it usually just indicates that we're dealing with a
|
* If this fails, it usually just indicates that we're dealing with a
|
||||||
@@ -1749,8 +1797,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
|
|||||||
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
|
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
|
||||||
bind_evtchn_to_cpu(evtchn, tcpu, false);
|
bind_evtchn_to_cpu(evtchn, tcpu, false);
|
||||||
|
|
||||||
if (!masked)
|
do_unmask(info, EVT_MASK_REASON_TEMPORARY);
|
||||||
unmask_evtchn(evtchn);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -1789,7 +1836,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
|
|||||||
unsigned int tcpu = select_target_cpu(dest);
|
unsigned int tcpu = select_target_cpu(dest);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
|
ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
|
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
|
||||||
|
|
||||||
@@ -1798,28 +1845,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
|
|||||||
|
|
||||||
static void enable_dynirq(struct irq_data *data)
|
static void enable_dynirq(struct irq_data *data)
|
||||||
{
|
{
|
||||||
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (VALID_EVTCHN(evtchn))
|
if (VALID_EVTCHN(evtchn))
|
||||||
unmask_evtchn(evtchn);
|
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void disable_dynirq(struct irq_data *data)
|
static void disable_dynirq(struct irq_data *data)
|
||||||
{
|
{
|
||||||
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (VALID_EVTCHN(evtchn))
|
if (VALID_EVTCHN(evtchn))
|
||||||
mask_evtchn(evtchn);
|
do_mask(info, EVT_MASK_REASON_EXPLICIT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ack_dynirq(struct irq_data *data)
|
static void ack_dynirq(struct irq_data *data)
|
||||||
{
|
{
|
||||||
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (!VALID_EVTCHN(evtchn))
|
if (VALID_EVTCHN(evtchn))
|
||||||
return;
|
event_handler_exit(info);
|
||||||
|
|
||||||
clear_evtchn(evtchn);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mask_ack_dynirq(struct irq_data *data)
|
static void mask_ack_dynirq(struct irq_data *data)
|
||||||
@@ -1828,18 +1876,39 @@ static void mask_ack_dynirq(struct irq_data *data)
|
|||||||
ack_dynirq(data);
|
ack_dynirq(data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void lateeoi_ack_dynirq(struct irq_data *data)
|
||||||
|
{
|
||||||
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
|
if (VALID_EVTCHN(evtchn)) {
|
||||||
|
do_mask(info, EVT_MASK_REASON_EOI_PENDING);
|
||||||
|
event_handler_exit(info);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void lateeoi_mask_ack_dynirq(struct irq_data *data)
|
||||||
|
{
|
||||||
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
|
if (VALID_EVTCHN(evtchn)) {
|
||||||
|
do_mask(info, EVT_MASK_REASON_EXPLICIT);
|
||||||
|
event_handler_exit(info);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int retrigger_dynirq(struct irq_data *data)
|
static int retrigger_dynirq(struct irq_data *data)
|
||||||
{
|
{
|
||||||
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
|
struct irq_info *info = info_for_irq(data->irq);
|
||||||
int masked;
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (!VALID_EVTCHN(evtchn))
|
if (!VALID_EVTCHN(evtchn))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
masked = test_and_set_mask(evtchn);
|
do_mask(info, EVT_MASK_REASON_TEMPORARY);
|
||||||
set_evtchn(evtchn);
|
set_evtchn(evtchn);
|
||||||
if (!masked)
|
do_unmask(info, EVT_MASK_REASON_TEMPORARY);
|
||||||
unmask_evtchn(evtchn);
|
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
@@ -1938,10 +2007,11 @@ static void restore_cpu_ipis(unsigned int cpu)
|
|||||||
/* Clear an irq's pending state, in preparation for polling on it */
|
/* Clear an irq's pending state, in preparation for polling on it */
|
||||||
void xen_clear_irq_pending(int irq)
|
void xen_clear_irq_pending(int irq)
|
||||||
{
|
{
|
||||||
evtchn_port_t evtchn = evtchn_from_irq(irq);
|
struct irq_info *info = info_for_irq(irq);
|
||||||
|
evtchn_port_t evtchn = info ? info->evtchn : 0;
|
||||||
|
|
||||||
if (VALID_EVTCHN(evtchn))
|
if (VALID_EVTCHN(evtchn))
|
||||||
clear_evtchn(evtchn);
|
event_handler_exit(info);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(xen_clear_irq_pending);
|
EXPORT_SYMBOL(xen_clear_irq_pending);
|
||||||
void xen_set_irq_pending(int irq)
|
void xen_set_irq_pending(int irq)
|
||||||
@@ -2053,8 +2123,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
|
|||||||
.irq_mask = disable_dynirq,
|
.irq_mask = disable_dynirq,
|
||||||
.irq_unmask = enable_dynirq,
|
.irq_unmask = enable_dynirq,
|
||||||
|
|
||||||
.irq_ack = mask_ack_dynirq,
|
.irq_ack = lateeoi_ack_dynirq,
|
||||||
.irq_mask_ack = mask_ack_dynirq,
|
.irq_mask_ack = lateeoi_mask_ack_dynirq,
|
||||||
|
|
||||||
.irq_set_affinity = set_affinity_irq,
|
.irq_set_affinity = set_affinity_irq,
|
||||||
.irq_retrigger = retrigger_dynirq,
|
.irq_retrigger = retrigger_dynirq,
|
||||||
|
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(evtchn_port_t port)
 	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
-static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
-{
-	event_word_t *word = event_word_from_port(port);
-	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
-}
-
 static void evtchn_fifo_mask(evtchn_port_t port)
 {
 	event_word_t *word = event_word_from_port(port);
@@ -423,7 +417,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
 	.clear_pending     = evtchn_fifo_clear_pending,
 	.set_pending       = evtchn_fifo_set_pending,
 	.is_pending        = evtchn_fifo_is_pending,
-	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
 	.mask              = evtchn_fifo_mask,
 	.unmask            = evtchn_fifo_unmask,
 	.handle_events     = evtchn_fifo_handle_events,
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -14,13 +14,13 @@ struct evtchn_ops {
 	unsigned (*nr_channels)(void);
 
 	int (*setup)(evtchn_port_t port);
+	void (*remove)(evtchn_port_t port, unsigned int cpu);
 	void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
 			    unsigned int old_cpu);
 
 	void (*clear_pending)(evtchn_port_t port);
 	void (*set_pending)(evtchn_port_t port);
 	bool (*is_pending)(evtchn_port_t port);
-	bool (*test_and_set_mask)(evtchn_port_t port);
 	void (*mask)(evtchn_port_t port);
 	void (*unmask)(evtchn_port_t port);
 
@@ -54,6 +54,13 @@ static inline int xen_evtchn_port_setup(evtchn_port_t evtchn)
 	return 0;
 }
 
+static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
+					  unsigned int cpu)
+{
+	if (evtchn_ops->remove)
+		evtchn_ops->remove(evtchn, cpu);
+}
+
 static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn,
 					       unsigned int cpu,
 					       unsigned int old_cpu)
@@ -76,11 +83,6 @@ static inline bool test_evtchn(evtchn_port_t port)
 	return evtchn_ops->is_pending(port);
 }
 
-static inline bool test_and_set_mask(evtchn_port_t port)
-{
-	return evtchn_ops->test_and_set_mask(port);
-}
-
 static inline void mask_evtchn(evtchn_port_t port)
 {
 	return evtchn_ops->mask(port);
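Note that the new .remove op is optional: xen_evtchn_port_remove() checks the pointer before calling, so the FIFO backend, which keeps no 2-level per-CPU selection mask, simply leaves it unset. The same optional-callback idiom in standalone form (hypothetical names):

    #include <stdio.h>

    typedef unsigned int evtchn_port_t;

    struct evtchn_ops {
            void (*remove)(evtchn_port_t port, unsigned int cpu); /* may be NULL */
    };

    static void port_remove(const struct evtchn_ops *ops,
                            evtchn_port_t port, unsigned int cpu)
    {
            if (ops->remove)                /* backend hook is optional */
                    ops->remove(port, cpu);
    }

    static void twol_remove(evtchn_port_t port, unsigned int cpu)
    {
            printf("2l: clear port %u on cpu %u\n", port, cpu);
    }

    int main(void)
    {
            struct evtchn_ops twol = { .remove = twol_remove };
            struct evtchn_ops fifo = { .remove = NULL };

            port_remove(&twol, 42, 1);
            port_remove(&fifo, 42, 1);      /* safely does nothing */
            return 0;
    }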
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -133,20 +133,26 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	if (NULL == add)
 		return NULL;
 
-	add->grants    = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
-	add->map_ops   = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
-	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
-	add->kmap_ops  = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
-	add->kunmap_ops = kvcalloc(count,
-				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
+	add->grants    = kvmalloc_array(count, sizeof(add->grants[0]),
+					GFP_KERNEL);
+	add->map_ops   = kvmalloc_array(count, sizeof(add->map_ops[0]),
+					GFP_KERNEL);
+	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
+					GFP_KERNEL);
 	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 	if (NULL == add->grants    ||
 	    NULL == add->map_ops   ||
 	    NULL == add->unmap_ops ||
-	    NULL == add->kmap_ops  ||
-	    NULL == add->kunmap_ops ||
 	    NULL == add->pages)
 		goto err;
+	if (use_ptemod) {
+		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
+						 GFP_KERNEL);
+		add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
+						 GFP_KERNEL);
+		if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
+			goto err;
+	}
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 	add->dma_flags = dma_flags;
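kvcalloc() zeroes its allocation; since every element of these arrays is explicitly initialized right after allocation (see the loop in the next hunk), the series switches them to kvmalloc_array(), which keeps the overflow-checked size computation but skips the memset. add->pages stays kvcalloc() because its entries must start out NULL. A userspace analogue of the overflow-checked, non-zeroing array allocation (our helper name, not a kernel API):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Like kvmalloc_array(): fail cleanly if n * size would overflow. */
    static void *malloc_array(size_t n, size_t size)
    {
            if (size && n > SIZE_MAX / size) {
                    errno = ENOMEM;
                    return NULL;
            }
            return malloc(n * size);        /* no zeroing, unlike calloc() */
    }

    int main(void)
    {
            int *handles = malloc_array(128, sizeof(*handles));

            if (!handles)
                    return 1;
            for (int i = 0; i < 128; i++)   /* caller initializes every slot */
                    handles[i] = -1;
            free(handles);
            return 0;
    }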
@@ -183,10 +189,14 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 		goto err;
 
 	for (i = 0; i < count; i++) {
-		add->map_ops[i].handle = -1;
-		add->unmap_ops[i].handle = -1;
-		add->kmap_ops[i].handle = -1;
-		add->kunmap_ops[i].handle = -1;
+		add->grants[i].domid = DOMID_INVALID;
+		add->grants[i].ref = INVALID_GRANT_REF;
+		add->map_ops[i].handle = INVALID_GRANT_HANDLE;
+		add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
+		if (use_ptemod) {
+			add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
+			add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
+		}
 	}
 
 	add->index = 0;
@@ -274,7 +284,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 			  map->grants[pgnr].ref,
 			  map->grants[pgnr].domid);
 	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
-			    -1 /* handle */);
+			    INVALID_GRANT_HANDLE);
 	return 0;
 }
 
@@ -292,7 +302,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 
 	if (!use_ptemod) {
 		/* Note: it could already be mapped */
-		if (map->map_ops[0].handle != -1)
+		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
 			return 0;
 		for (i = 0; i < map->count; i++) {
 			unsigned long addr = (unsigned long)
@@ -301,7 +311,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 					map->grants[i].ref,
 					map->grants[i].domid);
 			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
-				map->flags, -1 /* handle */);
+				map->flags, INVALID_GRANT_HANDLE);
 		}
 	} else {
 		/*
@@ -327,13 +337,13 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 				map->grants[i].ref,
 				map->grants[i].domid);
 			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
-				flags, -1);
+				flags, INVALID_GRANT_HANDLE);
 		}
 	}
 
 	pr_debug("map %d+%d\n", map->index, map->count);
-	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
-			map->pages, map->count);
+	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
+			      map->count);
 
 	for (i = 0; i < map->count; i++) {
 		if (map->map_ops[i].status == GNTST_okay)
@@ -385,7 +395,7 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 			pr_debug("unmap handle=%d st=%d\n",
 				map->unmap_ops[offset+i].handle,
 				map->unmap_ops[offset+i].status);
-		map->unmap_ops[offset+i].handle = -1;
+		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 	}
 	return err;
 }
@@ -401,13 +411,15 @@ static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 	 * already unmapped some of the grants. Only unmap valid ranges.
 	 */
 	while (pages && !err) {
-		while (pages && map->unmap_ops[offset].handle == -1) {
+		while (pages &&
+		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
 			offset++;
 			pages--;
 		}
 		range = 0;
 		while (range < pages) {
-			if (map->unmap_ops[offset+range].handle == -1)
+			if (map->unmap_ops[offset + range].handle ==
+			    INVALID_GRANT_HANDLE)
 				break;
 			range++;
 		}
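unmap_grant_pages() has to cope with holes left by earlier partial unmaps: it skips entries already marked INVALID_GRANT_HANDLE, measures the following run of still-valid handles, and unmaps that run as one batch. The scan reduced to its core, with an int array standing in for the unmap_ops (hypothetical names):

    #include <stdio.h>

    #define INVALID_HANDLE (-1)

    /* Unmap the valid runs within handles[offset .. offset+pages). */
    static void unmap_valid_ranges(int *handles, int offset, int pages)
    {
            while (pages) {
                    while (pages && handles[offset] == INVALID_HANDLE) {
                            offset++;
                            pages--;
                    }
                    int range = 0;
                    while (range < pages &&
                           handles[offset + range] != INVALID_HANDLE)
                            range++;
                    for (int i = 0; i < range; i++) /* batch unmap of the run */
                            handles[offset + i] = INVALID_HANDLE;
                    if (range)
                            printf("unmapped [%d, %d)\n", offset, offset + range);
                    offset += range;
                    pages -= range;
            }
    }

    int main(void)
    {
            int handles[] = { 7, INVALID_HANDLE, 8, 9, INVALID_HANDLE, 10 };

            unmap_valid_ranges(handles, 0, 6);  /* prints three separate runs */
            return 0;
    }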
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -50,6 +50,13 @@
 #include <linux/page-flags.h>
 #include <linux/kernel.h>
 
+/*
+ * Technically there's no reliably invalid grant reference or grant handle,
+ * so pick the value that is the most unlikely one to be observed valid.
+ */
+#define INVALID_GRANT_REF          ((grant_ref_t)-1)
+#define INVALID_GRANT_HANDLE       ((grant_handle_t)-1)
+
 #define GNTTAB_RESERVED_XENSTORE 1
 
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
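The all-ones choice keeps the new defines value-compatible with the literal sentinels they replace across this series (gntdev used -1, xenbus used ~0U): for the unsigned 32-bit grant types these are the same bit pattern. A quick standalone check, with uint32_t standing in for grant_ref_t/grant_handle_t:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t handle = (uint32_t)-1;   /* INVALID_GRANT_HANDLE */

            assert(handle == (uint32_t)~0);   /* the old ~0 / ~0U sentinel */
            assert(handle == UINT32_MAX);
            return 0;
    }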
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -51,7 +51,6 @@
 
 #define XENBUS_MAX_RING_GRANT_ORDER 4
 #define XENBUS_MAX_RING_GRANTS      (1U << XENBUS_MAX_RING_GRANT_ORDER)
-#define INVALID_GRANT_HANDLE       (~0U)
 
 /* Register callback to watch this node. */
 struct xenbus_watch