On Tue, 19 Oct 2010, Jan Beulich wrote:
> >>> On 18.10.10 at 20:14, Stefano Stabellini
> >>> <stefano.stabellini@xxxxxxxxxxxxx> wrote:
> > On Mon, 18 Oct 2010, Jan Beulich wrote:
> > The other type of pirqs are the ones that have ack_type != ACKTYPE_NONE,
> > in these cases xen waits for the guest to issue the eoi before acking
> > the machine irq so it shouldn't be possible to receive any pirqs while
> > we are handling the first one.
>
> But even in that case you do the clear after the EOI notification, i.e.
> you still have a window where you may lose an event.
>
New version of this patch:
- I am not reverting PHYSDEVOP_pirq_eoi_gmfn anymore, because I found
that Xen unconditionally reports that all pirqs need an eoi if dom0
doesn't use PHYSDEVOP_pirq_eoi_gmfn.
- This causes complications in pirq_eoi because we need to be able to
handle the case in which the pirq has to stay masked. A new hypercall,
or fixing the current interface, would also be a good idea, because the
current behaviour breaks the documented interface; see this comment on
top of the shared_info page:
 * Event channels are addressed by a "port index". Each channel is
 * associated with two bits of information:
 *  1. PENDING -- notifies the domain that there is a pending notification
 *     to be processed. This bit is cleared by the guest.
 *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
 *     will cause an asynchronous upcall to be scheduled. This bit is only
-> *     updated by the guest. It is read-only within Xen. If a channel
 *     becomes pending while the channel is masked then the 'edge' is lost
 *     (i.e., when the channel is unmasked, the guest must manually handle
 *     pending notifications as no upcall will be scheduled by Xen).
 *
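That last parenthesis is exactly the case the guest has to handle
itself: on unmask it must check PENDING and re-inject the upcall by
hand. A minimal sketch of that logic, covering only the local-vcpu
path (the real unmask_evtchn also falls back to an EVTCHNOP_unmask
hypercall when the evtchn is bound to another vcpu):

static void unmask_evtchn_sketch(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu = &s->vcpu_info[smp_processor_id()];

	sync_clear_bit(port, &s->evtchn_mask[0]);

	/* If PENDING went 0->1 while the channel was masked, the 'edge'
	 * is lost: Xen will not schedule an upcall, so we have to
	 * re-inject it ourselves. */
	if (sync_test_bit(port, &s->evtchn_pending[0]) &&
	    !sync_test_and_set_bit(port / BITS_PER_LONG,
				   &vcpu->evtchn_pending_sel))
		vcpu->evtchn_upcall_pending = 1;
}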
- I am using handle_edge_irq as the irq handler for virqs and for pirqs
that don't need an eoi (in Xen acktype == ACKTYPE_NONE, which means the
machine irq is actually edge-triggered).
- I am using handle_fasteoi_irq for pirqs that need an eoi (they
generally correspond to level triggered irqs); there is no risk of
losing interrupts because we have to eoi the irq anyway. The difference
between the two handlers is sketched right below.
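To make the distinction concrete, the control flow of the two genirq
handlers boils down to the following (heavily simplified from
kernel/irq/chip.c; locking, IRQ_DISABLED and migration handling
omitted):

static void handle_edge_irq_sketch(unsigned int irq, struct irq_desc *desc)
{
	/* ack early (for us: clear the evtchn): a new 0->1 transition
	 * during handling sets PENDING again and gets replayed, so no
	 * edge is lost. */
	desc->chip->ack(irq);
	handle_IRQ_event(irq, desc->action);
}

static void handle_fasteoi_irq_sketch(unsigned int irq, struct irq_desc *desc)
{
	/* eoi late: fine for pirqs that need an eoi, because the machine
	 * irq is held pending by Xen until we issue the eoi anyway. */
	handle_IRQ_event(irq, desc->action);
	desc->chip->eoi(irq);
}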
In any case it would be a good idea to implement a basic "retry
send_guest_pirq" in Xen, in case the first attempt fails.
This patch has the following benefits:
- it uses the very same handlers that linux would use on native for the
same irqs;
- it uses these handlers in the same way linux would use them: it lets
linux mask/unmask and ack the irq when linux wants to mask/unmask and
ack the irq.
However the result is admittedly not easy to understand, and it has to
work around the limitations of PHYSDEVOP_pirq_eoi_gmfn.
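For context, this is roughly how the eoi bitmap gets set up in the
first place (a sketch, not part of the patch below; pirq_eoi_map and
xen_init_pirq_eoi_map are illustrative names, while
pirq_eoi_does_unmask is the flag the patch actually tests):

static unsigned long *pirq_eoi_map;
static bool pirq_eoi_does_unmask;

static void xen_init_pirq_eoi_map(void)
{
	struct physdev_pirq_eoi_gmfn eoi_gmfn;

	pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);

	/* If Xen accepts the shared bitmap, PHYSDEVOP_eoi will from now
	 * on also unmask the pirq as a side effect; otherwise Xen keeps
	 * reporting that every pirq needs an eoi. */
	if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
		pirq_eoi_does_unmask = true;
}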
---
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 175e931..717b30e 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -436,21 +436,50 @@ static bool identity_mapped_irq(unsigned irq)
return irq < get_nr_hw_irqs();
}
-static void pirq_eoi(unsigned int irq)
+static void eoi_pirq(unsigned int irq)
{
struct irq_info *info = info_for_irq(irq);
struct physdev_eoi eoi = { .irq = info->u.pirq.gsi };
- bool need_eoi;
+ int evtchn = evtchn_from_irq(irq);
+ int rc = 0, need_mask = 0;
+ struct shared_info *s = HYPERVISOR_shared_info;
- need_eoi = pirq_needs_eoi(irq);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (!need_eoi || !pirq_eoi_does_unmask)
- unmask_evtchn(info->evtchn);
+ move_masked_irq(irq);
- if (need_eoi) {
- int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+ /* If the pirq doesn't need an eoi, just clear the evtchn and exit.
+ * If the pirq is currently unmasked, or !pirq_eoi_does_unmask,
+ * clear the evtchn and continue because the hypercall won't affect
+ * us anyway.
+ * If the pirq needs an eoi AND pirq_eoi_does_unmask AND the pirq is
+ * currently masked, then we have a problem, because the eoi hypercall
+ * will automatically unmask the pirq. That means we cannot clear the
+ * evtchn right away, because we could receive an unwanted evtchn
+ * notification after the hypercall and before masking the pirq
+ * again. Therefore in this case we clear the evtchn after the
+ * hypercall. */
+ if (pirq_needs_eoi(irq)) {
+ if (pirq_eoi_does_unmask)
+ need_mask = sync_test_bit(evtchn, &s->evtchn_mask[0]);
+ if (!need_mask)
+ clear_evtchn(evtchn);
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
- }
+ if (need_mask) {
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+ }
+ } else
+ clear_evtchn(evtchn);
+}
+
+static void mask_ack_pirq(unsigned int irq)
+{
+ mask_irq(irq);
+ eoi_pirq(irq);
}
static void pirq_query_unmask(int irq)
@@ -481,6 +510,7 @@ static bool probing_irq(int irq)
static unsigned int startup_pirq(unsigned int irq)
{
+ struct irq_desc *desc;
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
int evtchn = evtchn_from_irq(irq);
@@ -510,8 +540,25 @@ static unsigned int startup_pirq(unsigned int irq)
bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
+ /* If the pirq does not need an eoi then we can use handle_edge_irq
+ * and ack it right away, clearing the evtchn before calling
+ * handle_IRQ_event. If the pirq does need an eoi then we can use
+ * the fasteoi handler without losing any interrupts because the
+ * physical interrupt will still be waiting for an eoi as well.
+ *
+ * Only after EVTCHNOP_bind_pirq Xen reliably tells us what
+ * kind of pirq this is, so we have to wait until now to make the
+ * choice.
+ * Afterwards Xen might temporarily set the needs_eoi flag for a
+ * particular pirq, but that doesn't affect our choice here that
+ * depends on the nature of the interrupt. */
+ desc = irq_to_desc(irq);
+ if (!pirq_needs_eoi(irq))
+ desc->handle_irq = handle_edge_irq;
+
out:
- pirq_eoi(irq);
+ eoi_pirq(irq);
+ unmask_irq(irq);
return 0;
}
@@ -538,29 +585,6 @@ static void shutdown_pirq(unsigned int irq)
info->evtchn = 0;
}
-static void ack_pirq(unsigned int irq)
-{
- move_masked_irq(irq);
-
- pirq_eoi(irq);
-}
-
-static void end_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (WARN_ON(!desc))
- return;
-
- if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
- (IRQ_DISABLED|IRQ_PENDING)) {
- shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- pirq_eoi(irq);
- }
-}
-
static int find_irq_by_gsi(unsigned gsi)
{
int irq;
@@ -750,7 +774,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_fasteoi_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -1074,9 +1098,6 @@ static void __xen_evtchn_do_upcall(struct pt_regs *regs)
int irq = evtchn_to_irq[port];
struct irq_desc *desc;
- mask_evtchn(port);
- clear_evtchn(port);
-
if (irq != -1) {
desc = irq_to_desc(irq);
if (desc)
@@ -1202,7 +1223,13 @@ static void ack_dynirq(unsigned int irq)
move_masked_irq(irq);
if (VALID_EVTCHN(evtchn))
- unmask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(unsigned int irq)
+{
+ mask_irq(irq);
+ ack_dynirq(irq);
}
static int retrigger_irq(unsigned int irq)
@@ -1384,11 +1411,11 @@ void xen_irq_resume(void)
static struct irq_chip xen_dynamic_chip __read_mostly = {
.name = "xen-dyn",
- .disable = mask_irq,
.mask = mask_irq,
.unmask = unmask_irq,
- .eoi = ack_dynirq,
+ .ack = ack_dynirq,
+ .mask_ack = mask_ack_dynirq,
.set_affinity = set_affinity_irq,
.retrigger = retrigger_irq,
};
@@ -1409,14 +1436,12 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
.startup = startup_pirq,
.shutdown = shutdown_pirq,
- .enable = pirq_eoi,
- .unmask = unmask_irq,
-
- .disable = mask_irq,
.mask = mask_irq,
+ .unmask = unmask_irq,
- .eoi = ack_pirq,
- .end = end_pirq,
+ .ack = eoi_pirq,
+ .eoi = eoi_pirq,
+ .mask_ack = mask_ack_pirq,
.set_affinity = set_affinity_irq,