To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 2/2] linux/x86: use shared page indicating the need for an EOI notification
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Fri, 28 Nov 2008 09:59:25 +0000
Delivery-date: Fri, 28 Nov 2008 01:59:17 -0800
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx

As usual, this was written and tested on 2.6.27.7 and made to apply to the
2.6.18 tree without further testing.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
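
For reviewers, the mechanism in brief: the guest registers a page-aligned
bitmap with the new PHYSDEVOP_pirq_eoi_mfn hypercall, Xen keeps one bit per
PIRQ in it up to date, and PHYSDEVOP_eoi then unmasks the associated event
channel as a side effect. A condensed sketch of the registration step
(illustrative only, not part of the patch; the helper name
register_pirq_eoi_page is made up here, everything else mirrors the
xen_init_IRQ() change below, shrunk to a single page for brevity):

static bool pirq_eoi_does_unmask;

/* One bit per Xen PIRQ; page-aligned so Xen can map the bitmap. */
static DECLARE_BITMAP(pirq_needs_eoi, PAGE_SIZE * 8)
	__attribute__((__aligned__(PAGE_SIZE)));

static void register_pirq_eoi_page(void)
{
	struct physdev_pirq_eoi_mfn eoi_mfn = {
		.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT
	};

	/* Hypervisors without PHYSDEVOP_pirq_eoi_mfn fail this call; in
	 * that case the code keeps using PHYSDEVOP_irq_status_query. */
	pirq_eoi_does_unmask =
		HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0;
}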

Index: head-2008-11-25/drivers/xen/core/evtchn.c
===================================================================
--- head-2008-11-25.orig/drivers/xen/core/evtchn.c	2008-11-26 15:59:04.000000000 +0100
+++ head-2008-11-25/drivers/xen/core/evtchn.c	2008-11-26 16:00:52.000000000 +0100
@@ -130,9 +130,6 @@ DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS])
 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
 
-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
-
 #ifdef CONFIG_SMP
 
 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
@@ -786,16 +783,48 @@ static struct irq_chip dynirq_chip = {
        .retrigger = resend_irq_on_evtchn,
 };
 
-static inline void pirq_unmask_notify(int irq)
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static bool pirq_eoi_does_unmask;
+static DECLARE_BITMAP(pirq_needs_eoi, ALIGN(NR_PIRQS, PAGE_SIZE * 8))
+       __attribute__ ((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)));
+
+static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
 {
        struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
-       if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
-               VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+
+       if (pirq_eoi_does_unmask) {
+               if (test_bit(eoi.irq, pirq_needs_eoi))
+                       VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+               else
+                       unmask_evtchn(evtchn);
+       } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
+               if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
+                       struct evtchn_unmask unmask = { .port = evtchn };
+                       struct multicall_entry mcl[2];
+
+                       mcl[0].op = __HYPERVISOR_event_channel_op;
+                       mcl[0].args[0] = EVTCHNOP_unmask;
+                       mcl[0].args[1] = (unsigned long)&unmask;
+                       mcl[1].op = __HYPERVISOR_physdev_op;
+                       mcl[1].args[0] = PHYSDEVOP_eoi;
+                       mcl[1].args[1] = (unsigned long)&eoi;
+
+                       if (HYPERVISOR_multicall(mcl, 2))
+                               BUG();
+               } else {
+                       unmask_evtchn(evtchn);
+                       VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+               }
+       } else
+               unmask_evtchn(evtchn);
 }
 
 static inline void pirq_query_unmask(int irq)
 {
        struct physdev_irq_status_query irq_status;
+
+       if (pirq_eoi_does_unmask)
+               return;
        irq_status.irq = evtchn_get_xen_pirq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;
@@ -836,8 +864,7 @@ static unsigned int startup_pirq(unsigne
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
 
  out:
-       unmask_evtchn(evtchn);
-       pirq_unmask_notify(irq);
+       pirq_unmask_and_notify(evtchn, irq);
 
        return 0;
 }
@@ -889,10 +916,8 @@ static void end_pirq(unsigned int irq)
        if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
-       } else if (VALID_EVTCHN(evtchn)) {
-               unmask_evtchn(evtchn);
-               pirq_unmask_notify(irq);
-       }
+       } else if (VALID_EVTCHN(evtchn))
+               pirq_unmask_and_notify(evtchn, irq);
 }
 
 static struct hw_interrupt_type pirq_type = {
@@ -1045,6 +1070,14 @@ static int evtchn_resume(struct sys_devi
 
        init_evtchn_cpu_bindings();
 
+       if (pirq_eoi_does_unmask) {
+               struct physdev_pirq_eoi_mfn eoi_mfn;
+
+               eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+               if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn))
+                       BUG();
+       }
+
        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);
@@ -1167,6 +1200,7 @@ int evtchn_get_xen_pirq(int irq)
 void __init xen_init_IRQ(void)
 {
        unsigned int i;
+       struct physdev_pirq_eoi_mfn eoi_mfn;
 
        init_evtchn_cpu_bindings();
 
@@ -1179,6 +1213,11 @@ void __init xen_init_IRQ(void)
 
        init_evtchn_cpu_bindings();
 
+       BUG_ON(!bitmap_empty(pirq_needs_eoi, PAGE_SIZE * 8));
+       eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+       if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0)
+               pirq_eoi_does_unmask = true;
+
        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);
Index: head-2008-11-25/include/xen/interface/physdev.h
===================================================================
--- head-2008-11-25.orig/include/xen/interface/physdev.h	2008-11-26 15:59:04.000000000 +0100
+++ head-2008-11-25/include/xen/interface/physdev.h	2008-11-24 15:16:10.000000000 +0100
@@ -41,6 +41,21 @@ typedef struct physdev_eoi physdev_eoi_t
 DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
 
 /*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function: the associated event channel will
+ * then be unmasked automatically. The registered page is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_mfn          17
+struct physdev_pirq_eoi_mfn {
+    /* IN */
+    xen_pfn_t mfn;
+};
+typedef struct physdev_pirq_eoi_mfn physdev_pirq_eoi_mfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_mfn_t);
+
+/*
  * Query the status of an IRQ line.
  * @arg == pointer to physdev_irq_status_query structure.
  */
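
To make the comment above concrete: once the page has been registered, the
guest-side end-of-interrupt path reduces to the sketch below (illustrative
only; the function name end_pirq_new_protocol is made up, the identifiers
otherwise mirror the Linux side of this patch; Xen sets and clears the
bits, the guest merely tests them).

static void end_pirq_new_protocol(unsigned int evtchn, int xen_pirq)
{
	if (test_bit(xen_pirq, pirq_needs_eoi)) {
		struct physdev_eoi eoi = { .irq = xen_pirq };

		/* PHYSDEVOP_eoi now also unmasks the event channel. */
		VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
	} else {
		/* Xen needs no notification for this PIRQ; a plain
		 * unmask suffices. */
		unmask_evtchn(evtchn);
	}
}

On the pre-existing protocol the patch instead batches EVTCHNOP_unmask and
PHYSDEVOP_eoi into a single multicall when the event channel is bound to a
different CPU, saving one hypervisor entry per interrupt.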



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
