[Xen-devel] [PATCH 1/2] Xen: enabling emulated MSI injection

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 1/2] Xen: enabling emulated MSI injection
From: Wei Liu <liuw@xxxxxxxxx>
Date: Thu, 26 May 2011 11:08:41 +0800
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
commit 176dc2a26b4b9dd0fe30fab3b168722766218245
Author: Wei Liu <liuw@xxxxxxxxx>
Date:   Thu May 26 10:23:01 2011 +0800

    x86: Add a new HVMOP sub-operation to inject an emulated MSI.

    The original vmsi_deliver is renamed to vmsi_deliver_pirq; the new
    vmsi_deliver is dedicated to the actual delivery.

    Signed-off-by: Wei Liu <liuw@xxxxxxxxx>
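
Before the diff, a minimal, self-contained sketch (not part of the patch
itself) of the interface contract: how a caller would encode the MSI
address/data pair, and the field extraction hvm_inj_msi() performs on the
other side. The MSI_ADDR_BASE_LO and MSI_ADDR_DEST_ID_* values are taken
from xen/include/asm-x86/msi.h rather than from the hunks below, so treat
them as this example's assumptions:

#include <stdint.h>
#include <stdio.h>

/* Mirrors xen/include/asm-x86/msi.h; BASE_LO and DEST_ID_* are copied
 * from that header and are assumptions as far as this mail goes. */
#define MSI_ADDR_BASE_LO              0xfee00000U
#define MSI_ADDR_DEST_ID_SHIFT        12
#define MSI_ADDR_DEST_ID_MASK         0x000ff000U
#define MSI_ADDR_DESTMODE_MASK        0x4U
#define MSI_DATA_DELIVERY_MODE_SHIFT  8
#define MSI_DATA_DELIVERY_MODE_MASK   0x00000700U
#define MSI_DATA_TRIGGER_SHIFT        15
#define MSI_DATA_TRIGGER_MASK         0x00008000U
#define MSI_DATA_VECTOR_MASK          0x000000ffU

int main(void)
{
    /* What a device model would place in xen_hvm_inj_msi.addr/.data:
     * vector 0x40, physical destination APIC ID 1, fixed delivery,
     * edge triggered. */
    uint64_t addr = MSI_ADDR_BASE_LO | (1u << MSI_ADDR_DEST_ID_SHIFT);
    uint32_t data = 0x40;

    /* The same field extraction hvm_inj_msi() performs. */
    uint32_t tmp           = (uint32_t)addr;
    uint8_t  dest          = (tmp & MSI_ADDR_DEST_ID_MASK)
                             >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t  dest_mode     = !!(tmp & MSI_ADDR_DESTMODE_MASK);
    uint8_t  delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
                             >> MSI_DATA_DELIVERY_MODE_SHIFT;
    uint8_t  trig_mode     = (data & MSI_DATA_TRIGGER_MASK)
                             >> MSI_DATA_TRIGGER_SHIFT;
    uint8_t  vector        = data & MSI_DATA_VECTOR_MASK;

    printf("dest=%u dest_mode=%u delivery_mode=%u trig_mode=%u vector=%#x\n",
           dest, dest_mode, delivery_mode, trig_mode, vector);
    return 0;
}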

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b02be7b..d88e8b8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3293,6 +3293,36 @@ static int hvmop_set_pci_link_route(
     return rc;
 }

+static int hvmop_inj_msi(
+    XEN_GUEST_HANDLE(xen_hvm_inj_msi_t) uop)
+{
+    struct xen_hvm_inj_msi op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_target_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = xsm_hvm_inj_msi(d);
+    if ( rc )
+        goto out;
+
+    rc = 0;
+    hvm_inj_msi(d, op.addr, op.data);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 static int hvmop_flush_tlb_all(void)
 {
     struct domain *d = current->domain;
@@ -3571,6 +3601,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
             guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
         break;

+    case HVMOP_inj_msi:
+        rc = hvmop_inj_msi(
+            guest_handle_cast(arg, xen_hvm_inj_msi_t));
+        break;
+
     case HVMOP_set_pci_link_route:
         rc = hvmop_set_pci_link_route(
             guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index f560e39..57271e6 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -26,6 +26,7 @@
 #include <xen/irq.h>
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
+#include <asm/msi.h>

 /* Must be called with hvm_domain->irq_lock hold */
 static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
@@ -259,6 +260,24 @@ void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
             d->domain_id, link, old_isa_irq, isa_irq);
 }

+
+extern void vmsi_deliver(struct domain *d, int vector,
+                         uint8_t dest, uint8_t dest_mode,
+                         uint8_t delivery_mode, uint8_t trig_mode);
+void hvm_inj_msi(struct domain *d, u64 addr, u32 data)
+{
+    uint32_t tmp = (uint32_t) addr;
+    uint8_t  dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+    uint8_t  dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
+    uint8_t  delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
+        >> MSI_DATA_DELIVERY_MODE_SHIFT;
+    uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
+        >> MSI_DATA_TRIGGER_SHIFT;
+    uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+
+    vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+}
+
 void hvm_set_callback_via(struct domain *d, uint64_t via)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index eee802a..cc6de8b 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -65,29 +65,13 @@ static void vmsi_inj_irq(
     }
 }

-int vmsi_deliver(struct domain *d, int pirq)
+void vmsi_deliver(struct domain *d, int vector,
+                 uint8_t dest, uint8_t dest_mode,
+                 uint8_t delivery_mode, uint8_t trig_mode)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
-    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-    uint8_t dest = (uint8_t)flags;
-    uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
-    uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GFLAGS_SHIFT_DELIV_MODE;
-    uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GFLAGS_SHIFT_TRG_MODE;
     struct vlapic *target;
     struct vcpu *v;

-    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
-                "msi: dest=%x dest_mode=%x delivery_mode=%x "
-                "vector=%x trig_mode=%x\n",
-                dest, dest_mode, delivery_mode, vector, trig_mode);
-
-    if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
-    {
-        gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
-        return 0;
-    }
-
     switch ( delivery_mode )
     {
     case dest_LowestPrio:
@@ -122,6 +106,30 @@ int vmsi_deliver(struct domain *d, int pirq)
                  delivery_mode);
         break;
     }
+}
+
+int vmsi_deliver_pirq(struct domain *d, int pirq)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+    uint8_t dest = (uint8_t)flags;
+    uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
+    uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GFLAGS_SHIFT_DELIV_MODE;
+    uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GFLAGS_SHIFT_TRG_MODE;
+
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
+                "msi: dest=%x dest_mode=%x delivery_mode=%x "
+                "vector=%x trig_mode=%x\n",
+                dest, dest_mode, delivery_mode, vector, trig_mode);
+
+    if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
+    {
+        gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
+        return 0;
+    }
+
+    vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
     return 1;
 }

diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 67b0223..848c3e5 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -452,13 +452,13 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector)
     spin_unlock(&d->event_lock);
 }

-extern int vmsi_deliver(struct domain *d, int pirq);
+extern int vmsi_deliver_pirq(struct domain *d, int pirq);
 static int hvm_pci_msi_assert(struct domain *d, int pirq)
 {
     if ( hvm_domain_use_pirq(d, pirq) )
         return send_guest_pirq(d, pirq);
     else
-        return vmsi_deliver(d, pirq);
+        return vmsi_deliver_pirq(d, pirq);
 }
 #endif

diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index 0848616..ba114a4 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -17,6 +17,7 @@
 #define MSI_DATA_DELIVERY_MODE_SHIFT   8
 #define  MSI_DATA_DELIVERY_FIXED       (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
 #define  MSI_DATA_DELIVERY_LOWPRI      (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define  MSI_DATA_DELIVERY_MODE_MASK    0x00000700

 #define MSI_DATA_LEVEL_SHIFT           14
 #define         MSI_DATA_LEVEL_DEASSERT        (0 << MSI_DATA_LEVEL_SHIFT)
@@ -25,6 +26,7 @@
 #define MSI_DATA_TRIGGER_SHIFT         15
 #define  MSI_DATA_TRIGGER_EDGE         (0 << MSI_DATA_TRIGGER_SHIFT)
 #define  MSI_DATA_TRIGGER_LEVEL                (1 << MSI_DATA_TRIGGER_SHIFT)
+#define  MSI_DATA_TRIGGER_MASK          0x00008000

 /*
  * Shift/mask fields for msi address
@@ -37,6 +39,7 @@
 #define MSI_ADDR_DESTMODE_SHIFT     2
 #define MSI_ADDR_DESTMODE_PHYS      (0 << MSI_ADDR_DESTMODE_SHIFT)
 #define MSI_ADDR_DESTMODE_LOGIC     (1 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_MASK      0x4

 #define MSI_ADDR_REDIRECTION_SHIFT  3
 #define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 2a597da..e9f7994 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -82,11 +82,24 @@ typedef enum {
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
 } hvmmem_type_t;

+/* MSI injection for emulated devices */
+#define HVMOP_inj_msi         6
+struct xen_hvm_inj_msi {
+    /* Domain to inject the MSI into. */
+    domid_t   domid;
+    /* Address (0xfeeXXXXX) */
+    uint64_t  addr;
+    /* Data -- lower 32 bits */
+    uint32_t  data;
+};
+typedef struct xen_hvm_inj_msi xen_hvm_inj_msi_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_inj_msi_t);
+
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)

 /* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram    6
+#define HVMOP_track_dirty_vram    7
 struct xen_hvm_track_dirty_vram {
     /* Domain to be tracked. */
     domid_t  domid;
@@ -102,7 +115,7 @@ typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);

 /* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory    7
+#define HVMOP_modified_memory    8
 struct xen_hvm_modified_memory {
     /* Domain to be updated. */
     domid_t  domid;
@@ -114,7 +127,7 @@ struct xen_hvm_modified_memory {
 typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);

-#define HVMOP_set_mem_type    8
+#define HVMOP_set_mem_type    9
 /* Notify that a region of memory is to be treated in a specific way. */
 struct xen_hvm_set_mem_type {
     /* Domain to be updated. */
@@ -132,7 +145,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

 /* Hint from PV drivers for pagetable destruction. */
-#define HVMOP_pagetable_dying        9
+#define HVMOP_pagetable_dying        10
 struct xen_hvm_pagetable_dying {
     /* Domain with a pagetable about to be destroyed. */
     domid_t  domid;
@@ -144,14 +157,14 @@ typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);

 /* Get the current Xen time, in nanoseconds since system boot. */
-#define HVMOP_get_time              10
+#define HVMOP_get_time              11
 struct xen_hvm_get_time {
     uint64_t now;      /* OUT */
 };
 typedef struct xen_hvm_get_time xen_hvm_get_time_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);

-#define HVMOP_xentrace              11
+#define HVMOP_xentrace              12
 struct xen_hvm_xentrace {
     uint16_t event, extra_bytes;
     uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
@@ -162,7 +175,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)

-#define HVMOP_set_mem_access        12
+#define HVMOP_set_mem_access        13
 typedef enum {
     HVMMEM_access_n,
     HVMMEM_access_r,
@@ -190,7 +203,7 @@ struct xen_hvm_set_mem_access {
 typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);

-#define HVMOP_get_mem_access        13
+#define HVMOP_get_mem_access        14
 /* Get the specific access type for that region of memory */
 struct xen_hvm_get_mem_access {
     /* Domain to be queried. */
@@ -203,7 +216,7 @@ struct xen_hvm_get_mem_access {
 typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);

-#define HVMOP_inject_trap            14
+#define HVMOP_inject_trap            15
 /* Inject a trap into a VCPU, which will get taken up on the next
  * scheduling of it. Note that the caller should know enough of the
  * state of the CPU before injecting, to know what the effect of
@@ -226,7 +239,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);

 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

-#define HVMOP_get_mem_type    15
+#define HVMOP_get_mem_type    16
 /* Return hvmmem_type_t for the specified pfn. */
 struct xen_hvm_get_mem_type {
     /* Domain to be queried. */
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index ae0531b..129a880 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -115,6 +115,7 @@ void hvm_isa_irq_deassert(
     struct domain *d, unsigned int isa_irq);

 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+void hvm_inj_msi(struct domain *d, u64 addr, u32 data);

 void hvm_maybe_deassert_evtchn_irq(void);
 void hvm_assert_evtchn_irq(struct vcpu *v);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 7539cc7..c16818c 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -123,6 +123,7 @@ struct xsm_operations {
     int (*hvm_set_pci_intx_level) (struct domain *d);
     int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
+    int (*hvm_inj_msi) (struct domain *d);
     int (*apic) (struct domain *d, int cmd);
     int (*assign_vector) (struct domain *d, uint32_t pirq);
     int (*xen_settime) (void);
@@ -507,6 +508,11 @@ static inline int xsm_hvm_set_pci_link_route (struct domain *d)
     return xsm_call(hvm_set_pci_link_route(d));
 }

+static inline int xsm_hvm_inj_msi (struct domain *d)
+{
+    return xsm_call(hvm_inj_msi(d));
+}
+
 static inline int xsm_apic (struct domain *d, int cmd)
 {
     return xsm_call(apic(d, cmd));
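
End to end, a device model would invoke the new sub-op roughly as below.
This is a sketch only: the xc_hvm_inj_msi wrapper shown here is
hypothetical (a libxc binding is expected separately, presumably in patch
2/2), though xc_interface_open/close are the standard libxc calls:

#include <xenctrl.h>

/* Hypothetical libxc wrapper -- name and signature are guesses that
 * simply mirror the xen_hvm_inj_msi layout above. */
int xc_hvm_inj_msi(xc_interface *xch, domid_t domid,
                   uint64_t addr, uint32_t data);

/* Inject vector 0x40 into domid: physical dest 1, fixed delivery, edge. */
static int inject_example(domid_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( xch == NULL )
        return -1;
    rc = xc_hvm_inj_msi(xch, domid, 0xfee01000ULL, 0x40);
    xc_interface_close(xch);
    return rc;
}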

-- 
Best regards
Wei Liu
Twitter: @iliuw
Site: http://liuw.name
