[Xen-changelog] [xen-unstable] PCI passthru: VT-d I/O hooks.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] PCI passthru: VT-d I/O hooks.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 26 Sep 2007 03:40:37 -0700
Delivery-date: Wed, 26 Sep 2007 03:42:31 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1190189526 -3600
# Node ID aad813d8a8ad6f9d7d4f0d04a14fe088dd671e96
# Parent  b7eb2bb9b6251c0533bac7361218c398ad3e8cbf
PCI passthru: VT-d I/O hooks.
Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>
Signed-off-by: Guy Zana <guy@xxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c                 |    5 +
 xen/arch/x86/hvm/intercept.c           |    4 +
 xen/arch/x86/hvm/io.c                  |  118 +++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c |   19 ++---
 xen/arch/x86/hvm/vmx/vtd/io.c          |   32 ++++++++
 xen/drivers/acpi/tables.c              |    1 +
 xen/include/asm-x86/hvm/iommu.h        |    7 +
 xen/include/asm-x86/iommu.h            |    5 +
 8 files changed, 181 insertions(+), 10 deletions(-)

diff -r b7eb2bb9b625 -r aad813d8a8ad xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Sep 19 09:12:06 2007 +0100
@@ -48,6 +48,7 @@
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
 #include <public/memory.h>
+#include <asm/iommu.h>
 
 int hvm_enabled __read_mostly;
 
@@ -214,6 +215,10 @@ int hvm_domain_initialise(struct domain 
 
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
+
+    rc = iommu_domain_init(d);
+    if ( rc != 0 )
+        return rc;
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
     if ( rc != 0 )
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/arch/x86/hvm/intercept.c      Wed Sep 19 09:12:06 2007 +0100
@@ -29,6 +29,7 @@
 #include <asm/current.h>
 #include <io_ports.h>
 #include <xen/event.h>
+#include <asm/iommu.h>
 
 
 extern struct hvm_mmio_handler hpet_mmio_handler;
@@ -242,6 +243,9 @@ int hvm_io_intercept(ioreq_t *p, int typ
                            &(v->domain->arch.hvm_domain.io_handler);
     int i;
     unsigned long addr, size;
+
+    if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
+        return 1;
 
     for (i = 0; i < handler->num_slot; i++) {
         if( type != handler->hdl_list[i].type)
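
For context, a hedged sketch of the request that now takes this early-exit path. The port number and buffer address are hypothetical; the field names come from public/hvm/ioreq.h, and HVM_PORTIO is the type checked above:

    /* Illustrative only: an ioreq for a guest "rep insw" of 4 words from
     * a hypothetical guest port 0x300.  If that port falls inside one of
     * the domain's g2m_ioport ranges, dpci_ioport_intercept() claims the
     * request before any registered io_handler slot is consulted. */
    paddr_t buf_gpa = 0x80000;          /* hypothetical guest-phys buffer */
    ioreq_t req = {
        .type        = IOREQ_TYPE_PIO,  /* port I/O */
        .dir         = IOREQ_READ,
        .addr        = 0x300,           /* guest port */
        .size        = WORD,            /* two-byte elements */
        .count       = 4,               /* rep count */
        .data_is_ptr = 1,               /* data holds a guest-phys buffer */
        .data        = buf_gpa,
    };

    if ( hvm_io_intercept(&req, HVM_PORTIO) )
    {
        /* Handled entirely inside Xen; no round trip to the device model. */
    }
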
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/arch/x86/hvm/io.c     Wed Sep 19 09:12:06 2007 +0100
@@ -42,6 +42,7 @@
 #include <asm/hvm/vlapic.h>
 
 #include <public/sched.h>
+#include <xen/iocap.h>
 #include <public/hvm/ioreq.h>
 
 #if defined (__i386__)
@@ -864,6 +865,123 @@ void hvm_io_assist(void)
     vcpu_end_shutdown_deferral(v);
 }
 
+void dpci_ioport_read(uint32_t mport, ioreq_t *p)
+{
+    uint64_t i;
+    uint64_t z_data;
+    uint64_t length = (p->count * p->size);
+
+    for ( i = 0; i < length; i += p->size )
+    {
+        z_data = ~0ULL;
+        
+        switch ( p->size )
+        {
+        case BYTE:
+            z_data = (uint64_t)inb(mport);
+            break;
+        case WORD:
+            z_data = (uint64_t)inw(mport);
+            break;
+        case LONG:
+            z_data = (uint64_t)inl(mport);
+            break;
+        default:
+            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
+                     PRId64 "\n", p->size);
+            return;
+        }
+
+        if ( !p->data_is_ptr )
+            p->data = z_data;
+        else if ( hvm_copy_to_guest_phys(p->data + i, (void *)&z_data,
+                                         (int)p->size) )
+        {
+            gdprintk(XENLOG_ERR, "Error: couldn't copy to hvm phys\n");
+            return;
+        }
+    }
+}
+
+void dpci_ioport_write(uint32_t mport, ioreq_t *p)
+{
+    uint64_t i;
+    uint64_t z_data = 0;
+    uint64_t length = (p->count * p->size);
+
+    for ( i = 0; i < length; i += p->size )
+    {
+        z_data = p->data;
+        if ( p->data_is_ptr &&
+             hvm_copy_from_guest_phys((void *)&z_data,
+                                      p->data + i, (int)p->size) )
+        {
+            gdprintk(XENLOG_ERR, "Error: couldn't copy from hvm phys\n");
+            return;
+        }
+
+        switch ( p->size )
+        {
+        case BYTE:
+            outb((uint8_t) z_data, mport);
+            break;
+        case WORD:
+            outw((uint16_t) z_data, mport);
+            break;
+        case LONG:
+            outl((uint32_t) z_data, mport);
+            break;
+        default:
+            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
+                     PRId64 "\n", p->size);
+            break;
+        }
+    }
+}
+
+int dpci_ioport_intercept(ioreq_t *p)
+{
+    struct domain *d = current->domain;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct g2m_ioport *g2m_ioport;
+    unsigned int mport, gport = p->addr;
+    unsigned int s = 0, e = 0;
+
+    list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
+    {
+        s = g2m_ioport->gport;
+        e = s + g2m_ioport->np;
+        if ( (gport >= s) && (gport < e) )
+            goto found;
+    }
+
+    return 0;
+
+ found:
+    mport = (gport - s) + g2m_ioport->mport;
+
+    if ( !ioports_access_permitted(d, mport, mport + p->size - 1) ) 
+    {
+        gdprintk(XENLOG_ERR, "Error: access to gport=0x%x denied!\n",
+                 (uint32_t)p->addr);
+        return 0;
+    }
+
+    switch ( p->dir )
+    {
+    case IOREQ_READ:
+        dpci_ioport_read(mport, p);
+        break;
+    case IOREQ_WRITE:
+        dpci_ioport_write(mport, p);
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "Error: couldn't handle p->dir = %d\n", p->dir);
+    }
+
+    return 1;
+}
+
 /*
  * Local variables:
  * mode: C
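
Nothing in this changeset populates hd->g2m_ioport_list yet; only the consumer side above is added. As a hedged sketch of what a population path might look like (the helper name and error handling are hypothetical, and a real caller would also need to grant the machine ports via ioports_permit_access() so that the ioports_access_permitted() check above passes):

    /* Hypothetical helper: map the guest port range [gport, gport+np)
     * onto machine ports starting at mport, by appending an entry to
     * the list that dpci_ioport_intercept() walks. */
    static int g2m_ioport_add(struct domain *d, unsigned int gport,
                              unsigned int mport, unsigned int np)
    {
        struct hvm_iommu *hd = domain_hvm_iommu(d);
        struct g2m_ioport *entry = xmalloc(struct g2m_ioport);

        if ( entry == NULL )
            return -ENOMEM;

        entry->gport = gport;
        entry->mport = mport;
        entry->np    = np;
        list_add(&entry->list, &hd->g2m_ioport_list);

        return 0;
    }
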
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Wed Sep 19 09:12:06 2007 +0100
@@ -929,18 +929,16 @@ int iommu_domain_init(struct domain *dom
     unsigned long sagaw;
     struct acpi_drhd_unit *drhd;
 
-    if (list_empty(&acpi_drhd_units))
-        return 0;
     spin_lock_init(&hd->mapping_lock);
     spin_lock_init(&hd->iommu_list_lock);
     INIT_LIST_HEAD(&hd->pdev_list);
-
-    for_each_drhd_unit(drhd) {
-        if (drhd->iommu)
-            iommu = drhd->iommu;
-        else
-            iommu = iommu_alloc(drhd);
-    }
+    INIT_LIST_HEAD(&hd->g2m_ioport_list);
+
+    if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
+        return 0;
+
+    for_each_drhd_unit ( drhd )
+        iommu = drhd->iommu ? : iommu_alloc(drhd);
 
     /* calculate AGAW */
     if (guest_width > cap_mgaw(iommu->cap))
@@ -949,7 +947,8 @@ int iommu_domain_init(struct domain *dom
     agaw = width_to_agaw(adjust_width);
     /* FIXME: hardware doesn't support it, choose a bigger one? */
     sagaw = cap_sagaw(iommu->cap);
-    if (!test_bit(agaw, &sagaw)) {
+    if ( !test_bit(agaw, &sagaw) )
+    {
         gdprintk(XENLOG_ERR VTDPREFIX,
             "IOMMU: hardware doesn't support the agaw\n");
         agaw = find_next_bit(&sagaw, 5, agaw);
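
For reference, the width/AGAW arithmetic this hunk leans on, sketched under the usual VT-d assumption of a 9-bit stride per page-table level (the real width_to_agaw() and cap_sagaw() definitions live in the intel-iommu headers):

    /* Sketch: AGAW 0/1/2 correspond to 30/39/48-bit address widths,
     * i.e. 2/3/4-level page tables.  cap_sagaw() yields a bitmap of
     * the AGAWs the hardware supports, hence the test_bit() check and
     * the find_next_bit() fallback above when the computed AGAW is
     * not offered. */
    static inline unsigned int width_to_agaw_sketch(unsigned int width)
    {
        return (width - 30) / 9;    /* e.g. 48-bit width -> AGAW 2 */
    }
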
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c     Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c     Wed Sep 19 09:12:06 2007 +0100
@@ -43,7 +43,39 @@
 #include <public/sched.h>
 #include <xen/iocap.h>
 #include <public/hvm/ioreq.h>
+#include <public/domctl.h>
 
+int pt_irq_create_bind_vtd(
+    struct domain *d,
+    xen_domctl_bind_pt_irq_t * pt_irq_bind)
+{
+    struct hvm_domain *hd = &d->arch.hvm_domain;
+    uint32_t machine_gsi, guest_gsi;
+    uint32_t device, intx;
+
+    machine_gsi = pt_irq_bind->machine_irq;
+    device = pt_irq_bind->u.pci.device;
+    intx = pt_irq_bind->u.pci.intx;
+    guest_gsi = hvm_pci_intx_gsi(device, intx);
+
+    hd->irq.mirq[machine_gsi].valid = 1;
+    hd->irq.mirq[machine_gsi].device = device;
+    hd->irq.mirq[machine_gsi].intx = intx;
+    hd->irq.mirq[machine_gsi].guest_gsi = guest_gsi;
+
+    hd->irq.girq[guest_gsi].valid = 1;
+    hd->irq.girq[guest_gsi].device = device;
+    hd->irq.girq[guest_gsi].intx = intx;
+    hd->irq.girq[guest_gsi].machine_gsi = machine_gsi;
+
+    /* Deal with gsi for legacy devices */
+    pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+    gdprintk(XENLOG_ERR,
+        "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n",
+        machine_gsi, device, intx);
+
+    return 0;
+}
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
 {
     uint32_t device, intx;
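
A hedged caller-side sketch of the new binding entry point (the GSI, slot, and pin values are made up, only the fields the function reads are shown, and d is assumed to be the target domain):

    /* Illustrative only: bind hypothetical host GSI 10 to the guest GSI
     * derived from PCI slot 3, pin INTA.  The function records the
     * mapping in both directions (mirq[] and girq[]) and then binds the
     * physical IRQ to vcpu0 as a shareable guest IRQ. */
    xen_domctl_bind_pt_irq_t bind;

    memset(&bind, 0, sizeof(bind));
    bind.machine_irq  = 10;     /* host GSI (hypothetical) */
    bind.u.pci.device = 3;      /* guest PCI slot (hypothetical) */
    bind.u.pci.intx   = 0;      /* INTA */

    pt_irq_create_bind_vtd(d, &bind);
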
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/drivers/acpi/tables.c
--- a/xen/drivers/acpi/tables.c Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/drivers/acpi/tables.c Wed Sep 19 09:12:06 2007 +0100
@@ -59,6 +59,7 @@ static char *acpi_table_signatures[ACPI_
        [ACPI_SPMI] = "SPMI",
        [ACPI_HPET] = "HPET",
        [ACPI_MCFG] = "MCFG",
+       [ACPI_DMAR] = "DMAR",
 };
 
 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/include/asm-x86/hvm/iommu.h
--- a/xen/include/asm-x86/hvm/iommu.h   Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/include/asm-x86/hvm/iommu.h   Wed Sep 19 09:12:06 2007 +0100
@@ -28,6 +28,13 @@
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 
+struct g2m_ioport {
+    struct list_head list;
+    unsigned int gport;
+    unsigned int mport;
+    unsigned int np;
+};
+
 struct hvm_iommu {
     spinlock_t iommu_list_lock;    /* protect iommu specific lists */
     struct list_head pdev_list;    /* direct accessed pci devices */
diff -r b7eb2bb9b625 -r aad813d8a8ad xen/include/asm-x86/iommu.h
--- a/xen/include/asm-x86/iommu.h       Tue Sep 18 16:09:19 2007 +0100
+++ b/xen/include/asm-x86/iommu.h       Wed Sep 19 09:12:06 2007 +0100
@@ -28,6 +28,7 @@
 #include <xen/xmalloc.h>
 #include <asm/hvm/vmx/intel-iommu.h>
 #include <public/hvm/ioreq.h>
+#include <public/domctl.h>
 
 extern int vtd_enabled;
 
@@ -75,5 +76,9 @@ void iommu_set_pgd(struct domain *d);
 void iommu_set_pgd(struct domain *d);
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
+int dpci_ioport_intercept(ioreq_t *p);
+int pt_irq_create_bind_vtd(struct domain *d,
+    xen_domctl_bind_pt_irq_t * pt_irq_bind);
+
 
 #endif // _IOMMU_H_

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
