WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 1/7] PCI device register/unregister + pci_dev cleanups

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 1/7] PCI device register/unregister + pci_dev cleanups
From: Espen Skoglund <espen.skoglund@xxxxxxxxxxxxx>
Date: Fri, 4 Jul 2008 17:35:08 +0100
Delivery-date: Fri, 04 Jul 2008 09:35:48 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <18542.20587.995970.962113@xxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <18542.20587.995970.962113@xxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Move pci_dev lists from hvm to arch_domain

Move the pci_dev list from hvm to arch_domain since PCI devs are no
longer hvm specific.  Also remove locking for the pci_dev lists; the
locking will be reintroduced in a later patch.

Signed-off-by: Espen Skoglund <espen.skoglund@xxxxxxxxxxxxx>

--
 arch/x86/domain.c                         |    2 ++
 arch/x86/hvm/hvm.c                        |    2 +-
 arch/x86/hvm/svm/svm.c                    |    2 +-
 arch/x86/hvm/vmx/vmcs.c                   |    3 +--
 arch/x86/hvm/vmx/vmx.c                    |    2 +-
 arch/x86/mm/shadow/multi.c                |    3 +--
 drivers/passthrough/amd/pci_amd_iommu.c   |   17 ++++-------------
 drivers/passthrough/iommu.c               |    9 +++------
 drivers/passthrough/vtd/dmar.h            |    4 ----
 drivers/passthrough/vtd/iommu.c           |   17 ++++-------------
 include/asm-x86/domain.h                  |    4 ++++
 include/asm-x86/hvm/svm/amd-iommu-proto.h |    4 ----
 include/xen/hvm/iommu.h                   |    5 -----
 include/xen/pci.h                         |    7 ++++++-
 14 files changed, 28 insertions(+), 53 deletions(-)
--
diff -r d826a1479fec xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/domain.c     Tue Jul 01 17:10:44 2008 +0100
@@ -349,6 +349,8 @@
         is_hvm_domain(d) &&
         hvm_funcs.hap_supported &&
         (domcr_flags & DOMCRF_hap);
+
+    INIT_LIST_HEAD(&d->arch.pdev_list);
 
     d->arch.relmem = RELMEM_not_started;
     INIT_LIST_HEAD(&d->arch.relmem_list);
diff -r d826a1479fec xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue Jul 01 17:10:44 2008 +0100
@@ -903,7 +903,7 @@
         }
     }
 
-    if ( !list_empty(&domain_hvm_iommu(v->domain)->pdev_list) )
+    if ( has_arch_pdevs(v->domain) )
     {
         if ( (value & X86_CR0_CD) && !(value & X86_CR0_NW) )
         {
diff -r d826a1479fec xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Jul 01 17:10:44 2008 +0100
@@ -1132,7 +1132,7 @@
 
 static void svm_wbinvd_intercept(void)
 {
-    if ( !list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
+    if ( has_arch_pdevs(current->domain) )
         on_each_cpu(wbinvd_ipi, NULL, 1, 1);
 }
 
diff -r d826a1479fec xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jul 01 17:10:44 2008 +0100
@@ -849,8 +849,7 @@
          *     there is no wbinvd exit, or
          *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
          */
-        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
-             !cpu_has_wbinvd_exiting )
+        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
diff -r d826a1479fec xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Jul 01 17:10:44 2008 +0100
@@ -1926,7 +1926,7 @@
 
 static void vmx_wbinvd_intercept(void)
 {
-    if ( list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
+    if ( !has_arch_pdevs(current->domain) )
         return;
 
     if ( cpu_has_wbinvd_exiting )
diff -r d826a1479fec xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Tue Jul 01 17:10:44 2008 +0100
@@ -840,8 +840,7 @@
      * For HVM domains with direct access to MMIO areas, set the correct
      * caching attributes in the shadows to match what was asked for.
      */
-    if ( (level == 1) && is_hvm_domain(d) &&
-         !list_empty(&(domain_hvm_iommu(d)->pdev_list)) &&
+    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
         unsigned int type;
diff -r d826a1479fec xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Mon Jun 30 19:52:08 
2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Jul 01 17:10:44 
2008 +0100
@@ -292,7 +292,6 @@
 
 static void amd_iommu_setup_dom0_devices(struct domain *d)
 {
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct amd_iommu *iommu;
     struct pci_dev *pdev;
     int bus, dev, func;
@@ -314,7 +313,7 @@
                 pdev = xmalloc(struct pci_dev);
                 pdev->bus = bus;
                 pdev->devfn = PCI_DEVFN(dev, func);
-                list_add_tail(&pdev->list, &hd->pdev_list);
+                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
 
                 bdf = (bus << 8) | pdev->devfn;
                 /* supported device? */
@@ -490,12 +489,9 @@
 static int reassign_device( struct domain *source, struct domain *target,
                             u8 bus, u8 devfn)
 {
-    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
-    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
     struct pci_dev *pdev;
     struct amd_iommu *iommu;
     int bdf;
-    unsigned long flags;
 
     for_each_pdev ( source, pdev )
     {
@@ -520,11 +516,7 @@
 
         amd_iommu_disable_domain_device(source, iommu, bdf);
         /* Move pci device from the source domain to target domain. */
-        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
-        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
-        list_move(&pdev->list, &target_hd->pdev_list);
-        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
-        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+        list_move(&pdev->domain_list, &target->arch.pdev_list);
 
         amd_iommu_setup_domain_device(target, iommu, bdf);
         amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
@@ -559,12 +551,11 @@
 
 static void release_domain_devices(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
     struct pci_dev *pdev;
 
-    while ( !list_empty(&hd->pdev_list) )
+    while ( has_arch_pdevs(d) )
     {
-        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
+        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
         pdev_flr(pdev->bus, pdev->devfn);
         amd_iov_info("release domain %d devices %x:%x.%x\n", d->domain_id,
                  pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
diff -r d826a1479fec xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c   Tue Jul 01 17:10:44 2008 +0100
@@ -35,8 +35,6 @@
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
     spin_lock_init(&hd->mapping_lock);
-    spin_lock_init(&hd->iommu_list_lock);
-    INIT_LIST_HEAD(&hd->pdev_list);
     INIT_LIST_HEAD(&hd->g2m_ioport_list);
 
     if ( !iommu_enabled )
@@ -68,7 +66,7 @@
     if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
         return rc;
 
-    if ( has_iommu_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
+    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
     {
         d->need_iommu = 1;
         return iommu_populate_page_table(d);
@@ -190,7 +188,7 @@
 
     hd->platform_ops->reassign_device(d, dom0, bus, devfn);
 
-    if ( !has_iommu_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && need_iommu(d) )
     {
         d->need_iommu = 0;
         hd->platform_ops->teardown(d);
@@ -242,8 +240,7 @@
 
     group_id = ops->get_device_group_id(bus, devfn);
 
-    list_for_each_entry(pdev,
-        &(dom0->arch.hvm_domain.hvm_iommu.pdev_list), list)
+    for_each_pdev( d, pdev )
     {
         if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
             continue;
diff -r d826a1479fec xen/drivers/passthrough/vtd/dmar.h
--- a/xen/drivers/passthrough/vtd/dmar.h        Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/drivers/passthrough/vtd/dmar.h        Tue Jul 01 17:10:44 2008 +0100
@@ -70,10 +70,6 @@
     list_for_each_entry(iommu, \
         &(domain->arch.hvm_domain.hvm_iommu.iommu_list), list)
 
-#define for_each_pdev(domain, pdev) \
-    list_for_each_entry(pdev, \
-         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
-
 #define for_each_drhd_unit(drhd) \
     list_for_each_entry(drhd, &acpi_drhd_units, list)
 #define for_each_rmrr_device(rmrr, pdev) \
diff -r d826a1479fec xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Jul 01 17:10:44 2008 +0100
@@ -1023,8 +1023,6 @@
     u64 i;
     struct acpi_drhd_unit *drhd;
 
-    INIT_LIST_HEAD(&hd->pdev_list);
-
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
 
@@ -1366,12 +1364,10 @@
     u8 bus, u8 devfn)
 {
     struct hvm_iommu *source_hd = domain_hvm_iommu(source);
-    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
     struct pci_dev *pdev, *pdev2;
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int status;
-    unsigned long flags;
     int found = 0;
 
     pdev_flr(bus, devfn);
@@ -1388,11 +1384,7 @@
     domain_context_unmap(iommu, pdev);
 
     /* Move pci device from the source domain to target domain. */
-    spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
-    spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
-    list_move(&pdev->list, &target_hd->pdev_list);
-    spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
-    spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+    list_move(&pdev->domain_list, &target->arch.pdev_list);
 
     for_each_pdev ( source, pdev2 )
     {
@@ -1413,12 +1405,11 @@
 
 void return_devices_to_dom0(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
     struct pci_dev *pdev;
 
-    while ( !list_empty(&hd->pdev_list) )
+    while ( has_arch_pdevs(d) )
     {
-        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
+        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
         pci_cleanup_msi(pdev->bus, pdev->devfn);
         reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
     }
@@ -1631,7 +1622,7 @@
                 pdev = xmalloc(struct pci_dev);
                 pdev->bus = bus;
                 pdev->devfn = PCI_DEVFN(dev, func);
-                list_add_tail(&pdev->list, &hd->pdev_list);
+                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
 
                 drhd = acpi_find_matched_drhd_unit(pdev);
                 ret = domain_context_mapping(d, drhd->iommu, pdev);
diff -r d826a1479fec xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/include/asm-x86/domain.h      Tue Jul 01 17:10:44 2008 +0100
@@ -228,6 +228,7 @@
     struct rangeset *ioport_caps;
     uint32_t pci_cf8;
 
+    struct list_head pdev_list;
     struct hvm_domain hvm_domain;
 
     struct paging_domain paging;
@@ -265,6 +266,9 @@
 
     cpuid_input_t cpuids[MAX_CPUID_INPUT];
 } __cacheline_aligned;
+
+#define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
+
 
 #ifdef __i386__
 struct pae_l3_cache {
diff -r d826a1479fec xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Mon Jun 30 19:52:08 
2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Jul 01 17:10:44 
2008 +0100
@@ -27,10 +27,6 @@
 #define for_each_amd_iommu(amd_iommu) \
     list_for_each_entry(amd_iommu, \
         &amd_iommu_head, list)
-
-#define for_each_pdev(domain, pdev) \
-    list_for_each_entry(pdev, \
-         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
 
 #define DMA_32BIT_MASK  0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
diff -r d826a1479fec xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h       Tue Jul 01 17:10:44 2008 +0100
@@ -36,8 +36,6 @@
 };
 
 struct hvm_iommu {
-    spinlock_t iommu_list_lock;    /* protect iommu specific lists */
-    struct list_head pdev_list;    /* direct accessed pci devices */
     u64 pgd_maddr;                 /* io page directory machine address */
     spinlock_t mapping_lock;       /* io page table lock */
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
@@ -55,7 +53,4 @@
     struct iommu_ops *platform_ops;
 };
 
-#define has_iommu_pdevs(domain) \
-    (!list_empty(&(domain->arch.hvm_domain.hvm_iommu.pdev_list)))
-
 #endif /* __ASM_X86_HVM_IOMMU_H__ */
diff -r d826a1479fec xen/include/xen/pci.h
--- a/xen/include/xen/pci.h     Mon Jun 30 19:52:08 2008 +0100
+++ b/xen/include/xen/pci.h     Tue Jul 01 17:10:44 2008 +0100
@@ -25,7 +25,7 @@
 #define PCI_FUNC(devfn)       ((devfn) & 0x07)
 
 struct pci_dev {
-    struct list_head list;
+    struct list_head domain_list;
     struct list_head msi_dev_list;
     u8 bus;
     u8 devfn;
@@ -50,4 +50,9 @@
 int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap);
 int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap);
 
+
+#define for_each_pdev(domain, pdev) \
+    list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
+
+
 #endif /* __XEN_PCI_H__ */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel