To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] PCI device register/unregister + pci_dev cleanups
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 04 Jul 2008 16:20:11 -0700
Delivery-date: Fri, 04 Jul 2008 16:20:03 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1215190276 -3600
# Node ID 1e9df5cb885f8848f147c21758040978019aefd0
# Parent  e42135b61dc6cf27fb192e71bb33e6d57ffe80f0
PCI device register/unregister + pci_dev cleanups

Move pci_dev lists from hvm to arch_domain

Move the pci_dev list from hvm to arch_domain, since PCI devices are no
longer HVM specific.  Also remove the locking for the pci_dev lists; it
will be reintroduced later.

Signed-off-by: Espen Skoglund <espen.skoglund@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c                         |    2 ++
 xen/arch/x86/hvm/hvm.c                        |    2 +-
 xen/arch/x86/hvm/svm/svm.c                    |    2 +-
 xen/arch/x86/hvm/vmx/vmcs.c                   |    3 +--
 xen/arch/x86/hvm/vmx/vmx.c                    |    2 +-
 xen/arch/x86/mm/shadow/multi.c                |    3 +--
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |   19 +++++--------------
 xen/drivers/passthrough/iommu.c               |    9 +++------
 xen/drivers/passthrough/vtd/dmar.h            |    4 ----
 xen/drivers/passthrough/vtd/iommu.c           |   19 +++++--------------
 xen/include/asm-x86/domain.h                  |    4 ++++
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    4 ----
 xen/include/xen/hvm/iommu.h                   |    5 -----
 xen/include/xen/pci.h                         |    7 ++++++-
 14 files changed, 30 insertions(+), 55 deletions(-)
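
Before the diff itself, a brief illustrative sketch (not part of the
changeset) contrasting the old and new idioms for testing whether a domain
has directly assigned PCI devices.  The helper has_arch_pdevs() and the
arch.pdev_list field are the ones introduced in the hunks below;
do_wbinvd_broadcast() is a hypothetical stand-in for the real callers
(hvm_set_cr0(), svm_wbinvd_intercept(), vmx_do_resume(), ...).

    /* Old idiom: the device list lived in the HVM-specific struct hvm_iommu. */
    if ( !list_empty(&domain_hvm_iommu(d)->pdev_list) )
        do_wbinvd_broadcast(d);

    /* New idiom: the list hangs off struct arch_domain, and a helper wraps
     * the emptiness test (see xen/include/asm-x86/domain.h below). */
    if ( has_arch_pdevs(d) )
        do_wbinvd_broadcast(d);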

diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/domain.c     Fri Jul 04 17:51:16 2008 +0100
@@ -350,6 +350,8 @@ int arch_domain_create(struct domain *d,
         hvm_funcs.hap_supported &&
         (domcr_flags & DOMCRF_hap);
 
+    INIT_LIST_HEAD(&d->arch.pdev_list);
+
     d->arch.relmem = RELMEM_not_started;
     INIT_LIST_HEAD(&d->arch.relmem_list);
 
diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Jul 04 17:51:16 2008 +0100
@@ -911,7 +911,7 @@ int hvm_set_cr0(unsigned long value)
         }
     }
 
-    if ( !list_empty(&domain_hvm_iommu(v->domain)->pdev_list) )
+    if ( has_arch_pdevs(v->domain) )
     {
         if ( (value & X86_CR0_CD) && !(value & X86_CR0_NW) )
         {
diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Jul 04 17:51:16 2008 +0100
@@ -1132,7 +1132,7 @@ static void wbinvd_ipi(void *info)
 
 static void svm_wbinvd_intercept(void)
 {
-    if ( !list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
+    if ( has_arch_pdevs(current->domain) )
         on_each_cpu(wbinvd_ipi, NULL, 1, 1);
 }
 
diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Fri Jul 04 17:51:16 2008 +0100
@@ -849,8 +849,7 @@ void vmx_do_resume(struct vcpu *v)
          *     there is no wbinvd exit, or
          *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
          */
-        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
-             !cpu_has_wbinvd_exiting )
+        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Jul 04 17:51:16 2008 +0100
@@ -1926,7 +1926,7 @@ static void wbinvd_ipi(void *info)
 
 static void vmx_wbinvd_intercept(void)
 {
-    if ( list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) )
+    if ( !has_arch_pdevs(current->domain) )
         return;
 
     if ( cpu_has_wbinvd_exiting )
diff -r e42135b61dc6 -r 1e9df5cb885f xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Fri Jul 04 17:51:16 2008 +0100
@@ -840,8 +840,7 @@ _sh_propagate(struct vcpu *v,
      * For HVM domains with direct access to MMIO areas, set the correct
      * caching attributes in the shadows to match what was asked for.
      */
-    if ( (level == 1) && is_hvm_domain(d) &&
-         !list_empty(&(domain_hvm_iommu(d)->pdev_list)) &&
+    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
         unsigned int type;
diff -r e42135b61dc6 -r 1e9df5cb885f xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jul 04 17:51:16 2008 +0100
@@ -292,7 +292,6 @@ static void amd_iommu_setup_domain_devic
 
 static void amd_iommu_setup_dom0_devices(struct domain *d)
 {
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct amd_iommu *iommu;
     struct pci_dev *pdev;
     int bus, dev, func;
@@ -314,7 +313,7 @@ static void amd_iommu_setup_dom0_devices
                 pdev = xmalloc(struct pci_dev);
                 pdev->bus = bus;
                 pdev->devfn = PCI_DEVFN(dev, func);
-                list_add_tail(&pdev->list, &hd->pdev_list);
+                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
 
                 bdf = (bus << 8) | pdev->devfn;
                 /* supported device? */
@@ -490,12 +489,9 @@ static int reassign_device( struct domai
 static int reassign_device( struct domain *source, struct domain *target,
                             u8 bus, u8 devfn)
 {
-    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
-    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
     struct pci_dev *pdev;
     struct amd_iommu *iommu;
     int bdf;
-    unsigned long flags;
 
     for_each_pdev ( source, pdev )
     {
@@ -520,11 +516,7 @@ static int reassign_device( struct domai
 
         amd_iommu_disable_domain_device(source, iommu, bdf);
         /* Move pci device from the source domain to target domain. */
-        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
-        spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
-        list_move(&pdev->list, &target_hd->pdev_list);
-        spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
-        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+        list_move(&pdev->domain_list, &target->arch.pdev_list);
 
         amd_iommu_setup_domain_device(target, iommu, bdf);
         amd_iov_info("reassign %x:%x.%x domain %d -> domain %d\n",
@@ -559,12 +551,11 @@ static int amd_iommu_assign_device(struc
 
 static void release_domain_devices(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
     struct pci_dev *pdev;
 
-    while ( !list_empty(&hd->pdev_list) )
-    {
-        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
+    while ( has_arch_pdevs(d) )
+    {
+        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
         pdev_flr(pdev->bus, pdev->devfn);
         amd_iov_info("release domain %d devices %x:%x.%x\n", d->domain_id,
                  pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
diff -r e42135b61dc6 -r 1e9df5cb885f xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c   Fri Jul 04 17:51:16 2008 +0100
@@ -35,8 +35,6 @@ int iommu_domain_init(struct domain *dom
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
     spin_lock_init(&hd->mapping_lock);
-    spin_lock_init(&hd->iommu_list_lock);
-    INIT_LIST_HEAD(&hd->pdev_list);
     INIT_LIST_HEAD(&hd->g2m_ioport_list);
 
     if ( !iommu_enabled )
@@ -68,7 +66,7 @@ int assign_device(struct domain *d, u8 b
     if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
         return rc;
 
-    if ( has_iommu_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
+    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
     {
         d->need_iommu = 1;
         return iommu_populate_page_table(d);
@@ -190,7 +188,7 @@ void deassign_device(struct domain *d, u
 
     hd->platform_ops->reassign_device(d, dom0, bus, devfn);
 
-    if ( !has_iommu_pdevs(d) && need_iommu(d) )
+    if ( !has_arch_pdevs(d) && need_iommu(d) )
     {
         d->need_iommu = 0;
         hd->platform_ops->teardown(d);
@@ -242,8 +240,7 @@ int iommu_get_device_group(struct domain
 
     group_id = ops->get_device_group_id(bus, devfn);
 
-    list_for_each_entry(pdev,
-        &(dom0->arch.hvm_domain.hvm_iommu.pdev_list), list)
+    for_each_pdev( d, pdev )
     {
         if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
             continue;
diff -r e42135b61dc6 -r 1e9df5cb885f xen/drivers/passthrough/vtd/dmar.h
--- a/xen/drivers/passthrough/vtd/dmar.h        Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/drivers/passthrough/vtd/dmar.h        Fri Jul 04 17:51:16 2008 +0100
@@ -70,10 +70,6 @@ struct acpi_atsr_unit {
     list_for_each_entry(iommu, \
         &(domain->arch.hvm_domain.hvm_iommu.iommu_list), list)
 
-#define for_each_pdev(domain, pdev) \
-    list_for_each_entry(pdev, \
-         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
-
 #define for_each_drhd_unit(drhd) \
     list_for_each_entry(drhd, &acpi_drhd_units, list)
 #define for_each_rmrr_device(rmrr, pdev) \
diff -r e42135b61dc6 -r 1e9df5cb885f xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Jul 04 17:51:16 2008 +0100
@@ -1022,8 +1022,6 @@ static int intel_iommu_domain_init(struc
     struct iommu *iommu = NULL;
     u64 i;
     struct acpi_drhd_unit *drhd;
-
-    INIT_LIST_HEAD(&hd->pdev_list);
 
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -1366,12 +1364,10 @@ void reassign_device_ownership(
     u8 bus, u8 devfn)
 {
     struct hvm_iommu *source_hd = domain_hvm_iommu(source);
-    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
     struct pci_dev *pdev, *pdev2;
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int status;
-    unsigned long flags;
     int found = 0;
 
     pdev_flr(bus, devfn);
@@ -1388,11 +1384,7 @@ void reassign_device_ownership(
     domain_context_unmap(iommu, pdev);
 
     /* Move pci device from the source domain to target domain. */
-    spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
-    spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
-    list_move(&pdev->list, &target_hd->pdev_list);
-    spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
-    spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+    list_move(&pdev->domain_list, &target->arch.pdev_list);
 
     for_each_pdev ( source, pdev2 )
     {
@@ -1413,12 +1405,11 @@ void reassign_device_ownership(
 
 void return_devices_to_dom0(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
     struct pci_dev *pdev;
 
-    while ( !list_empty(&hd->pdev_list) )
-    {
-        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
+    while ( has_arch_pdevs(d) )
+    {
+        pdev = list_entry(d->arch.pdev_list.next, typeof(*pdev), domain_list);
         pci_cleanup_msi(pdev->bus, pdev->devfn);
         reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
     }
@@ -1631,7 +1622,7 @@ static void setup_dom0_devices(struct do
                 pdev = xmalloc(struct pci_dev);
                 pdev->bus = bus;
                 pdev->devfn = PCI_DEVFN(dev, func);
-                list_add_tail(&pdev->list, &hd->pdev_list);
+                list_add_tail(&pdev->domain_list, &d->arch.pdev_list);
 
                 drhd = acpi_find_matched_drhd_unit(pdev);
                 ret = domain_context_mapping(d, drhd->iommu, pdev);
diff -r e42135b61dc6 -r 1e9df5cb885f xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/include/asm-x86/domain.h      Fri Jul 04 17:51:16 2008 +0100
@@ -228,6 +228,7 @@ struct arch_domain
     struct rangeset *ioport_caps;
     uint32_t pci_cf8;
 
+    struct list_head pdev_list;
     struct hvm_domain hvm_domain;
 
     struct paging_domain paging;
@@ -265,6 +266,9 @@ struct arch_domain
 
     cpuid_input_t cpuids[MAX_CPUID_INPUT];
 } __cacheline_aligned;
+
+#define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
+
 
 #ifdef __i386__
 struct pae_l3_cache {
diff -r e42135b61dc6 -r 1e9df5cb885f xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jul 04 17:51:16 2008 +0100
@@ -27,10 +27,6 @@
 #define for_each_amd_iommu(amd_iommu) \
     list_for_each_entry(amd_iommu, \
         &amd_iommu_head, list)
-
-#define for_each_pdev(domain, pdev) \
-    list_for_each_entry(pdev, \
-         &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
 
 #define DMA_32BIT_MASK  0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
diff -r e42135b61dc6 -r 1e9df5cb885f xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h       Fri Jul 04 17:51:16 2008 +0100
@@ -36,8 +36,6 @@ struct g2m_ioport {
 };
 
 struct hvm_iommu {
-    spinlock_t iommu_list_lock;    /* protect iommu specific lists */
-    struct list_head pdev_list;    /* direct accessed pci devices */
     u64 pgd_maddr;                 /* io page directory machine address */
     spinlock_t mapping_lock;       /* io page table lock */
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
@@ -55,7 +53,4 @@ struct hvm_iommu {
     struct iommu_ops *platform_ops;
 };
 
-#define has_iommu_pdevs(domain) \
-    (!list_empty(&(domain->arch.hvm_domain.hvm_iommu.pdev_list)))
-
 #endif /* __ASM_X86_HVM_IOMMU_H__ */
diff -r e42135b61dc6 -r 1e9df5cb885f xen/include/xen/pci.h
--- a/xen/include/xen/pci.h     Fri Jul 04 17:50:31 2008 +0100
+++ b/xen/include/xen/pci.h     Fri Jul 04 17:51:16 2008 +0100
@@ -25,7 +25,7 @@
 #define PCI_FUNC(devfn)       ((devfn) & 0x07)
 
 struct pci_dev {
-    struct list_head list;
+    struct list_head domain_list;
     struct list_head msi_dev_list;
     u8 bus;
     u8 devfn;
@@ -50,4 +50,9 @@ int pci_find_cap_offset(u8 bus, u8 dev, 
 int pci_find_cap_offset(u8 bus, u8 dev, u8 func, u8 cap);
 int pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap);
 
+
+#define for_each_pdev(domain, pdev) \
+    list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
+
+
 #endif /* __XEN_PCI_H__ */
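
For readers browsing the archive, a minimal usage sketch of the
for_each_pdev() iterator in its new home, assuming a struct domain *d whose
arch.pdev_list has been initialised in arch_domain_create() as in the first
hunk above.  It is illustrative only and not part of the patch.

    /* Walk a domain's assigned PCI devices using the iterator now defined
     * in xen/include/xen/pci.h. */
    struct pci_dev *pdev;

    for_each_pdev ( d, pdev )
        printk("dom%d owns PCI device %02x:%02x.%x\n", d->domain_id,
               pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));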

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
