WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

RE: [Xen-devel] [PATCH] remove ASSERT(spin_is_locked(&pcidevs_lock));

To: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: RE: [Xen-devel] [PATCH] remove ASSERT(spin_is_locked(&pcidevs_lock));
From: "Jiang, Yunhong" <yunhong.jiang@xxxxxxxxx>
Date: Fri, 12 Dec 2008 12:12:34 +0800
Accept-language: en-US
Acceptlanguage: en-US
Cc:
Delivery-date: Thu, 11 Dec 2008 20:14:32 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20081212023640.GD26390%yamahata@xxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20081212023640.GD26390%yamahata@xxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AclcAoWDCDgAvamvRCys4BEgnC9QCQAAB8aw
Thread-topic: [Xen-devel] [PATCH] remove ASSERT(spin_is_locked(&pcidevs_lock));
Per discussion with Espen, the rw_lock could be changed to a spin_lock. But I 
didn't get more information from him, so I didn't try it. Since it causes a 
compilation error on IA64, I cooked a patch for it and verified it on the x86 side. 

The attached patch changes the pcidevs_lock from an rw_lock to a spin_lock, and 
also fixes two defects:
a) deassign_device will deadlock when trying to take the pcidevs_lock if called by 
pci_release_devices; the locking is moved to the caller.
b) iommu_domain_teardown should not ASSERT on the pcidevs_lock, because it 
only updates the domain's VT-d mapping.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>

Thanks
Yunhong Jiang


xen-devel-bounces@xxxxxxxxxxxxxxxxxxx <> wrote:
> remove ASSERT(spin_is_locked(&pcidevs_lock));
> 
> spin_is_locked() is for spinlock, but pcidevs_lock is rwlock.
> so the statement is bogus. remove them.
> In fact they caused compilation error for ia64. This patch fixes it.
> 
> Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
> 
> diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c ---
> a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c
> @@ -850,7 +850,6 @@ int map_domain_pirq(
>     struct msi_desc *msi_desc;
>     struct pci_dev *pdev = NULL;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     ASSERT(spin_is_locked(&d->event_lock));
> 
>     if ( !IS_PRIV(current->domain) )
> @@ -930,7 +929,6 @@ int unmap_domain_pirq(struct domain *d,
>     if ( !IS_PRIV(current->domain) )
>         return -EINVAL;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     ASSERT(spin_is_locked(&d->event_lock));
> 
>     vector = d->arch.pirq_vector[pirq];
> diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c ---
> a/xen/arch/x86/msi.c +++ b/xen/arch/x86/msi.c
> @@ -440,7 +440,6 @@ static int msi_capability_init(struct pc
>     u8 slot = PCI_SLOT(dev->devfn);
>     u8 func = PCI_FUNC(dev->devfn);
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
>     control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
>     /* MSI Entry Initialization */
> @@ -509,7 +508,6 @@ static int msix_capability_init(struct p
>     u8 slot = PCI_SLOT(dev->devfn);
>     u8 func = PCI_FUNC(dev->devfn);
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     ASSERT(desc);
> 
>     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
> @@ -574,7 +572,6 @@ static int __pci_enable_msi(struct msi_i     int status;
>     struct pci_dev *pdev;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     pdev = pci_get_pdev(msi->bus, msi->devfn);
>     if ( !pdev )
>         return -ENODEV;
> @@ -634,7 +631,6 @@ static int __pci_enable_msix(struct msi_
>     u8 slot = PCI_SLOT(msi->devfn);
>     u8 func = PCI_FUNC(msi->devfn);
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     pdev = pci_get_pdev(msi->bus, msi->devfn);
>     if ( !pdev )
>         return -ENODEV;
> @@ -688,7 +684,6 @@ static void __pci_disable_msix(struct ms  */
> int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc) {
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     return  msi->table_base ? __pci_enable_msix(msi, desc) :
>         __pci_enable_msi(msi, desc);
> diff --git a/xen/drivers/passthrough/iommu.c
> b/xen/drivers/passthrough/iommu.c
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -86,8 +86,6 @@ int iommu_add_device(struct pci_dev *pde
> 
>     if ( !pdev->domain )
>         return -EINVAL;
> -
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     hd = domain_hvm_iommu(pdev->domain);
>     if ( !iommu_enabled || !hd->platform_ops )
> diff --git a/xen/drivers/passthrough/pci.c
> b/xen/drivers/passthrough/pci.c
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -62,7 +62,6 @@ struct pci_dev *pci_get_pdev(int bus, in {
>     struct pci_dev *pdev = NULL;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
>         if ( (pdev->bus == bus || bus == -1) &&
> @@ -78,7 +77,6 @@ struct pci_dev *pci_get_pdev_by_domain(s {
>     struct pci_dev *pdev = NULL;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
>          if ( (pdev->bus == bus || bus == -1) &&
> diff --git a/xen/drivers/passthrough/vtd/iommu.c
> b/xen/drivers/passthrough/vtd/iommu.c
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1037,7 +1037,6 @@ static int domain_context_mapping_one(
>     struct pci_dev *pdev = NULL;
>     int agaw;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     spin_lock(&iommu->lock);
>     maddr = bus_to_context_maddr(iommu, bus);
>     context_entries = (struct context_entry
> *)map_vtd_domain_page(maddr);
> @@ -1214,7 +1213,6 @@ static int domain_context_mapping(struct     if (
>         !drhd ) return -ENODEV;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     type = pdev_type(bus, devfn);
>     switch ( type )
> @@ -1298,7 +1296,6 @@ static int domain_context_unmap_one(
>     struct context_entry *context, *context_entries;     u64 maddr;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     spin_lock(&iommu->lock);
> 
>     maddr = bus_to_context_maddr(iommu, bus);
> @@ -1388,7 +1385,6 @@ static int reassign_device_ownership(     struct
>     iommu *pdev_iommu; int ret, found = 0;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     pdev = pci_get_pdev_by_domain(source, bus, devfn);
> 
>     if (!pdev)
> @@ -1428,7 +1424,6 @@ void iommu_domain_teardown(struct domain
>     if ( list_empty(&acpi_drhd_units) )
>         return;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     spin_lock(&hd->mapping_lock);
>     iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));    
> hd->pgd_maddr = 0; @@ -1518,7 +1513,6 @@ static int
>     iommu_prepare_rmrr_dev(struct     u64 base, end; unsigned long
> base_pfn, end_pfn; 
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     ASSERT(rmrr->base_address < rmrr->end_address);
> 
>     base = rmrr->base_address & PAGE_MASK_4K;
> @@ -1543,7 +1537,6 @@ static int intel_iommu_add_device(struct     u16 bdf;
>     int ret, i;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
> 
>     if ( !pdev->domain )
>         return -EINVAL;
> @@ -1782,7 +1775,6 @@ int intel_iommu_assign_device(struct dom
>     if ( list_empty(&acpi_drhd_units) )
>         return -ENODEV;
> 
> -    ASSERT(spin_is_locked(&pcidevs_lock));
>     pdev = pci_get_pdev(bus, devfn);
>     if (!pdev)
>         return -ENODEV;
> 
> 
> --
> yamahata
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx
> http://lists.xensource.com/xen-devel

Attachment: msi_lock.patch
Description: msi_lock.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
<Prev in Thread] Current Thread [Next in Thread>