Hi,
This patch fixes some spinlock issues related to changeset:
19246:9bc5799566be "passthrough MSI-X mask bit acceleration"
19263:9c5b4efc934d "hvm: passthrough MSI-X: fix ia64 link and MSI-X clean up"
Without this patch, I got:
(XEN) Assertion '_spin_is_locked(&pcidevs_lock)' failed at vmsi.c:389
or
(XEN) Xen BUG at spinlock.c:25
I'm not sure this fix is right. Please review it.
- d->arch.hvm_domain.msixtbl_list_lock is redundant.
irq_desc->lock covers it, thus remove the spinlock.
- ASSERT(spin_is_locked(&pcidevs_lock)) is not needed.
At first, I intended to wrap these calls with pcidevs_lock.
But this assertion seems pointless: pcidevs_lock protects
alldevs_list, and the msixtbl_pt_xxx functions never refer to it.
- In a "debug=y" environment, xmalloc must not be called from both
the irq-enabled and the irq-disabled state. Otherwise, an
assertion failure occurs in check_lock().
So, in msixtbl_pt_register, xmalloc is called beforehand,
before irq_desc->lock is taken.
I think this is ugly, though.
Thanks,
Kouya
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
diff -r cff29d694a89 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Mar 05 17:50:05 2009 +0000
+++ b/xen/arch/x86/hvm/hvm.c Fri Mar 06 11:01:43 2009 +0900
@@ -309,7 +309,6 @@ int hvm_domain_initialise(struct domain
spin_lock_init(&d->arch.hvm_domain.uc_lock);
INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
- spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
hvm_init_guest_time(d);
diff -r cff29d694a89 xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/hvm/vmsi.c Thu Mar 05 17:50:05 2009 +0000
+++ b/xen/arch/x86/hvm/vmsi.c Fri Mar 06 11:01:43 2009 +0900
@@ -336,16 +336,12 @@ struct hvm_mmio_handler msixtbl_mmio_han
.write_handler = msixtbl_write
};
-static struct msixtbl_entry *add_msixtbl_entry(struct domain *d,
- struct pci_dev *pdev,
- uint64_t gtable)
+static void add_msixtbl_entry(struct domain *d,
+ struct pci_dev *pdev,
+ uint64_t gtable,
+ struct msixtbl_entry *entry)
{
- struct msixtbl_entry *entry;
u32 len;
-
- entry = xmalloc(struct msixtbl_entry);
- if ( !entry )
- return NULL;
memset(entry, 0, sizeof(struct msixtbl_entry));
@@ -359,8 +355,6 @@ static struct msixtbl_entry *add_msixtbl
entry->gtable = (unsigned long) gtable;
list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
-
- return entry;
}
static void free_msixtbl_entry(struct rcu_head *rcu)
@@ -383,12 +377,17 @@ int msixtbl_pt_register(struct domain *d
irq_desc_t *irq_desc;
struct msi_desc *msi_desc;
struct pci_dev *pdev;
- struct msixtbl_entry *entry;
+ struct msixtbl_entry *entry, *new_entry;
int r = -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ /* XXX: beforehand allocation is needed since check_lock() fails. */
+ new_entry = xmalloc(struct msixtbl_entry);
+ if ( !new_entry )
+ return r;
irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if ( !irq_desc )
+ return r;
if ( irq_desc->handler != &pci_msi_type )
goto out;
@@ -399,28 +398,22 @@ int msixtbl_pt_register(struct domain *d
pdev = msi_desc->dev;
- spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
-
list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
if ( pdev == entry->pdev )
goto found;
- entry = add_msixtbl_entry(d, pdev, gtable);
- if ( !entry )
- {
- spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
- goto out;
- }
+ entry = new_entry;
+ new_entry = NULL;
+ add_msixtbl_entry(d, pdev, gtable, entry);
found:
atomic_inc(&entry->refcnt);
- spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
r = 0;
out:
spin_unlock_irq(&irq_desc->lock);
+ xfree(new_entry);
return r;
-
}
void msixtbl_pt_unregister(struct domain *d, int pirq)
@@ -430,9 +423,9 @@ void msixtbl_pt_unregister(struct domain
struct pci_dev *pdev;
struct msixtbl_entry *entry;
- ASSERT(spin_is_locked(&pcidevs_lock));
-
irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if ( !irq_desc )
+ return;
if ( irq_desc->handler != &pci_msi_type )
goto out;
@@ -443,36 +436,33 @@ void msixtbl_pt_unregister(struct domain
pdev = msi_desc->dev;
- spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
-
list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
if ( pdev == entry->pdev )
goto found;
- spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-
-
out:
- spin_unlock(&irq_desc->lock);
+ spin_unlock_irq(&irq_desc->lock);
return;
found:
if ( !atomic_dec_and_test(&entry->refcnt) )
del_msixtbl_entry(entry);
- spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
- spin_unlock(&irq_desc->lock);
+ spin_unlock_irq(&irq_desc->lock);
}
void msixtbl_pt_cleanup(struct domain *d, int pirq)
{
+ irq_desc_t *irq_desc;
struct msixtbl_entry *entry, *temp;
- spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
+ irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if ( !irq_desc )
+ return;
list_for_each_entry_safe( entry, temp,
&d->arch.hvm_domain.msixtbl_list, list )
del_msixtbl_entry(entry);
- spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
+ spin_unlock_irq(&irq_desc->lock);
}
diff -r cff29d694a89 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h Thu Mar 05 17:50:05 2009 +0000
+++ b/xen/include/asm-x86/hvm/domain.h Fri Mar 06 11:01:43 2009 +0900
@@ -77,7 +77,6 @@ struct hvm_domain {
/* hypervisor intercepted msix table */
struct list_head msixtbl_list;
- spinlock_t msixtbl_list_lock;
struct viridian_domain viridian;
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|