# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256749706 0
# Node ID a2c17e320c47e4d188d8b32b8543ed8e8852e8b2
# Parent 47136dbb972da01b52bc9f8fde123a3cad80c3bd
AMD IOMMU: Use global interrupt remapping table by default

Using a global interrupt remapping table shared by all devices improves
compatibility with certain old BIOSes. Per-device interrupt remapping
tables can still be enabled with the new parameter
"amd-iommu-perdev-intremap".

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
---
xen/drivers/passthrough/amd/iommu_acpi.c | 18 +++++++++---
xen/drivers/passthrough/amd/iommu_init.c | 3 +-
xen/drivers/passthrough/amd/iommu_intr.c | 37 ++++++++++++++++++--------
xen/drivers/passthrough/iommu.c | 4 ++
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 1
5 files changed, 47 insertions(+), 16 deletions(-)
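
With this change the shared table becomes the default. The previous per-device
behaviour can still be requested through the new sub-option handled in
parse_iommu_param() below; assuming that parser is wired to the usual "iommu="
hypervisor command-line setting, it would be passed as, for example:

    iommu=amd-iommu-perdev-intremap

Since parse_iommu_param() splits its argument on ',', the new sub-option can be
combined with the other existing iommu sub-options in the same setting.
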
diff -r 47136dbb972d -r a2c17e320c47 xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c Wed Oct 28 10:59:55 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c Wed Oct 28 17:08:26 2009 +0000
@@ -29,6 +29,7 @@ extern struct ivrs_mappings *ivrs_mappin
extern struct ivrs_mappings *ivrs_mappings;
extern unsigned short last_bdf;
extern int ioapic_bdf[MAX_IO_APICS];
+extern void *shared_intremap_table;
static void add_ivrs_mapping_entry(
u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
@@ -66,10 +67,19 @@ static void add_ivrs_mapping_entry(
ivrs_mappings[bdf].dte_ext_int_pass = ext_int_pass;
ivrs_mappings[bdf].dte_init_pass = init_pass;
- /* allocate per-device interrupt remapping table */
- if ( ivrs_mappings[alias_id].intremap_table == NULL )
- ivrs_mappings[alias_id].intremap_table =
- amd_iommu_alloc_intremap_table();
+ if ( ivrs_mappings[alias_id].intremap_table == NULL )
+ {
+ /* allocate per-device interrupt remapping table */
+ if ( amd_iommu_perdev_intremap )
+ ivrs_mappings[alias_id].intremap_table =
+ amd_iommu_alloc_intremap_table();
+ else
+ {
+ if ( shared_intremap_table == NULL )
+ shared_intremap_table = amd_iommu_alloc_intremap_table();
+ ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
+ }
+ }
/* assgin iommu hardware */
ivrs_mappings[bdf].iommu = iommu;
}
diff -r 47136dbb972d -r a2c17e320c47 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c Wed Oct 28 10:59:55 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c Wed Oct 28 17:08:26 2009 +0000
@@ -706,7 +706,8 @@ static int __init init_ivrs_mapping(void
ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;
- spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
+ if ( amd_iommu_perdev_intremap )
+ spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
}
return 0;
}
diff -r 47136dbb972d -r a2c17e320c47 xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c Wed Oct 28 10:59:55 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c Wed Oct 28 17:08:26 2009 +0000
@@ -26,6 +26,15 @@ int ioapic_bdf[MAX_IO_APICS];
int ioapic_bdf[MAX_IO_APICS];
extern struct ivrs_mappings *ivrs_mappings;
extern unsigned short ivrs_bdf_entries;
+void *shared_intremap_table;
+static DEFINE_SPINLOCK(shared_intremap_lock);
+
+static spinlock_t* get_intremap_lock(int req_id)
+{
+ return (amd_iommu_perdev_intremap ?
+ &ivrs_mappings[req_id].intremap_lock:
+ &shared_intremap_lock);
+}
static int get_intremap_requestor_id(int bdf)
{
@@ -101,9 +110,10 @@ static void update_intremap_entry_from_i
u8 delivery_mode, dest, vector, dest_mode;
struct IO_APIC_route_entry *rte = ioapic_rte;
int req_id;
+ spinlock_t *lock;
req_id = get_intremap_requestor_id(bdf);
-
+ lock = get_intremap_lock(req_id);
/* only remap interrupt vector when lower 32 bits in ioapic ire changed */
if ( likely(!rte_upper) )
{
@@ -112,10 +122,10 @@ static void update_intremap_entry_from_i
dest_mode = rte->dest_mode;
dest = rte->dest.logical.logical_dest;
- spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+ spin_lock_irqsave(lock, flags);
entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
- spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
if ( iommu->enabled )
{
@@ -136,6 +146,7 @@ int __init amd_iommu_setup_ioapic_remapp
u8 delivery_mode, dest, vector, dest_mode;
u16 bdf, req_id;
struct amd_iommu *iommu;
+ spinlock_t *lock;
/* Read ioapic entries and update interrupt remapping table accordingly */
for ( apic = 0; apic < nr_ioapics; apic++ )
@@ -159,15 +170,17 @@ int __init amd_iommu_setup_ioapic_remapp
}
req_id = get_intremap_requestor_id(bdf);
+ lock = get_intremap_lock(req_id);
+
delivery_mode = rte.delivery_mode;
vector = rte.vector;
dest_mode = rte.dest_mode;
dest = rte.dest.logical.logical_dest;
- spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+ spin_lock_irqsave(lock, flags);
entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
update_intremap_entry(entry, vector, delivery_mode, dest_mode,
dest);
- spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
if ( iommu->enabled )
{
@@ -234,13 +247,14 @@ static void update_intremap_entry_from_m
unsigned long flags;
u32* entry;
u16 bdf, req_id, alias_id;
-
u8 delivery_mode, dest, vector, dest_mode;
+ spinlock_t *lock;
bdf = (pdev->bus << 8) | pdev->devfn;
req_id = get_dma_requestor_id(bdf);
-
- spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+ lock = get_intremap_lock(req_id);
+
+ spin_lock_irqsave(lock, flags);
dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
@@ -248,7 +262,7 @@ static void update_intremap_entry_from_m
entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
- spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
/*
* In some special cases, a pci-e device(e.g SATA controller in IDE mode)
@@ -257,14 +271,15 @@ static void update_intremap_entry_from_m
* devices.
*/
alias_id = get_intremap_requestor_id(bdf);
+ lock = get_intremap_lock(alias_id);
if ( ( bdf != alias_id ) &&
ivrs_mappings[alias_id].intremap_table != NULL )
{
- spin_lock_irqsave(&ivrs_mappings[alias_id].intremap_lock, flags);
+ spin_lock_irqsave(lock, flags);
entry = (u32*)get_intremap_entry(alias_id, vector, delivery_mode);
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
invalidate_interrupt_table(iommu, alias_id);
- spin_unlock_irqrestore(&ivrs_mappings[alias_id].intremap_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
}
if ( iommu->enabled )
diff -r 47136dbb972d -r a2c17e320c47 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c Wed Oct 28 10:59:55 2009 +0000
+++ b/xen/drivers/passthrough/iommu.c Wed Oct 28 17:08:26 2009 +0000
@@ -45,6 +45,7 @@ int iommu_qinval = 0;
int iommu_qinval = 0;
int iommu_intremap = 0;
int amd_iommu_debug = 0;
+int amd_iommu_perdev_intremap = 0;
static void __init parse_iommu_param(char *s)
{
@@ -54,6 +55,7 @@ static void __init parse_iommu_param(cha
iommu_qinval = 1;
iommu_intremap = 1;
amd_iommu_debug = 0;
+ amd_iommu_perdev_intremap = 0;
do {
ss = strchr(s, ',');
@@ -79,6 +81,8 @@ static void __init parse_iommu_param(cha
iommu_intremap = 0;
else if ( !strcmp(s, "amd-iommu-debug") )
amd_iommu_debug = 1;
+ else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
+ amd_iommu_perdev_intremap = 1;
s = ss + 1;
} while ( ss );
diff -r 47136dbb972d -r a2c17e320c47 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Wed Oct 28 10:59:55 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Wed Oct 28 17:08:26 2009 +0000
@@ -33,6 +33,7 @@
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
extern int amd_iommu_debug;
+extern int amd_iommu_perdev_intremap;
#define AMD_IOMMU_DEBUG(fmt, args...) \
do \
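
For illustration only (this is not part of the patch), the allocation policy
introduced in add_ivrs_mapping_entry() reduces to the following standalone
sketch. The flag and table names mirror the patch; the helper
pick_intremap_table(), the table size and the test harness are made up for the
example:

#include <stdio.h>
#include <stdlib.h>

static int amd_iommu_perdev_intremap;   /* 0 by default, as in the patch */
static void *shared_intremap_table;     /* allocated once, shared by all devices */

static void *alloc_intremap_table(void)
{
    /* Stand-in for amd_iommu_alloc_intremap_table(); the size is arbitrary. */
    return calloc(1, 4096);
}

/* Mirrors the logic added to add_ivrs_mapping_entry(): a private table per
 * device when the option is set, otherwise one table shared by everyone. */
static void *pick_intremap_table(void)
{
    if ( amd_iommu_perdev_intremap )
        return alloc_intremap_table();
    if ( shared_intremap_table == NULL )
        shared_intremap_table = alloc_intremap_table();
    return shared_intremap_table;
}

int main(void)
{
    void *dev_a = pick_intremap_table();
    void *dev_b = pick_intremap_table();
    printf("default (shared) mode: same table? %s\n",
           dev_a == dev_b ? "yes" : "no");

    amd_iommu_perdev_intremap = 1;       /* as set by "amd-iommu-perdev-intremap" */
    dev_a = pick_intremap_table();
    dev_b = pick_intremap_table();
    printf("per-device mode:       same table? %s\n",
           dev_a == dev_b ? "yes" : "no");
    return 0;
}

The lock selection in iommu_intr.c follows the same split: get_intremap_lock()
returns the per-device intremap_lock when amd_iommu_perdev_intremap is set and
the single shared_intremap_lock otherwise, so the lock always matches the table
being updated.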