# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1211618255 -3600
# Node ID 62f1c837057f33e1e58f90bbe90f33335a89558f
# Parent 36bbcc6baadf40b93372c39a387109b65dde20ad
Enable IOMMU for PV guests
Introduce an 'iommu_pv' boot parameter (default off). Add a need_iommu
flag which is set if the guest has PCI devices assigned. IOMMU page
tables are populated with the current guest memory when the IOMMU is
first enabled for the guest.
Signed-off-by: Espen Skoglund <espen.skoglund@xxxxxxxxxxxxx>
---
xen/drivers/passthrough/iommu.c | 73 +++++++++++++++++++++++++++++++++---
xen/drivers/passthrough/vtd/iommu.c | 37 +++++++++++-------
xen/include/xen/hvm/iommu.h | 3 +
xen/include/xen/iommu.h | 1
xen/include/xen/sched.h | 3 +
5 files changed, 97 insertions(+), 20 deletions(-)
diff -r 36bbcc6baadf -r 62f1c837057f xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c Sat May 24 09:35:05 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c Sat May 24 09:37:35 2008 +0100
@@ -15,14 +15,19 @@
#include <xen/sched.h>
#include <xen/iommu.h>
+#include <xen/paging.h>
extern struct iommu_ops intel_iommu_ops;
extern struct iommu_ops amd_iommu_ops;
+static int iommu_populate_page_table(struct domain *d);
int intel_vtd_setup(void);
int amd_iov_detect(void);
int iommu_enabled = 1;
boolean_param("iommu", iommu_enabled);
+
+int iommu_pv_enabled = 0;
+boolean_param("iommu_pv", iommu_pv_enabled);
int iommu_domain_init(struct domain *domain)
{
@@ -54,11 +59,46 @@ int assign_device(struct domain *d, u8 b
int assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
-
- if ( !iommu_enabled || !hd->platform_ops )
- return 0;
-
- return hd->platform_ops->assign_device(d, bus, devfn);
+ int rc;
+
+ if ( !iommu_enabled || !hd->platform_ops )
+ return 0;
+
+ if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
+ return rc;
+
+ if ( has_iommu_pdevs(d) && !need_iommu(d) )
+ {
+ d->need_iommu = 1;
+ return iommu_populate_page_table(d);
+ }
+ return 0;
+}
+
+static int iommu_populate_page_table(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ struct page_info *page;
+ int rc;
+
+ spin_lock(&d->page_alloc_lock);
+
+ list_for_each_entry ( page, &d->page_list, list )
+ {
+ if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
+ {
+ rc = hd->platform_ops->map_page(
+ d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
+ if (rc)
+ {
+ spin_unlock(&d->page_alloc_lock);
+ hd->platform_ops->teardown(d);
+ return rc;
+ }
+ }
+ }
+ spin_unlock(&d->page_alloc_lock);
+ return 0;
}
void iommu_domain_destroy(struct domain *d)
@@ -137,7 +177,13 @@ void deassign_device(struct domain *d, u
if ( !iommu_enabled || !hd->platform_ops )
return;
- return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
+ hd->platform_ops->reassign_device(d, dom0, bus, devfn);
+
+ if ( !has_iommu_pdevs(d) && need_iommu(d) )
+ {
+ d->need_iommu = 0;
+ hd->platform_ops->teardown(d);
+ }
}
static int iommu_setup(void)
@@ -160,7 +206,22 @@ static int iommu_setup(void)
iommu_enabled = (rc == 0);
out:
+ if ( !iommu_enabled || !vtd_enabled )
+ iommu_pv_enabled = 0;
printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
+ if (iommu_enabled)
+ printk("I/O virtualisation for PV guests %sabled\n",
+ iommu_pv_enabled ? "en" : "dis");
return rc;
}
__initcall(iommu_setup);
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 36bbcc6baadf -r 62f1c837057f xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c Sat May 24 09:35:05 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c Sat May 24 09:37:35 2008 +0100
@@ -1138,26 +1138,35 @@ static int domain_context_mapping_one(
}
spin_lock_irqsave(&iommu->lock, flags);
+
+#ifdef CONTEXT_PASSTHRU
+ if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
+ context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
+ else
+ {
+#endif
+ if ( hd->pgd_maddr == 0 )
+ {
+ hd->pgd_maddr = alloc_pgtable_maddr();
+ if ( hd->pgd_maddr == 0 )
+ {
+ unmap_vtd_domain_page(context_entries);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return -ENOMEM;
+ }
+ }
+ context_set_address_root(*context, hd->pgd_maddr);
+ context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
+#ifdef CONTEXT_PASSTHRU
+ }
+#endif
+
/*
* domain_id 0 is not valid on Intel's IOMMU, force domain_id to
* be 1 based as required by intel's iommu hw.
*/
context_set_domain_id(context, domain);
context_set_address_width(*context, hd->agaw);
-
- if ( ecap_pass_thru(iommu->ecap) )
- context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
-#ifdef CONTEXT_PASSTHRU
- else
- {
-#endif
- ASSERT(hd->pgd_maddr != 0);
- context_set_address_root(*context, hd->pgd_maddr);
- context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
-#ifdef CONTEXT_PASSTHRU
- }
-#endif
-
context_set_fault_enable(*context);
context_set_present(*context);
iommu_flush_cache_entry(iommu, context);
diff -r 36bbcc6baadf -r 62f1c837057f xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h Sat May 24 09:35:05 2008 +0100
+++ b/xen/include/xen/hvm/iommu.h Sat May 24 09:37:35 2008 +0100
@@ -54,4 +54,7 @@ struct hvm_iommu {
struct iommu_ops *platform_ops;
};
+#define has_iommu_pdevs(domain) \
+ (!list_empty(&(domain->arch.hvm_domain.hvm_iommu.pdev_list)))
+
#endif /* __ASM_X86_HVM_IOMMU_H__ */
diff -r 36bbcc6baadf -r 62f1c837057f xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h Sat May 24 09:35:05 2008 +0100
+++ b/xen/include/xen/iommu.h Sat May 24 09:37:35 2008 +0100
@@ -29,6 +29,7 @@
extern int vtd_enabled;
extern int iommu_enabled;
+extern int iommu_pv_enabled;
#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
#define domain_vmx_iommu(d) (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
diff -r 36bbcc6baadf -r 62f1c837057f xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Sat May 24 09:35:05 2008 +0100
+++ b/xen/include/xen/sched.h Sat May 24 09:37:35 2008 +0100
@@ -186,6 +186,8 @@ struct domain
/* Is this an HVM guest? */
bool_t is_hvm;
+ /* Does this guest need iommu mappings? */
+ bool_t need_iommu;
/* Is this guest fully privileged (aka dom0)? */
bool_t is_privileged;
/* Which guest this guest has privileges on */
@@ -515,6 +517,7 @@ static inline void vcpu_unblock(struct v
#define is_hvm_domain(d) ((d)->is_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
+#define need_iommu(d) ((d)->need_iommu && !(d)->is_hvm)
extern enum cpufreq_controller {
FREQCTL_none, FREQCTL_dom0_kernel
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|