[Xen-devel] [PATCH] [IOMMU] dynamic VTd page table for HVM guest

To: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] [IOMMU] dynamic VTd page table for HVM guest
From: "Zhai, Edwin" <edwin.zhai@xxxxxxxxx>
Date: Thu, 03 Sep 2009 09:39:43 +0800
Cc: wei.wang2@xxxxxxx, Xen Developers <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Zhai, Edwin" <edwin.zhai@xxxxxxxxx>
This patch makes the HVM guest's VT-d page table dynamic, just as it
already is for PV guests, so that the overhead of maintaining the page
table is avoided until a PCI device is actually assigned to the HVM
guest.

Signed-off-by: Zhai, Edwin <edwin.zhai@xxxxxxxxx>

Wei,
This patch also removes some duplicated code on your side, and should
be safe in theory. Could you give it a check?
Thanks,


--
best rgds,
edwin
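
P.S. For reference, a condensed sketch of the assignment path with
this patch applied (trimmed from the xen/drivers/passthrough/iommu.c
hunk below; locking and error handling elided):

    int assign_device(struct domain *d, u8 bus, u8 devfn)
    {
        struct hvm_iommu *hd = domain_hvm_iommu(d);
        int rc;

        if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
            return rc;

        /* First passthrough device for this domain: set need_iommu
         * and build the IOMMU page table now, instead of having kept
         * it in sync since domain creation.  PV and HVM guests now
         * take the same path, which is what lets the AMD-specific
         * amd_iommu_sync_p2m() go away. */
        if ( has_arch_pdevs(d) && !need_iommu(d) )
        {
            d->need_iommu = 1;
            rc = iommu_populate_page_table(d);
        }
        return rc;
    }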

Index: hv/xen/arch/ia64/xen/domain.c
===================================================================
--- hv.orig/xen/arch/ia64/xen/domain.c
+++ hv/xen/arch/ia64/xen/domain.c
@@ -669,7 +669,7 @@ void arch_domain_destroy(struct domain *
                free_xenheap_pages(d->shared_info,
                                   get_order_from_shift(XSI_SHIFT));
 
-       if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )     {
+       if ( iommu_enabled && need_iommu(d) )   {
                pci_release_devices(d);
                iommu_domain_destroy(d);
        }
Index: hv/xen/arch/ia64/xen/mm.c
===================================================================
--- hv.orig/xen/arch/ia64/xen/mm.c
+++ hv/xen/arch/ia64/xen/mm.c
@@ -1479,7 +1479,7 @@ zap_domain_page_one(struct domain *d, un
     if(!mfn_valid(mfn))
         return;
 
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+    if ( iommu_enabled && need_iommu(d) ){
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
@@ -2885,7 +2885,7 @@ __guest_physmap_add_page(struct domain *
     smp_mb();
     assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn,
                                ASSIGN_writable | ASSIGN_pgc_allocated);
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) ){
+    if ( iommu_enabled && need_iommu(d) ){
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
Index: hv/xen/arch/x86/mm/hap/p2m-ept.c
===================================================================
--- hv.orig/xen/arch/x86/mm/hap/p2m-ept.c
+++ hv/xen/arch/x86/mm/hap/p2m-ept.c
@@ -282,7 +282,7 @@ out:
     ept_sync_domain(d);
 
     /* Now the p2m table is not shared with vt-d page table */
-    if ( iommu_enabled && is_hvm_domain(d) && need_modify_vtd_table )
+    if ( iommu_enabled && need_iommu(d) && need_modify_vtd_table )
     {
         if ( p2mt == p2m_ram_rw )
         {
Index: hv/xen/arch/x86/mm/p2m.c
===================================================================
--- hv.orig/xen/arch/x86/mm/p2m.c
+++ hv/xen/arch/x86/mm/p2m.c
@@ -1199,7 +1199,7 @@ p2m_set_entry(struct domain *d, unsigned
          && (gfn + (1UL << page_order) - 1 > d->arch.p2m->max_mapped_pfn) )
         d->arch.p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && (is_hvm_domain(d) || need_iommu(d)) )
+    if ( iommu_enabled && need_iommu(d) )
     {
         if ( p2mt == p2m_ram_rw )
             for ( i = 0; i < (1UL << page_order); i++ )
Index: hv/xen/drivers/passthrough/amd/iommu_map.c
===================================================================
--- hv.orig/xen/drivers/passthrough/amd/iommu_map.c
+++ hv/xen/drivers/passthrough/amd/iommu_map.c
@@ -555,58 +555,6 @@ int amd_iommu_reserve_domain_unity_map(
     return 0;
 }
 
-int amd_iommu_sync_p2m(struct domain *d)
-{
-    unsigned long mfn, gfn;
-    u64 iommu_l2e;
-    struct page_info *page;
-    struct hvm_iommu *hd;
-    int iw = IOMMU_IO_WRITE_ENABLED;
-    int ir = IOMMU_IO_READ_ENABLED;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
-
-    hd = domain_hvm_iommu(d);
-
-    spin_lock(&hd->mapping_lock);
-
-    if ( hd->p2m_synchronized )
-        goto out;
-
-    spin_lock(&d->page_alloc_lock);
-
-    page_list_for_each ( page, &d->page_list )
-    {
-        mfn = page_to_mfn(page);
-        gfn = get_gpfn_from_mfn(mfn);
-
-        if ( gfn == INVALID_M2P_ENTRY )
-            continue;
-
-        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
-
-        if ( iommu_l2e == 0 )
-        {
-            spin_unlock(&d->page_alloc_lock);
-            spin_unlock(&hd->mapping_lock);
-            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
-            domain_crash(d);
-            return -EFAULT;
-        }
-
-        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
-    }
-
-    spin_unlock(&d->page_alloc_lock);
-
-    hd->p2m_synchronized = 1;
-
-out:
-    spin_unlock(&hd->mapping_lock);
-    return 0;
-}
-
 void invalidate_all_iommu_pages(struct domain *d)
 {
     u32 cmd[4], entry;
Index: hv/xen/drivers/passthrough/amd/pci_amd_iommu.c
===================================================================
--- hv.orig/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ hv/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -311,8 +311,6 @@ static int amd_iommu_assign_device(struc
     int bdf = (bus << 8) | devfn;
     int req_id = ivrs_mappings[bdf].dte_requestor_id;
 
-    amd_iommu_sync_p2m(d);
-
     if ( ivrs_mappings[req_id].unity_map_enable )
     {
         amd_iommu_reserve_domain_unity_map(
Index: hv/xen/drivers/passthrough/iommu.c
===================================================================
--- hv.orig/xen/drivers/passthrough/iommu.c
+++ hv/xen/drivers/passthrough/iommu.c
@@ -137,7 +137,7 @@ int assign_device(struct domain *d, u8 b
     if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
         goto done;
 
-    if ( has_arch_pdevs(d) && !is_hvm_domain(d) && !need_iommu(d) )
+    if ( has_arch_pdevs(d) && !need_iommu(d) )
     {
         d->need_iommu = 1;
         rc = iommu_populate_page_table(d);
@@ -184,7 +184,7 @@ void iommu_domain_destroy(struct domain 
     if ( !iommu_enabled || !hd->platform_ops )
         return;
 
-    if ( !is_hvm_domain(d) && !need_iommu(d)  )
+    if ( !need_iommu(d)  )
         return;
 
     if ( need_iommu(d) )
Index: hv/xen/drivers/passthrough/pci.c
===================================================================
--- hv.orig/xen/drivers/passthrough/pci.c
+++ hv/xen/drivers/passthrough/pci.c
@@ -202,7 +202,7 @@ static void pci_clean_dpci_irqs(struct d
     if ( !iommu_enabled )
         return;
 
-    if ( !is_hvm_domain(d) && !need_iommu(d) )
+    if ( !need_iommu(d) )
         return;
 
     spin_lock(&d->event_lock);
Index: hv/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
===================================================================
--- hv.orig/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ hv/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -63,7 +63,6 @@ int amd_iommu_unmap_page(struct domain *
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
-int amd_iommu_sync_p2m(struct domain *d);
 void invalidate_all_iommu_pages(struct domain *d);
 
 /* device table functions */
Index: hv/xen/include/xen/sched.h
===================================================================
--- hv.orig/xen/include/xen/sched.h
+++ hv/xen/include/xen/sched.h
@@ -557,7 +557,7 @@ uint64_t get_cpu_idle_time(unsigned int 
 
 #define is_hvm_domain(d) ((d)->is_hvm)
 #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
-#define need_iommu(d)    ((d)->need_iommu && !(d)->is_hvm)
+#define need_iommu(d)    ((d)->need_iommu)
 
 void set_vcpu_migration_delay(unsigned int delay);
 unsigned int get_vcpu_migration_delay(void);