To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-4.0-testing] iommu: Map correct permissions in IOMMU on grant read-only map request.
From: "Xen patchbot-4.0-testing" <patchbot-4.0-testing@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 04 Jun 2010 03:45:51 -0700
Delivery-date: Fri, 04 Jun 2010 03:49:42 -0700
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1275644301 -3600
# Node ID 104e6ab14f40edb4e5934664670db319f20802c8
# Parent  969ba353c8465d5d4abf4147b6fac98281ef6681
iommu: Map correct permissions in IOMMU on grant read-only map request.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   21476:69a8e9b6961f
xen-unstable date:        Fri May 28 09:08:00 2010 +0100

iommu: Specify access permissions to iommu_map_page().

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   21475:9ee5c292b112
xen-unstable date:        Fri May 28 08:48:50 2010 +0100
---
 xen/arch/ia64/xen/mm.c                        |    3 +-
 xen/arch/x86/mm.c                             |    3 +-
 xen/arch/x86/mm/hap/p2m-ept.c                 |    7 ++++-
 xen/arch/x86/mm/p2m.c                         |    9 +++++--
 xen/arch/x86/x86_64/mm.c                      |    2 -
 xen/common/grant_table.c                      |   32 ++++++++++++++++----------
 xen/drivers/passthrough/amd/iommu_map.c       |   10 ++++----
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    3 +-
 xen/drivers/passthrough/iommu.c               |   20 ++++++++--------
 xen/drivers/passthrough/vtd/ia64/vtd.c        |    3 +-
 xen/drivers/passthrough/vtd/iommu.c           |   10 +++++---
 xen/drivers/passthrough/vtd/x86/vtd.c         |    3 +-
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  |    3 --
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    3 +-
 xen/include/xen/iommu.h                       |   13 ++++++++--
 15 files changed, 80 insertions(+), 44 deletions(-)

diff -r 969ba353c846 -r 104e6ab14f40 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/arch/ia64/xen/mm.c    Fri Jun 04 10:38:21 2010 +0100
@@ -2897,7 +2897,8 @@ __guest_physmap_add_page(struct domain *
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
-            iommu_map_page(d, gpfn*j + i, mfn*j + i);
+            iommu_map_page(d, gpfn*j + i, mfn*j + i,
+                           IOMMUF_readable|IOMMUF_writable);
     }
 }
 
diff -r 969ba353c846 -r 104e6ab14f40 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/arch/x86/mm.c Fri Jun 04 10:38:21 2010 +0100
@@ -2399,7 +2399,8 @@ static int __get_page_type(struct page_i
                 iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
                 iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page));
+                               page_to_mfn(page),
+                               IOMMUF_readable|IOMMUF_writable);
         }
     }
 
diff -r 969ba353c846 -r 104e6ab14f40 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Fri Jun 04 10:38:21 2010 +0100
@@ -350,10 +350,13 @@ out:
             if ( order == EPT_TABLE_ORDER )
             {
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn - offset + i, mfn_x(mfn) - offset + i);
+                    iommu_map_page(
+                        d, gfn - offset + i, mfn_x(mfn) - offset + i,
+                        IOMMUF_readable|IOMMUF_writable);
             }
             else if ( !order )
-                iommu_map_page(d, gfn, mfn_x(mfn));
+                iommu_map_page(
+                    d, gfn, mfn_x(mfn), IOMMUF_readable|IOMMUF_writable);
         }
         else
         {
diff -r 969ba353c846 -r 104e6ab14f40 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri Jun 04 10:38:21 2010 +0100
@@ -1290,7 +1290,8 @@ p2m_set_entry(struct domain *d, unsigned
     {
         if ( p2mt == p2m_ram_rw )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(d, gfn+i, mfn_x(mfn)+i );
+                iommu_map_page(d, gfn+i, mfn_x(mfn)+i,
+                               IOMMUF_readable|IOMMUF_writable);
         else
             for ( int i = 0; i < (1UL << page_order); i++ )
                 iommu_unmap_page(d, gfn+i);
@@ -2108,12 +2109,16 @@ guest_physmap_add_entry(struct domain *d
         if ( need_iommu(d) && t == p2m_ram_rw )
         {
             for ( i = 0; i < (1 << page_order); i++ )
-                if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
+            {
+                rc = iommu_map_page(
+                    d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
+                if ( rc != 0 )
                 {
                     while ( i-- > 0 )
                         iommu_unmap_page(d, mfn + i);
                     return rc;
                 }
+            }
         }
         return 0;
     }
diff -r 969ba353c846 -r 104e6ab14f40 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Fri Jun 04 10:38:21 2010 +0100
@@ -1461,7 +1461,7 @@ int memory_add(unsigned long spfn, unsig
         goto destroy_m2p;
 
     for ( i = spfn; i < epfn; i++ )
-        if ( iommu_map_page(dom0, i, i) )
+        if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
             break;
 
     if ( i != epfn )
diff -r 969ba353c846 -r 104e6ab14f40 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/common/grant_table.c  Fri Jun 04 10:38:21 2010 +0100
@@ -596,16 +596,20 @@ __gnttab_map_grant_ref(
         goto undo_out;
     }
 
-    if ( (!is_hvm_domain(ld) && need_iommu(ld)) &&
-         !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
-         (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
-    {
-        /* Shouldn't happen, because you can't use iommu in a HVM
-         * domain. */
+    if ( !is_hvm_domain(ld) && need_iommu(ld) )
+    {
+        int err = 0;
+        /* Shouldn't happen, because you can't use iommu in a HVM domain. */
         BUG_ON(paging_mode_translate(ld));
         /* We're not translated, so we know that gmfns and mfns are
            the same things, so the IOMMU entry is always 1-to-1. */
-        if ( iommu_map_page(ld, frame, frame) )
+        if ( (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+             !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+            err = iommu_map_page(ld, frame, frame,
+                                 IOMMUF_readable|IOMMUF_writable);
+        else if ( act_pin && !old_pin )
+            err = iommu_map_page(ld, frame, frame, IOMMUF_readable);
+        if ( err )
         {
             rc = GNTST_general_error;
             goto undo_out;
@@ -779,12 +783,16 @@ __gnttab_unmap_common(
             act->pin -= GNTPIN_hstw_inc;
     }
 
-    if ( (!is_hvm_domain(ld) && need_iommu(ld)) &&
-         (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
-         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
-    {
+    if ( !is_hvm_domain(ld) && need_iommu(ld) )
+    {
+        int err = 0;
         BUG_ON(paging_mode_translate(ld));
-        if ( iommu_unmap_page(ld, op->frame) )
+        if ( old_pin && !act->pin )
+            err = iommu_unmap_page(ld, op->frame);
+        else if ( (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
+                  !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
+            err = iommu_map_page(ld, op->frame, op->frame, IOMMUF_readable);
+        if ( err )
         {
             rc = GNTST_general_error;
             goto unmap_out;
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Fri Jun 04 10:38:21 2010 +0100
@@ -450,12 +450,11 @@ static u64 iommu_l2e_from_pfn(struct pag
     return next_table_maddr;
 }
 
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                       unsigned int flags)
 {
     u64 iommu_l2e;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    int iw = IOMMU_IO_WRITE_ENABLED;
-    int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
@@ -469,7 +468,10 @@ int amd_iommu_map_page(struct domain *d,
         domain_crash(d);
         return -EFAULT;
     }
-    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
+
+    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
+                          !!(flags & IOMMUF_writable),
+                          !!(flags & IOMMUF_readable));
 
     spin_unlock(&hd->mapping_lock);
     return 0;
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Jun 04 10:38:21 2010 +0100
@@ -235,7 +235,8 @@ static int amd_iommu_domain_init(struct 
         {
             /* setup 1:1 page table for dom0 */
             for ( i = 0; i < max_page; i++ )
-                amd_iommu_map_page(domain, i, i);
+                amd_iommu_map_page(domain, i, i,
+                                   IOMMUF_readable|IOMMUF_writable);
         }
 
         amd_iommu_setup_dom0_devices(domain);
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/iommu.c   Fri Jun 04 10:38:21 2010 +0100
@@ -172,7 +172,8 @@ static int iommu_populate_page_table(str
         {
             BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
             rc = hd->platform_ops->map_page(
-                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
+                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
+                IOMMUF_readable|IOMMUF_writable);
             if (rc)
             {
                 spin_unlock(&d->page_alloc_lock);
@@ -217,14 +218,15 @@ void iommu_domain_destroy(struct domain 
     }
 }
 
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
-
-    if ( !iommu_enabled || !hd->platform_ops )
-        return 0;
-
-    return hd->platform_ops->map_page(d, gfn, mfn);
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                   unsigned int flags)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return 0;
+
+    return hd->platform_ops->map_page(d, gfn, mfn, flags);
 }
 
 int iommu_unmap_page(struct domain *d, unsigned long gfn)
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri Jun 04 10:38:21 2010 +0100
@@ -108,7 +108,8 @@ static int do_dom0_iommu_mapping(unsigne
         pfn = page_addr >> PAGE_SHIFT;
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
+            iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j),
+                           IOMMUF_readable|IOMMUF_writable);
 
        page_addr += PAGE_SIZE;
 
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Jun 04 10:38:21 2010 +0100
@@ -1571,7 +1571,8 @@ void iommu_domain_teardown(struct domain
 }
 
 static int intel_iommu_map_page(
-    struct domain *d, unsigned long gfn, unsigned long mfn)
+    struct domain *d, unsigned long gfn, unsigned long mfn,
+    unsigned int flags)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -1598,7 +1599,9 @@ static int intel_iommu_map_page(
     pte = page + (gfn & LEVEL_MASK);
     pte_present = dma_pte_present(*pte);
     dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
-    dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
+    dma_set_pte_prot(*pte,
+                     ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
+                     ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
 
     /* Set the SNP on leaf page table if Snoop Control available */
     if ( iommu_snoop )
@@ -1680,7 +1683,8 @@ static int rmrr_identity_mapping(struct 
 
     while ( base_pfn < end_pfn )
     {
-        if ( intel_iommu_map_page(d, base_pfn, base_pfn) )
+        if ( intel_iommu_map_page(d, base_pfn, base_pfn,
+                                  IOMMUF_readable|IOMMUF_writable) )
             return -1;
         base_pfn++;
     }
diff -r 969ba353c846 -r 104e6ab14f40 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Fri Jun 04 10:38:21 2010 +0100
@@ -153,7 +153,8 @@ void iommu_set_dom0_mapping(struct domai
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
+            iommu_map_page(d, (i*tmp+j), (i*tmp+j),
+                           IOMMUF_readable|IOMMUF_writable);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
             process_pending_softirqs();
diff -r 969ba353c846 -r 104e6ab14f40 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri Jun 04 10:38:21 2010 +0100
@@ -388,9 +388,6 @@
 #define MAX_AMD_IOMMUS                  32
 #define IOMMU_PAGE_TABLE_LEVEL_3        3
 #define IOMMU_PAGE_TABLE_LEVEL_4        4
-#define IOMMU_IO_WRITE_ENABLED          1
-#define IOMMU_IO_READ_ENABLED           1
-#define HACK_BIOS_SETTINGS                  0
 
 /* interrupt remapping table */
 #define INT_REMAP_INDEX_DM_MASK         0x1C00
diff -r 969ba353c846 -r 104e6ab14f40 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Jun 04 10:38:21 2010 +0100
@@ -52,7 +52,8 @@ int __init amd_iommu_update_ivrs_mapping
 int __init amd_iommu_update_ivrs_mapping_acpi(void);
 
 /* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                       unsigned int flags);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
diff -r 969ba353c846 -r 104e6ab14f40 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Fri Jun 04 10:36:53 2010 +0100
+++ b/xen/include/xen/iommu.h   Fri Jun 04 10:38:21 2010 +0100
@@ -68,8 +68,16 @@ int deassign_device(struct domain *d, u8
 int deassign_device(struct domain *d, u8 bus, u8 devfn);
 int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
     XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+
+/* iommu_map_page() takes flags to direct the mapping operation. */
+#define _IOMMUF_readable 0
+#define IOMMUF_readable  (1u<<_IOMMUF_readable)
+#define _IOMMUF_writable 1
+#define IOMMUF_writable  (1u<<_IOMMUF_writable)
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                   unsigned int flags);
 int iommu_unmap_page(struct domain *d, unsigned long gfn);
+
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
 int dpci_ioport_intercept(ioreq_t *p);
@@ -102,7 +110,8 @@ struct iommu_ops {
     int (*remove_device)(struct pci_dev *pdev);
     int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
     void (*teardown)(struct domain *d);
-    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
+    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn,
+                    unsigned int flags);
     int (*unmap_page)(struct domain *d, unsigned long gfn);
     int (*reassign_device)(struct domain *s, struct domain *t,
                           u8 bus, u8 devfn);
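
For illustration, a minimal standalone sketch of the permission-flag
convention this changeset introduces. Only the IOMMUF_readable and
IOMMUF_writable definitions and the extended iommu_map_page() signature
come from the patch above; the grant_iommu_flags() helper and the main()
driver are hypothetical, added so the snippet compiles on its own with a
plain C compiler.

#include <stdio.h>

/* Flag definitions, as added to xen/include/xen/iommu.h by this patch. */
#define _IOMMUF_readable 0
#define IOMMUF_readable  (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable  (1u<<_IOMMUF_writable)

/*
 * Hypothetical helper mirroring the grant-table logic in the hunks above:
 * a read-only grant map request now yields a read-only IOMMU entry, while
 * a writable request yields both permission bits.  Before this patch,
 * every grant mapping was programmed read/write regardless.
 */
static unsigned int grant_iommu_flags(int read_only)
{
    return read_only ? IOMMUF_readable
                     : (IOMMUF_readable | IOMMUF_writable);
}

int main(void)
{
    /* The value a caller passes as the new 4th iommu_map_page() argument. */
    printf("read-only grant -> 0x%x\n", grant_iommu_flags(1)); /* 0x1 */
    printf("writable grant  -> 0x%x\n", grant_iommu_flags(0)); /* 0x3 */
    return 0;
}

On the implementation side, the hunks above show each backend translating
these generic flags into its own permission bits: VT-d maps them to
DMA_PTE_READ/DMA_PTE_WRITE in dma_set_pte_prot(), and the AMD IOMMU passes
them as the iw/ir arguments of set_iommu_l1e_present().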

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
