WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] iommu: Specify access permissions to iommu_map_page().

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] iommu: Specify access permissions to iommu_map_page().
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 29 May 2010 00:30:42 -0700
Delivery-date: Sat, 29 May 2010 00:34:57 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1275032930 -3600
# Node ID 9ee5c292b1125bcd281629fc957bbe4c92545014
# Parent  b74af3abf72d2ac86ae271f4169929f22674a7bb
iommu: Specify access permissions to iommu_map_page().

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/xen/mm.c                        |    3 ++-
 xen/arch/x86/mm.c                             |    3 ++-
 xen/arch/x86/mm/hap/p2m-ept.c                 |    7 +++++--
 xen/arch/x86/mm/p2m.c                         |    9 +++++++--
 xen/arch/x86/x86_64/mm.c                      |    2 +-
 xen/common/grant_table.c                      |    3 ++-
 xen/drivers/passthrough/amd/iommu_map.c       |   10 ++++++----
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    3 ++-
 xen/drivers/passthrough/iommu.c               |   20 +++++++++++---------
 xen/drivers/passthrough/vtd/ia64/vtd.c        |    3 ++-
 xen/drivers/passthrough/vtd/iommu.c           |   10 +++++++---
 xen/drivers/passthrough/vtd/x86/vtd.c         |    3 ++-
 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  |    3 ---
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    3 ++-
 xen/include/xen/iommu.h                       |   13 +++++++++++--
 15 files changed, 62 insertions(+), 33 deletions(-)

diff -r b74af3abf72d -r 9ee5c292b112 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri May 28 08:14:54 2010 +0100
+++ b/xen/arch/ia64/xen/mm.c    Fri May 28 08:48:50 2010 +0100
@@ -2897,7 +2897,8 @@ __guest_physmap_add_page(struct domain *
         int i, j;
         j = 1 << (PAGE_SHIFT-PAGE_SHIFT_4K);
         for(i = 0 ; i < j; i++)
-            iommu_map_page(d, gpfn*j + i, mfn*j + i);
+            iommu_map_page(d, gpfn*j + i, mfn*j + i,
+                           IOMMUF_readable|IOMMUF_writable);
     }
 }
 
diff -r b74af3abf72d -r 9ee5c292b112 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri May 28 08:14:54 2010 +0100
+++ b/xen/arch/x86/mm.c Fri May 28 08:48:50 2010 +0100
@@ -2402,7 +2402,8 @@ static int __get_page_type(struct page_i
                 iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
                 iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page));
+                               page_to_mfn(page),
+                               IOMMUF_readable|IOMMUF_writable);
         }
     }
 
diff -r b74af3abf72d -r 9ee5c292b112 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Fri May 28 08:14:54 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Fri May 28 08:48:50 2010 +0100
@@ -353,10 +353,13 @@ out:
             if ( order == EPT_TABLE_ORDER )
             {
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn - offset + i, mfn_x(mfn) - offset + i);
+                    iommu_map_page(
+                        d, gfn - offset + i, mfn_x(mfn) - offset + i,
+                        IOMMUF_readable|IOMMUF_writable);
             }
             else if ( !order )
-                iommu_map_page(d, gfn, mfn_x(mfn));
+                iommu_map_page(
+                    d, gfn, mfn_x(mfn), IOMMUF_readable|IOMMUF_writable);
         }
         else
         {
diff -r b74af3abf72d -r 9ee5c292b112 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri May 28 08:14:54 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri May 28 08:48:50 2010 +0100
@@ -1377,7 +1377,8 @@ p2m_set_entry(struct domain *d, unsigned
     {
         if ( p2mt == p2m_ram_rw )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(d, gfn+i, mfn_x(mfn)+i );
+                iommu_map_page(d, gfn+i, mfn_x(mfn)+i,
+                               IOMMUF_readable|IOMMUF_writable);
         else
             for ( int i = 0; i < (1UL << page_order); i++ )
                 iommu_unmap_page(d, gfn+i);
@@ -2294,12 +2295,16 @@ guest_physmap_add_entry(struct domain *d
         if ( need_iommu(d) && t == p2m_ram_rw )
         {
             for ( i = 0; i < (1 << page_order); i++ )
-                if ( (rc = iommu_map_page(d, mfn + i, mfn + i)) != 0 )
+            {
+                rc = iommu_map_page(
+                    d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
+                if ( rc != 0 )
                 {
                     while ( i-- > 0 )
                         iommu_unmap_page(d, mfn + i);
                     return rc;
                 }
+            }
         }
         return 0;
     }
diff -r b74af3abf72d -r 9ee5c292b112 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri May 28 08:14:54 2010 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Fri May 28 08:48:50 2010 +0100
@@ -1463,7 +1463,7 @@ int memory_add(unsigned long spfn, unsig
         goto destroy_m2p;
 
     for ( i = spfn; i < epfn; i++ )
-        if ( iommu_map_page(dom0, i, i) )
+        if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
             break;
 
     if ( i != epfn )
diff -r b74af3abf72d -r 9ee5c292b112 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Fri May 28 08:14:54 2010 +0100
+++ b/xen/common/grant_table.c  Fri May 28 08:48:50 2010 +0100
@@ -605,7 +605,8 @@ __gnttab_map_grant_ref(
         BUG_ON(paging_mode_translate(ld));
         /* We're not translated, so we know that gmfns and mfns are
            the same things, so the IOMMU entry is always 1-to-1. */
-        if ( iommu_map_page(ld, frame, frame) )
+        if ( iommu_map_page(ld, frame, frame,
+                            IOMMUF_readable|IOMMUF_writable) )
         {
             rc = GNTST_general_error;
             goto undo_out;
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Fri May 28 08:48:50 2010 +0100
@@ -450,12 +450,11 @@ static u64 iommu_l2e_from_pfn(struct pag
     return next_table_maddr;
 }
 
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                       unsigned int flags)
 {
     u64 iommu_l2e;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    int iw = IOMMU_IO_WRITE_ENABLED;
-    int ir = IOMMU_IO_READ_ENABLED;
 
     BUG_ON( !hd->root_table );
 
@@ -469,7 +468,10 @@ int amd_iommu_map_page(struct domain *d,
         domain_crash(d);
         return -EFAULT;
     }
-    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
+
+    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT,
+                          !!(flags & IOMMUF_writable),
+                          !!(flags & IOMMUF_readable));
 
     spin_unlock(&hd->mapping_lock);
     return 0;
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri May 28 08:48:50 2010 +0100
@@ -235,7 +235,8 @@ static int amd_iommu_domain_init(struct 
         {
             /* setup 1:1 page table for dom0 */
             for ( i = 0; i < max_page; i++ )
-                amd_iommu_map_page(domain, i, i);
+                amd_iommu_map_page(domain, i, i,
+                                   IOMMUF_readable|IOMMUF_writable);
         }
 
         amd_iommu_setup_dom0_devices(domain);
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/iommu.c   Fri May 28 08:48:50 2010 +0100
@@ -172,7 +172,8 @@ static int iommu_populate_page_table(str
         {
             BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
             rc = hd->platform_ops->map_page(
-                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
+                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
+                IOMMUF_readable|IOMMUF_writable);
             if (rc)
             {
                 spin_unlock(&d->page_alloc_lock);
@@ -217,14 +218,15 @@ void iommu_domain_destroy(struct domain 
     }
 }
 
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
-
-    if ( !iommu_enabled || !hd->platform_ops )
-        return 0;
-
-    return hd->platform_ops->map_page(d, gfn, mfn);
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                   unsigned int flags)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
+        return 0;
+
+    return hd->platform_ops->map_page(d, gfn, mfn, flags);
 }
 
 int iommu_unmap_page(struct domain *d, unsigned long gfn)
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/vtd/ia64/vtd.c
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c    Fri May 28 08:48:50 2010 +0100
@@ -108,7 +108,8 @@ static int do_dom0_iommu_mapping(unsigne
         pfn = page_addr >> PAGE_SHIFT;
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
+            iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j),
+                           IOMMUF_readable|IOMMUF_writable);
 
        page_addr += PAGE_SIZE;
 
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri May 28 08:48:50 2010 +0100
@@ -1578,7 +1578,8 @@ void iommu_domain_teardown(struct domain
 }
 
 static int intel_iommu_map_page(
-    struct domain *d, unsigned long gfn, unsigned long mfn)
+    struct domain *d, unsigned long gfn, unsigned long mfn,
+    unsigned int flags)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -1605,7 +1606,9 @@ static int intel_iommu_map_page(
     pte = page + (gfn & LEVEL_MASK);
     pte_present = dma_pte_present(*pte);
     dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
-    dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
+    dma_set_pte_prot(*pte,
+                     ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
+                     ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
 
     /* Set the SNP on leaf page table if Snoop Control available */
     if ( iommu_snoop )
@@ -1687,7 +1690,8 @@ static int rmrr_identity_mapping(struct 
 
     while ( base_pfn < end_pfn )
     {
-        if ( intel_iommu_map_page(d, base_pfn, base_pfn) )
+        if ( intel_iommu_map_page(d, base_pfn, base_pfn,
+                                  IOMMUF_readable|IOMMUF_writable) )
             return -1;
         base_pfn++;
     }
diff -r b74af3abf72d -r 9ee5c292b112 xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Fri May 28 08:14:54 2010 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Fri May 28 08:48:50 2010 +0100
@@ -153,7 +153,8 @@ void iommu_set_dom0_mapping(struct domai
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
+            iommu_map_page(d, (i*tmp+j), (i*tmp+j),
+                           IOMMUF_readable|IOMMUF_writable);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
             process_pending_softirqs();
diff -r b74af3abf72d -r 9ee5c292b112 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri May 28 08:14:54 2010 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri May 28 08:48:50 2010 +0100
@@ -388,9 +388,6 @@
 #define MAX_AMD_IOMMUS                  32
 #define IOMMU_PAGE_TABLE_LEVEL_3        3
 #define IOMMU_PAGE_TABLE_LEVEL_4        4
-#define IOMMU_IO_WRITE_ENABLED          1
-#define IOMMU_IO_READ_ENABLED           1
-#define HACK_BIOS_SETTINGS                  0
 
 /* interrupt remapping table */
 #define INT_REMAP_INDEX_DM_MASK         0x1C00
diff -r b74af3abf72d -r 9ee5c292b112 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri May 28 08:14:54 2010 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri May 28 08:48:50 2010 +0100
@@ -52,7 +52,8 @@ int __init amd_iommu_update_ivrs_mapping
 int __init amd_iommu_update_ivrs_mapping_acpi(void);
 
 /* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                       unsigned int flags);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
diff -r b74af3abf72d -r 9ee5c292b112 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Fri May 28 08:14:54 2010 +0100
+++ b/xen/include/xen/iommu.h   Fri May 28 08:48:50 2010 +0100
@@ -68,8 +68,16 @@ int deassign_device(struct domain *d, u8
 int deassign_device(struct domain *d, u8 bus, u8 devfn);
 int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
     XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+
+/* iommu_map_page() takes flags to direct the mapping operation. */
+#define _IOMMUF_readable 0
+#define IOMMUF_readable  (1u<<_IOMMUF_readable)
+#define _IOMMUF_writable 1
+#define IOMMUF_writable  (1u<<_IOMMUF_writable)
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
+                   unsigned int flags);
 int iommu_unmap_page(struct domain *d, unsigned long gfn);
+
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
 int dpci_ioport_intercept(ioreq_t *p);
@@ -102,7 +110,8 @@ struct iommu_ops {
     int (*remove_device)(struct pci_dev *pdev);
     int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
     void (*teardown)(struct domain *d);
-    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
+    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn,
+                    unsigned int flags);
     int (*unmap_page)(struct domain *d, unsigned long gfn);
     int (*reassign_device)(struct domain *s, struct domain *t,
                           u8 bus, u8 devfn);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] iommu: Specify access permissions to iommu_map_page()., Xen patchbot-unstable <=