xen-changelog

[Xen-changelog] [xen-unstable] Introduce new flavour of map_domain_page()

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Introduce new flavour of map_domain_page()
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 22 Sep 2009 01:25:29 -0700
Delivery-date: Tue, 22 Sep 2009 01:26:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1253604376 -3600
# Node ID fed2dd73980f2815d46303f799baf493046135d3
# Parent  51152e4f995f383eccc7c686afc3ab67d626327d
Introduce new flavour of map_domain_page()

Introduce a variant of map_domain_page() that is passed a
struct page_info * argument directly, based on the observation that in
many places the argument to this function was simply the result of
page_to_mfn(). This is meaningful for the x86-64 case, where
map_domain_page() really is just an invocation of mfn_to_virt(), so
the combined mfn_to_virt(page_to_mfn()) amounts to a needless
round-trip conversion of the MFN representation:
compressed -> uncompressed -> compressed.
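
To make the round trip concrete, here is a minimal, self-contained
sketch. The struct, constants, and conversion helpers below are
simplified stand-ins, not the real Xen definitions; they only mimic
the shape of page_to_mfn(), mfn_to_virt(), and page_to_virt() on
x86-64:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct page_info { uint64_t pad[4]; };     /* stand-in for Xen's struct page_info */

static struct page_info frame_table[16];   /* stand-in frame table */
static char directmap[16 << PAGE_SHIFT];   /* stand-in direct-map region */

/* "Compress": turn a struct page_info * into a machine frame number. */
static unsigned long page_to_mfn(const struct page_info *pg)
{
    return (unsigned long)(pg - frame_table);
}

/* "Uncompress": turn an MFN back into a direct-map virtual address. */
static void *mfn_to_virt(unsigned long mfn)
{
    return &directmap[mfn << PAGE_SHIFT];
}

/* Go straight from the page_info pointer, skipping the MFN step. */
static void *page_to_virt(const struct page_info *pg)
{
    return &directmap[(unsigned long)(pg - frame_table) << PAGE_SHIFT];
}

int main(void)
{
    struct page_info *pg = &frame_table[3];

    void *old_va = mfn_to_virt(page_to_mfn(pg)); /* old: two conversions */
    void *new_va = page_to_virt(pg);             /* new: one conversion  */

    printf("identical mapping: %s\n", old_va == new_va ? "yes" : "no");
    return 0;
}

This is the same pattern the hunks below apply throughout the tree:
map_domain_page(page_to_mfn(pg)) becomes __map_domain_page(pg), which
in the !CONFIG_DOMAIN_PAGE case expands directly to page_to_virt(pg).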

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/hvm/stdvga.c                     |    6 +++---
 xen/arch/x86/hvm/vlapic.c                     |    2 +-
 xen/arch/x86/mm.c                             |    2 +-
 xen/arch/x86/mm/hap/hap.c                     |    4 ++--
 xen/arch/x86/mm/p2m.c                         |    2 +-
 xen/arch/x86/mm/paging.c                      |    8 +++-----
 xen/arch/x86/mm/shadow/common.c               |    4 ++--
 xen/arch/x86/tboot.c                          |    3 +--
 xen/common/grant_table.c                      |    2 +-
 xen/common/page_alloc.c                       |    2 +-
 xen/common/tmem_xen.c                         |    2 +-
 xen/drivers/passthrough/amd/iommu_map.c       |    2 +-
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    2 +-
 xen/drivers/passthrough/vtd/iommu.c           |    2 +-
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    2 +-
 xen/include/xen/domain_page.h                 |    5 +++++
 16 files changed, 26 insertions(+), 24 deletions(-)

diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/hvm/stdvga.c Tue Sep 22 08:26:16 2009 +0100
@@ -85,14 +85,14 @@ static uint8_t *vram_getb(struct hvm_hw_
 static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
 {
     struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
-    uint8_t *p = map_domain_page(page_to_mfn(pg));
+    uint8_t *p = __map_domain_page(pg);
     return &p[a & 0xfff];
 }
 
 static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
 {
     struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
-    uint32_t *p = map_domain_page(page_to_mfn(pg));
+    uint32_t *p = __map_domain_page(pg);
     return &p[a & 0x3ff];
 }
 
@@ -601,7 +601,7 @@ void stdvga_init(struct domain *d)
         if ( pg == NULL )
             break;
         s->vram_page[i] = pg;
-        p = map_domain_page(page_to_mfn(pg));
+        p = __map_domain_page(pg);
         clear_page(p);
         unmap_domain_page(p);
     }
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Tue Sep 22 08:26:16 2009 +0100
@@ -978,7 +978,7 @@ int vlapic_init(struct vcpu *v)
     }
     if (vlapic->regs == NULL) 
     {
-        vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+        vlapic->regs = __map_domain_page_global(vlapic->regs_page);
         if ( vlapic->regs == NULL )
         {
             dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/mm.c Tue Sep 22 08:26:16 2009 +0100
@@ -543,7 +543,7 @@ static int alloc_segdesc_page(struct pag
     struct desc_struct *descs;
     int i;
 
-    descs = map_domain_page(page_to_mfn(page));
+    descs = __map_domain_page(page);
 
     for ( i = 0; i < 512; i++ )
         if ( unlikely(!check_descriptor(page_get_owner(page), &descs[i])) )
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Sep 22 08:26:16 2009 +0100
@@ -255,7 +255,7 @@ static struct page_info *hap_alloc(struc
 
     d->arch.paging.hap.free_pages--;
 
-    p = hap_map_domain_page(page_to_mfn(pg));
+    p = __map_domain_page(pg);
     ASSERT(p != NULL);
     clear_page(p);
     hap_unmap_domain_page(p);
@@ -293,7 +293,7 @@ static struct page_info *hap_alloc_p2m_p
             NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
         if ( likely(pg != NULL) )
         {
-            void *p = hap_map_domain_page(page_to_mfn(pg));
+            void *p = __map_domain_page(pg);
             clear_page(p);
             hap_unmap_domain_page(p);
         }
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Sep 22 08:26:16 2009 +0100
@@ -200,7 +200,7 @@ p2m_next_level(struct domain *d, mfn_t *
         else
             flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
         
-        l1_entry = map_domain_page(mfn_x(page_to_mfn(pg)));
+        l1_entry = __map_domain_page(pg);
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn + i, flags);
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/mm/paging.c  Tue Sep 22 08:26:16 2009 +0100
@@ -99,7 +99,6 @@
 
 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
 {
-    mfn_t mfn;
     struct page_info *page;
 
     page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
@@ -110,10 +109,9 @@ static mfn_t paging_new_log_dirty_page(s
     }
 
     d->arch.paging.log_dirty.allocs++;
-    mfn = page_to_mfn(page);
-    *mapping_p = map_domain_page(mfn_x(mfn));
-
-    return mfn;
+    *mapping_p = __map_domain_page(page);
+
+    return page_to_mfn(page);
 }
 
 static mfn_t paging_new_log_dirty_leaf(
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Sep 22 08:26:16 2009 +0100
@@ -1578,7 +1578,7 @@ mfn_t shadow_alloc(struct domain *d,
             flush_tlb_mask(&mask);
         }
         /* Now safe to clear the page for reuse */
-        p = sh_map_domain_page(page_to_mfn(sp+i));
+        p = __map_domain_page(sp+i);
         ASSERT(p != NULL);
         clear_page(p);
         sh_unmap_domain_page(p);
@@ -3130,7 +3130,7 @@ int shadow_enable(struct domain *d, u32 
         }
         /* Fill it with 32-bit, non-PAE superpage entries, each mapping 4MB
          * of virtual address space onto the same physical address range */ 
-        e = sh_map_domain_page(page_to_mfn(pg));
+        e = __map_domain_page(pg);
         for ( i = 0; i < PAGE_SIZE / sizeof(*e); i++ )
             e[i] = ((0x400000U * i)
                     | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER 
diff -r 51152e4f995f -r fed2dd73980f xen/arch/x86/tboot.c
--- a/xen/arch/x86/tboot.c      Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/arch/x86/tboot.c      Tue Sep 22 08:26:16 2009 +0100
@@ -205,8 +205,7 @@ static void tboot_gen_domain_integrity(c
 
         page_list_for_each(page, &d->page_list)
         {
-            void *pg;
-            pg = map_domain_page(page_to_mfn(page));
+            void *pg = __map_domain_page(page);
             vmac_update(pg, PAGE_SIZE, &ctx);
             unmap_domain_page(pg);
         }
diff -r 51152e4f995f -r fed2dd73980f xen/common/grant_table.c
--- a/xen/common/grant_table.c  Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/common/grant_table.c  Tue Sep 22 08:26:16 2009 +0100
@@ -1195,7 +1195,7 @@ gnttab_transfer(
             }
 
             sp = map_domain_page(mfn);
-            dp = map_domain_page(page_to_mfn(new_page));
+            dp = __map_domain_page(new_page);
             memcpy(dp, sp, PAGE_SIZE);
             unmap_domain_page(dp);
             unmap_domain_page(sp);
diff -r 51152e4f995f -r fed2dd73980f xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/common/page_alloc.c   Tue Sep 22 08:26:16 2009 +0100
@@ -1235,7 +1235,7 @@ __initcall(pagealloc_keyhandler_init);
 
 void scrub_one_page(struct page_info *pg)
 {
-    void *p = map_domain_page(page_to_mfn(pg));
+    void *p = __map_domain_page(pg);
 
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
diff -r 51152e4f995f -r fed2dd73980f xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/common/tmem_xen.c     Tue Sep 22 08:26:16 2009 +0100
@@ -268,7 +268,7 @@ static void *tmh_persistent_pool_page_ge
     if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
-    return map_domain_page(page_to_mfn(pi));
+    return __map_domain_page(pi);
 }
 
 static void tmh_persistent_pool_page_put(void *page_va)
diff -r 51152e4f995f -r fed2dd73980f xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Tue Sep 22 08:26:16 2009 +0100
@@ -430,7 +430,7 @@ static u64 iommu_l2e_from_pfn(struct pag
                              (level - IOMMU_PAGING_MODE_LEVEL_1)));
         offset &= ~PTE_PER_TABLE_MASK;
 
-        table_vaddr = map_domain_page(page_to_mfn(table));
+        table_vaddr = __map_domain_page(table);
         pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
         next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
 
diff -r 51152e4f995f -r fed2dd73980f xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Sep 22 08:26:16 2009 +0100
@@ -335,7 +335,7 @@ static void deallocate_next_page_table(s
     u64 next_table_maddr;
     int index;
 
-    table_vaddr = map_domain_page(page_to_mfn(pg));
+    table_vaddr = __map_domain_page(pg);
 
     if ( level > 1 )
     {
diff -r 51152e4f995f -r fed2dd73980f xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Sep 22 08:26:16 2009 +0100
@@ -154,7 +154,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd
                              (node == -1 ) ? 0 : MEMF_node(node));
     if ( !pg )
         return 0;
-    vaddr = map_domain_page(page_to_mfn(pg));
+    vaddr = __map_domain_page(pg);
     memset(vaddr, 0, PAGE_SIZE * npages);
 
     iommu_flush_cache_page(vaddr, npages);
diff -r 51152e4f995f -r fed2dd73980f xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Sep 22 08:26:16 2009 +0100
@@ -131,7 +131,7 @@ static inline struct page_info* alloc_am
     pg = alloc_domheap_page(NULL, 0);
     if ( pg == NULL )
         return 0;
-    vaddr = map_domain_page(page_to_mfn(pg));
+    vaddr = __map_domain_page(pg);
     memset(vaddr, 0, PAGE_SIZE);
     unmap_domain_page(vaddr);
     return pg;
diff -r 51152e4f995f -r fed2dd73980f xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Tue Sep 22 08:19:16 2009 +0100
+++ b/xen/include/xen/domain_page.h     Tue Sep 22 08:26:16 2009 +0100
@@ -33,6 +33,9 @@ void unmap_domain_page(const void *va);
  */
 void *map_domain_page_global(unsigned long mfn);
 void unmap_domain_page_global(const void *va);
+
+#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -97,9 +100,11 @@ domain_mmap_cache_destroy(struct domain_
 #else /* !CONFIG_DOMAIN_PAGE */
 
 #define map_domain_page(mfn)                mfn_to_virt(mfn)
+#define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
 
 #define map_domain_page_global(mfn)         mfn_to_virt(mfn)
+#define __map_domain_page_global(pg)        page_to_virt(pg)
 #define unmap_domain_page_global(va)        ((void)(va))
 
 struct domain_mmap_cache { 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
