[Xen-devel] [PATCH 5/7] introduce flavor of map_domain_page()

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 5/7] introduce flavor of map_domain_page()
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Mon, 21 Sep 2009 13:10:33 +0100
Delivery-date: Mon, 21 Sep 2009 05:13:43 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Introduce a variant of map_domain_page() that is passed a
struct page_info * argument directly, based on the observation that in
many places the argument to this function was simply the result of
page_to_mfn(). This matters for the x86-64 case, where
map_domain_page() is really just an invocation of mfn_to_virt(), so
the combined mfn_to_virt(page_to_mfn()) represents a needless
compressed -> uncompressed -> compressed round trip of the MFN
representation.

While it might seem more logical to rename map_domain_page() to e.g.
map_domain_mfn(), this would require touching a lot more code.
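
As an illustration (not part of the patch; it assumes a valid
struct page_info *pg already obtained e.g. from alloc_domheap_page()),
a typical call site changes as follows, with the x86-64
!CONFIG_DOMAIN_PAGE expansions noted for comparison:

    void *p;

    /* Old pattern: expands to mfn_to_virt(page_to_mfn(pg)),
     * i.e. page -> MFN -> virtual address. */
    p = map_domain_page(page_to_mfn(pg));
    unmap_domain_page(p);

    /* New pattern: expands to page_to_virt(pg), avoiding the
     * intermediate MFN conversion. */
    p = __map_domain_page(pg);
    unmap_domain_page(p);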

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2009-09-21.orig/xen/arch/x86/hvm/stdvga.c   2009-07-14 08:55:22.000000000 +0200
+++ 2009-09-21/xen/arch/x86/hvm/stdvga.c        2009-09-21 11:14:58.000000000 +0200
@@ -85,14 +85,14 @@ const uint8_t gr_mask[9] = {
 static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
 {
     struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
-    uint8_t *p = map_domain_page(page_to_mfn(pg));
+    uint8_t *p = __map_domain_page(pg);
     return &p[a & 0xfff];
 }
 
 static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
 {
     struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
-    uint32_t *p = map_domain_page(page_to_mfn(pg));
+    uint32_t *p = __map_domain_page(pg);
     return &p[a & 0x3ff];
 }
 
@@ -601,7 +601,7 @@ void stdvga_init(struct domain *d)
         if ( pg == NULL )
             break;
         s->vram_page[i] = pg;
-        p = map_domain_page(page_to_mfn(pg));
+        p = __map_domain_page(pg);
         clear_page(p);
         unmap_domain_page(p);
     }
--- 2009-09-21.orig/xen/arch/x86/hvm/vlapic.c   2009-09-21 08:39:42.000000000 +0200
+++ 2009-09-21/xen/arch/x86/hvm/vlapic.c        2009-09-21 11:14:58.000000000 +0200
@@ -978,7 +978,7 @@ int vlapic_init(struct vcpu *v)
     }
     if (vlapic->regs == NULL) 
     {
-        vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+        vlapic->regs = __map_domain_page_global(vlapic->regs_page);
         if ( vlapic->regs == NULL )
         {
             dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
--- 2009-09-21.orig/xen/arch/x86/mm.c   2009-09-21 11:14:51.000000000 +0200
+++ 2009-09-21/xen/arch/x86/mm.c        2009-09-21 11:14:58.000000000 +0200
@@ -543,7 +543,7 @@ static int alloc_segdesc_page(struct pag
     struct desc_struct *descs;
     int i;
 
-    descs = map_domain_page(page_to_mfn(page));
+    descs = __map_domain_page(page);
 
     for ( i = 0; i < 512; i++ )
         if ( unlikely(!check_descriptor(page_get_owner(page), &descs[i])) )
--- 2009-09-21.orig/xen/arch/x86/mm/hap/hap.c   2009-06-29 11:58:15.000000000 +0200
+++ 2009-09-21/xen/arch/x86/mm/hap/hap.c        2009-09-21 11:14:58.000000000 +0200
@@ -255,7 +255,7 @@ static struct page_info *hap_alloc(struc
 
     d->arch.paging.hap.free_pages--;
 
-    p = hap_map_domain_page(page_to_mfn(pg));
+    p = __map_domain_page(pg);
     ASSERT(p != NULL);
     clear_page(p);
     hap_unmap_domain_page(p);
@@ -293,7 +293,7 @@ static struct page_info *hap_alloc_p2m_p
             NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
         if ( likely(pg != NULL) )
         {
-            void *p = hap_map_domain_page(page_to_mfn(pg));
+            void *p = __map_domain_page(pg);
             clear_page(p);
             hap_unmap_domain_page(p);
         }
--- 2009-09-21.orig/xen/arch/x86/mm/p2m.c       2009-09-21 08:39:42.000000000 +0200
+++ 2009-09-21/xen/arch/x86/mm/p2m.c    2009-09-21 11:14:58.000000000 +0200
@@ -200,7 +200,7 @@ p2m_next_level(struct domain *d, mfn_t *
         else
             flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
         
-        l1_entry = map_domain_page(mfn_x(page_to_mfn(pg)));
+        l1_entry = __map_domain_page(pg);
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn + i, flags);
--- 2009-09-21.orig/xen/arch/x86/mm/paging.c    2009-07-14 08:55:23.000000000 +0200
+++ 2009-09-21/xen/arch/x86/mm/paging.c 2009-09-21 11:14:58.000000000 +0200
@@ -99,7 +99,6 @@
 
 static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
 {
-    mfn_t mfn;
     struct page_info *page;
 
     page = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
@@ -110,10 +109,9 @@ static mfn_t paging_new_log_dirty_page(s
     }
 
     d->arch.paging.log_dirty.allocs++;
-    mfn = page_to_mfn(page);
-    *mapping_p = map_domain_page(mfn_x(mfn));
+    *mapping_p = __map_domain_page(page);
 
-    return mfn;
+    return page_to_mfn(page);
 }
 
 static mfn_t paging_new_log_dirty_leaf(
--- 2009-09-21.orig/xen/arch/x86/mm/shadow/common.c     2009-09-21 11:13:02.000000000 +0200
+++ 2009-09-21/xen/arch/x86/mm/shadow/common.c  2009-09-21 11:14:58.000000000 +0200
@@ -1578,7 +1578,7 @@ mfn_t shadow_alloc(struct domain *d,  
             flush_tlb_mask(&mask);
         }
         /* Now safe to clear the page for reuse */
-        p = sh_map_domain_page(page_to_mfn(sp+i));
+        p = __map_domain_page(sp+i);
         ASSERT(p != NULL);
         clear_page(p);
         sh_unmap_domain_page(p);
@@ -3130,7 +3130,7 @@ int shadow_enable(struct domain *d, u32 
         }
         /* Fill it with 32-bit, non-PAE superpage entries, each mapping 4MB
          * of virtual address space onto the same physical address range */ 
-        e = sh_map_domain_page(page_to_mfn(pg));
+        e = __map_domain_page(pg);
         for ( i = 0; i < PAGE_SIZE / sizeof(*e); i++ )
             e[i] = ((0x400000U * i)
                     | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER 
--- 2009-09-21.orig/xen/arch/x86/tboot.c        2009-09-21 11:13:02.000000000 +0200
+++ 2009-09-21/xen/arch/x86/tboot.c     2009-09-21 11:14:58.000000000 +0200
@@ -205,8 +205,7 @@ static void tboot_gen_domain_integrity(c
 
         page_list_for_each(page, &d->page_list)
         {
-            void *pg;
-            pg = map_domain_page(page_to_mfn(page));
+            void *pg = __map_domain_page(page);
             vmac_update(pg, PAGE_SIZE, &ctx);
             unmap_domain_page(pg);
         }
--- 2009-09-21.orig/xen/common/grant_table.c    2009-07-14 08:55:23.000000000 +0200
+++ 2009-09-21/xen/common/grant_table.c 2009-09-21 11:14:58.000000000 +0200
@@ -1195,7 +1195,7 @@ gnttab_transfer(
             }
 
             sp = map_domain_page(mfn);
-            dp = map_domain_page(page_to_mfn(new_page));
+            dp = __map_domain_page(new_page);
             memcpy(dp, sp, PAGE_SIZE);
             unmap_domain_page(dp);
             unmap_domain_page(sp);
--- 2009-09-21.orig/xen/common/page_alloc.c     2009-09-09 17:57:45.000000000 +0200
+++ 2009-09-21/xen/common/page_alloc.c  2009-09-21 11:14:58.000000000 +0200
@@ -1235,7 +1235,7 @@ __initcall(pagealloc_keyhandler_init);
 
 void scrub_one_page(struct page_info *pg)
 {
-    void *p = map_domain_page(page_to_mfn(pg));
+    void *p = __map_domain_page(pg);
 
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
--- 2009-09-21.orig/xen/common/tmem_xen.c       2009-08-17 11:37:45.000000000 +0200
+++ 2009-09-21/xen/common/tmem_xen.c    2009-09-21 11:14:58.000000000 +0200
@@ -268,7 +268,7 @@ static void *tmh_persistent_pool_page_ge
     if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
         return NULL;
     ASSERT(IS_VALID_PAGE(pi));
-    return map_domain_page(page_to_mfn(pi));
+    return __map_domain_page(pi);
 }
 
 static void tmh_persistent_pool_page_put(void *page_va)
--- 2009-09-21.orig/xen/drivers/passthrough/amd/iommu_map.c     2009-09-21 08:39:42.000000000 +0200
+++ 2009-09-21/xen/drivers/passthrough/amd/iommu_map.c  2009-09-21 11:14:58.000000000 +0200
@@ -430,7 +430,7 @@ static u64 iommu_l2e_from_pfn(struct pag
                              (level - IOMMU_PAGING_MODE_LEVEL_1)));
         offset &= ~PTE_PER_TABLE_MASK;
 
-        table_vaddr = map_domain_page(page_to_mfn(table));
+        table_vaddr = __map_domain_page(table);
         pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
         next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
 
--- 2009-09-21.orig/xen/drivers/passthrough/amd/pci_amd_iommu.c 2009-09-21 08:39:42.000000000 +0200
+++ 2009-09-21/xen/drivers/passthrough/amd/pci_amd_iommu.c      2009-09-21 11:14:58.000000000 +0200
@@ -335,7 +335,7 @@ static void deallocate_next_page_table(s
     u64 next_table_maddr;
     int index;
 
-    table_vaddr = map_domain_page(page_to_mfn(pg));
+    table_vaddr = __map_domain_page(pg);
 
     if ( level > 1 )
     {
--- 2009-09-21.orig/xen/drivers/passthrough/vtd/iommu.c 2009-09-16 08:35:05.000000000 +0200
+++ 2009-09-21/xen/drivers/passthrough/vtd/iommu.c      2009-09-21 11:14:58.000000000 +0200
@@ -154,7 +154,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd
                              (node == -1 ) ? 0 : MEMF_node(node));
     if ( !pg )
         return 0;
-    vaddr = map_domain_page(page_to_mfn(pg));
+    vaddr = __map_domain_page(pg);
     memset(vaddr, 0, PAGE_SIZE * npages);
 
     iommu_flush_cache_page(vaddr, npages);
--- 2009-09-21.orig/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h       2009-09-21 08:39:42.000000000 +0200
+++ 2009-09-21/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h    2009-09-21 11:14:58.000000000 +0200
@@ -131,7 +131,7 @@ static inline struct page_info* alloc_am
     pg = alloc_domheap_page(NULL, 0);
     if ( pg == NULL )
         return 0;
-    vaddr = map_domain_page(page_to_mfn(pg));
+    vaddr = __map_domain_page(pg);
     memset(vaddr, 0, PAGE_SIZE);
     unmap_domain_page(vaddr);
     return pg;
--- 2009-09-21.orig/xen/include/xen/domain_page.h       2008-11-05 16:54:22.000000000 +0100
+++ 2009-09-21/xen/include/xen/domain_page.h    2009-09-21 11:14:58.000000000 +0200
@@ -34,6 +34,9 @@ void unmap_domain_page(const void *va);
 void *map_domain_page_global(unsigned long mfn);
 void unmap_domain_page_global(const void *va);
 
+#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
+
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
 
@@ -97,9 +100,11 @@ domain_mmap_cache_destroy(struct domain_
 #else /* !CONFIG_DOMAIN_PAGE */
 
 #define map_domain_page(mfn)                mfn_to_virt(mfn)
+#define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
 
 #define map_domain_page_global(mfn)         mfn_to_virt(mfn)
+#define __map_domain_page_global(pg)        page_to_virt(pg)
 #define unmap_domain_page_global(va)        ((void)(va))
 
 struct domain_mmap_cache { 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
