WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Clean up shadow code after the removal of non-PAE 32-bit builds

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Clean up shadow code after the removal of non-PAE 32-bit builds
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 08 May 2008 14:30:08 -0700
Delivery-date: Thu, 08 May 2008 14:30:12 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1210262313 -3600
# Node ID 810d8c3ac992e8979abb2e60cac5cef5ed6ccb36
# Parent  c99a88623eda83d8e02f4b6d7c32bc4c6d298d8a
Clean up shadow code after the removal of non-PAE 32-bit builds

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c            |   41 ++-------
 xen/arch/x86/mm/shadow/Makefile  |   13 --
 xen/arch/x86/mm/shadow/common.c  |  174 ++++++++++++++++-----------------------
 xen/arch/x86/mm/shadow/multi.c   |  100 +++++++---------------
 xen/arch/x86/mm/shadow/multi.h   |   58 ++++++-------
 xen/arch/x86/mm/shadow/private.h |   38 +-------
 xen/arch/x86/mm/shadow/types.h   |  107 +++++------------------
 xen/include/asm-x86/mtrr.h       |    3 
 8 files changed, 180 insertions(+), 354 deletions(-)
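
The core of the cleanup is that the shadow code's internal naming no longer carries a separate shadow-level parameter: with non-PAE 32-bit builds gone, the shadow level is fully determined by the guest level, so SHADOW_INTERNAL_NAME() (and the Makefile rule that builds multi.c once per guest level) now takes only GUEST_LEVELS. A minimal sketch of the resulting expansion, mirroring the new definition in xen/arch/x86/mm/shadow/private.h (the surrounding comments are illustrative, not part of the patch):

    /* New single-parameter naming macro, as added to private.h. */
    #define SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels) \
        name ## __guest_ ## guest_levels
    #define SHADOW_INTERNAL_NAME(name, guest_levels)        \
        SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels)

    /* SHADOW_INTERNAL_NAME(sh_paging_mode, 3) now expands to
     * sh_paging_mode__guest_3; the old two-parameter form produced
     * sh_paging_mode__shadow_3_guest_3. */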

diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu May 08 16:58:33 2008 +0100
@@ -220,7 +220,6 @@ p2m_set_entry(struct domain *d, unsigned
                          L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
         goto out;
 #endif
-#if CONFIG_PAGING_LEVELS >= 3
     /*
      * When using PAE Xen, we only allow 33 bits of pseudo-physical
      * address in translated guests (i.e. 8 GBytes).  This restriction
@@ -235,7 +234,7 @@ p2m_set_entry(struct domain *d, unsigned
                           : L3_PAGETABLE_ENTRIES),
                          PGT_l2_page_table) )
         goto out;
-#endif
+
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                          L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
@@ -308,7 +307,6 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
         unmap_domain_page(l4e);
     }
 #endif
-#if CONFIG_PAGING_LEVELS >= 3
     {
         l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
 #if CONFIG_PAGING_LEVELS == 3
@@ -329,7 +327,6 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
         mfn = _mfn(l3e_get_pfn(*l3e));
         unmap_domain_page(l3e);
     }
-#endif
 
     l2e = map_domain_page(mfn_x(mfn));
     l2e += l2_table_offset(addr);
@@ -486,7 +483,7 @@ int p2m_alloc_table(struct domain *d,
     p2m_top->u.inuse.type_info =
 #if CONFIG_PAGING_LEVELS == 4
         PGT_l4_page_table
-#elif CONFIG_PAGING_LEVELS == 3
+#else
         PGT_l3_page_table
 #endif
         | 1 | PGT_validated;
@@ -657,16 +654,13 @@ static void audit_p2m(struct domain *d)
         l3_pgentry_t *l3e;
         int i3, i4;
         l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-#elif CONFIG_PAGING_LEVELS == 3
+#else /* CONFIG_PAGING_LEVELS == 3 */
         l3_pgentry_t *l3e;
         int i3;
         l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-#else /* CONFIG_PAGING_LEVELS == 2 */
-        l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 #endif
 
         gfn = 0;
-#if CONFIG_PAGING_LEVELS >= 3
 #if CONFIG_PAGING_LEVELS >= 4
         for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
         {
@@ -676,7 +670,7 @@ static void audit_p2m(struct domain *d)
                 continue;
             }
             l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
-#endif /* now at levels 3 or 4... */
+#endif
             for ( i3 = 0;
                   i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
                   i3++ )
@@ -687,7 +681,6 @@ static void audit_p2m(struct domain *d)
                     continue;
                 }
                 l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
-#endif /* all levels... */
                 for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                 {
                     if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
@@ -714,21 +707,17 @@ static void audit_p2m(struct domain *d)
                     }
                     unmap_domain_page(l1e);
                 }
-#if CONFIG_PAGING_LEVELS >= 3
                 unmap_domain_page(l2e);
             }
 #if CONFIG_PAGING_LEVELS >= 4
             unmap_domain_page(l3e);
         }
 #endif
-#endif
 
 #if CONFIG_PAGING_LEVELS == 4
         unmap_domain_page(l4e);
-#elif CONFIG_PAGING_LEVELS == 3
+#else /* CONFIG_PAGING_LEVELS == 3 */
         unmap_domain_page(l3e);
-#else /* CONFIG_PAGING_LEVELS == 2 */
-        unmap_domain_page(l2e);
 #endif
 
     }
@@ -864,14 +853,12 @@ void p2m_change_type_global(struct domai
     l2_pgentry_t *l2e;
     mfn_t l1mfn;
     int i1, i2;
-#if CONFIG_PAGING_LEVELS >= 3
     l3_pgentry_t *l3e;
     int i3;
 #if CONFIG_PAGING_LEVELS == 4
     l4_pgentry_t *l4e;
     int i4;
 #endif /* CONFIG_PAGING_LEVELS == 4 */
-#endif /* CONFIG_PAGING_LEVELS >= 3 */
 
     if ( !paging_mode_translate(d) )
         return;
@@ -883,13 +870,10 @@ void p2m_change_type_global(struct domai
 
 #if CONFIG_PAGING_LEVELS == 4
     l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-#elif CONFIG_PAGING_LEVELS == 3
+#else /* CONFIG_PAGING_LEVELS == 3 */
     l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-#else /* CONFIG_PAGING_LEVELS == 2 */
-    l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-#endif
-
-#if CONFIG_PAGING_LEVELS >= 3
+#endif
+
 #if CONFIG_PAGING_LEVELS >= 4
     for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
     {
@@ -898,7 +882,7 @@ void p2m_change_type_global(struct domai
             continue;
         }
         l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
-#endif /* now at levels 3 or 4... */
+#endif
         for ( i3 = 0;
               i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
               i3++ )
@@ -908,7 +892,6 @@ void p2m_change_type_global(struct domai
                 continue;
             }
             l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
-#endif /* all levels... */
             for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
             {
                 if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
@@ -934,21 +917,17 @@ void p2m_change_type_global(struct domai
                 }
                 unmap_domain_page(l1e);
             }
-#if CONFIG_PAGING_LEVELS >= 3
             unmap_domain_page(l2e);
         }
 #if CONFIG_PAGING_LEVELS >= 4
         unmap_domain_page(l3e);
     }
 #endif
-#endif
 
 #if CONFIG_PAGING_LEVELS == 4
     unmap_domain_page(l4e);
-#elif CONFIG_PAGING_LEVELS == 3
+#else /* CONFIG_PAGING_LEVELS == 3 */
     unmap_domain_page(l3e);
-#else /* CONFIG_PAGING_LEVELS == 2 */
-    unmap_domain_page(l2e);
 #endif
 
 }
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/Makefile
--- a/xen/arch/x86/mm/shadow/Makefile   Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/Makefile   Thu May 08 16:58:33 2008 +0100
@@ -1,10 +1,5 @@ obj-$(x86_32) += common.o g2_on_s3.o g3_
-obj-$(x86_32) += common.o g2_on_s3.o g3_on_s3.o
-obj-$(x86_64) += common.o g4_on_s4.o g3_on_s3.o g2_on_s3.o
+obj-$(x86_32) += common.o guest_2.o guest_3.o
+obj-$(x86_64) += common.o guest_2.o guest_3.o guest_4.o
 
-guest_levels  = $(subst g,,$(filter g%,$(subst ., ,$(subst _, ,$(1)))))
-shadow_levels = $(subst s,,$(filter s%,$(subst ., ,$(subst _, ,$(1)))))
-shadow_defns  = -DGUEST_PAGING_LEVELS=$(call guest_levels,$(1)) \
-                -DSHADOW_PAGING_LEVELS=$(call shadow_levels,$(1))
-
-g%.o: multi.c $(HDRS) Makefile
-       $(CC) $(CFLAGS) $(call shadow_defns,$(@F)) -c $< -o $@
+guest_%.o: multi.c $(HDRS) Makefile
+       $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Thu May 08 16:58:33 2008 +0100
@@ -64,11 +64,7 @@ void shadow_domain_init(struct domain *d
  */
 void shadow_vcpu_init(struct vcpu *v)
 {
-#if CONFIG_PAGING_LEVELS == 4
-    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-#elif CONFIG_PAGING_LEVELS == 3
-    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-#endif
+    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
 }
 
 #if SHADOW_AUDIT
@@ -503,38 +499,37 @@ sh_validate_guest_entry(struct vcpu *v, 
         return 0;  /* Not shadowed at all */
 
     if ( page->shadow_flags & SHF_L1_32 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 2)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2)
             (v, gmfn, entry, size);
-
     if ( page->shadow_flags & SHF_L2_32 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3, 2)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2)
             (v, gmfn, entry, size);
 
     if ( page->shadow_flags & SHF_L1_PAE ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 3)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L2_PAE ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3, 3)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L2H_PAE ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3, 3)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
             (v, gmfn, entry, size);
 
 #if CONFIG_PAGING_LEVELS >= 4 
     if ( page->shadow_flags & SHF_L1_64 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4, 4)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L2_64 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4, 4)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L2H_64 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4, 4)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L3_64 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4, 4)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4)
             (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L4_64 ) 
-        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4, 4)
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
             (v, gmfn, entry, size);
 #else /* 32-bit hypervisor does not support 64-bit guests */
     ASSERT((page->shadow_flags 
@@ -613,7 +608,7 @@ int shadow_cmpxchg_guest_entry(struct vc
  * Most shadow pages are allocated singly, but there is one case where
  * we need to allocate multiple pages together: shadowing 32-bit guest
  * tables on PAE or 64-bit shadows.  A 32-bit guest l1 table covers 4MB
- * of virtuial address space, and needs to be shadowed by two PAE/64-bit
+ * of virtual address space, and needs to be shadowed by two PAE/64-bit
  * l1 tables (covering 2MB of virtual address space each).  Similarly, a
  * 32-bit guest l2 table (4GB va) needs to be shadowed by four
  * PAE/64-bit l2 tables (1GB va each).  These multi-page shadows are
@@ -622,15 +617,15 @@ int shadow_cmpxchg_guest_entry(struct vc
  *    
  * This table shows the allocation behaviour of the different modes:
  *
- * Xen paging      32b  pae  pae  64b  64b  64b
- * Guest paging    32b  32b  pae  32b  pae  64b
- * PV or HVM        *   HVM   *   HVM  HVM   * 
- * Shadow paging   32b  pae  pae  pae  pae  64b
+ * Xen paging      pae  pae  64b  64b  64b
+ * Guest paging    32b  pae  32b  pae  64b
+ * PV or HVM       HVM   *   HVM  HVM   * 
+ * Shadow paging   pae  pae  pae  pae  64b
  *
- * sl1 size         4k   8k   4k   8k   4k   4k
- * sl2 size         4k  16k   4k  16k   4k   4k
- * sl3 size         -    -    -    -    -    4k
- * sl4 size         -    -    -    -    -    4k
+ * sl1 size         8k   4k   8k   4k   4k
+ * sl2 size        16k   4k  16k   4k   4k
+ * sl3 size         -    -    -    -    4k
+ * sl4 size         -    -    -    -    4k
  *
  * We allocate memory from xen in four-page units and break them down
  * with a simple buddy allocator.  Can't use the xen allocator to handle
@@ -723,15 +718,15 @@ static void shadow_unhook_mappings(struc
     switch ( sp->type )
     {
     case SH_type_l2_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,3,2)(v,smfn);
+        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v,smfn);
         break;
     case SH_type_l2_pae_shadow:
     case SH_type_l2h_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3)(v,smfn);
+        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v,smfn);
         break;
 #if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l4_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings,4,4)(v,smfn);
+        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v,smfn);
         break;
 #endif
     default:
@@ -1573,37 +1568,37 @@ void sh_destroy_shadow(struct vcpu *v, m
     {
     case SH_type_l1_32_shadow:
     case SH_type_fl1_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 2)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(v, smfn);
         break;
     case SH_type_l2_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 2)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2)(v, smfn);
         break;
 
     case SH_type_l1_pae_shadow:
     case SH_type_fl1_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 3)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3)(v, smfn);
         break;
     case SH_type_l2_pae_shadow:
     case SH_type_l2h_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 3)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
         break;
 
 #if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
     case SH_type_fl1_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
         break;
     case SH_type_l2h_64_shadow:
         ASSERT(is_pv_32on64_vcpu(v));
         /* Fall through... */
     case SH_type_l2_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(v, smfn);
         break;
     case SH_type_l3_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4)(v, smfn);
         break;
     case SH_type_l4_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4, 4)(v, smfn);
+        SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
         break;
 #endif
     default:
@@ -1626,16 +1621,16 @@ int sh_remove_write_access(struct vcpu *
     /* Dispatch table for getting per-type functions */
     static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* l1_32   */
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* fl1_32  */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32   */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32  */
         NULL, /* l2_32   */
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* l1_pae  */
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* fl1_pae */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae  */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
         NULL, /* l2_pae  */
         NULL, /* l2h_pae */
 #if CONFIG_PAGING_LEVELS >= 4
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,4,4), /* l1_64   */
-        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,4,4), /* fl1_64  */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64   */
+        SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64  */
 #else
         NULL, /* l1_64   */
         NULL, /* fl1_64  */
@@ -1711,7 +1706,6 @@ int sh_remove_write_access(struct vcpu *
                 GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
 
         }
-#if CONFIG_PAGING_LEVELS >= 3
         else if ( v->arch.paging.mode->guest_levels == 3 )
         {
             /* 32bit PAE w2k3: linear map at 0xC0000000 */
@@ -1746,7 +1740,6 @@ int sh_remove_write_access(struct vcpu *
             GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4); 
         }
 #endif /* CONFIG_PAGING_LEVELS >= 4 */
-#endif /* CONFIG_PAGING_LEVELS >= 3 */
 
 #undef GUESS
     }
@@ -1810,16 +1803,16 @@ int sh_remove_all_mappings(struct vcpu *
     /* Dispatch table for getting per-type functions */
     static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* l1_32   */
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* fl1_32  */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32   */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32  */
         NULL, /* l2_32   */
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* l1_pae  */
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* fl1_pae */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae  */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
         NULL, /* l2_pae  */
         NULL, /* l2h_pae */
 #if CONFIG_PAGING_LEVELS >= 4
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,4,4), /* l1_64   */
-        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,4,4), /* fl1_64  */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64   */
+        SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64  */
 #else
         NULL, /* l1_64   */
         NULL, /* fl1_64  */
@@ -1918,12 +1911,12 @@ static int sh_remove_shadow_via_pointer(
     {
     case SH_type_l1_32_shadow:
     case SH_type_l2_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,2)(v, vaddr, pmfn);
+        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 2)(v, vaddr, pmfn);
         break;
     case SH_type_l1_pae_shadow:
     case SH_type_l2_pae_shadow:
     case SH_type_l2h_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,3)(v, vaddr, pmfn);
+        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 3)(v, vaddr, pmfn);
         break;
 #if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
@@ -1931,7 +1924,7 @@ static int sh_remove_shadow_via_pointer(
     case SH_type_l2h_64_shadow:
     case SH_type_l3_64_shadow:
     case SH_type_l4_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
+        SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 4)(v, vaddr, pmfn);
         break;
 #endif
     default: BUG(); /* Some wierd unknown shadow type */
@@ -1966,18 +1959,18 @@ void sh_remove_shadows(struct vcpu *v, m
         NULL, /* none    */
         NULL, /* l1_32   */
         NULL, /* fl1_32  */
-        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,2), /* l2_32   */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32   */
         NULL, /* l1_pae  */
         NULL, /* fl1_pae */
-        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2_pae  */
-        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2h_pae */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae  */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2h_pae */
         NULL, /* l1_64   */
         NULL, /* fl1_64  */
 #if CONFIG_PAGING_LEVELS >= 4
-        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2_64   */
-        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2h_64  */
-        SHADOW_INTERNAL_NAME(sh_remove_l2_shadow,4,4), /* l3_64   */
-        SHADOW_INTERNAL_NAME(sh_remove_l3_shadow,4,4), /* l4_64   */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64  */
+        SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64   */
+        SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64   */
 #else
         NULL, /* l2_64   */
         NULL, /* l2h_64  */
@@ -2061,7 +2054,6 @@ void sh_remove_shadows(struct vcpu *v, m
 
     DO_UNSHADOW(SH_type_l2_32_shadow);
     DO_UNSHADOW(SH_type_l1_32_shadow);
-#if CONFIG_PAGING_LEVELS >= 3
     DO_UNSHADOW(SH_type_l2h_pae_shadow);
     DO_UNSHADOW(SH_type_l2_pae_shadow);
     DO_UNSHADOW(SH_type_l1_pae_shadow);
@@ -2072,7 +2064,6 @@ void sh_remove_shadows(struct vcpu *v, m
     DO_UNSHADOW(SH_type_l2_64_shadow);
     DO_UNSHADOW(SH_type_l1_64_shadow);
 #endif
-#endif
 
 #undef DO_UNSHADOW
 
@@ -2154,11 +2145,9 @@ static void sh_update_paging_modes(struc
         /// PV guest
         ///
 #if CONFIG_PAGING_LEVELS == 4
-        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
-#elif CONFIG_PAGING_LEVELS == 3
-        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-#else
-#error unexpected paging mode
+        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
+#else /* CONFIG_PAGING_LEVELS == 3 */
+        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
 #endif
     }
     else
@@ -2175,11 +2164,7 @@ static void sh_update_paging_modes(struc
              * pagetable for it, mapping 4 GB one-to-one using a single l2
              * page of 1024 superpage mappings */
             v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
-#if CONFIG_PAGING_LEVELS >= 3
-            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 2);
-#else
-            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2, 2);
-#endif
+            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
         }
         else
         {
@@ -2188,32 +2173,21 @@ static void sh_update_paging_modes(struc
             {
                 // long mode guest...
                 v->arch.paging.mode =
-                    &SHADOW_INTERNAL_NAME(sh_paging_mode, 4, 4);
+                    &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
             }
             else
 #endif
                 if ( hvm_pae_enabled(v) )
                 {
-#if CONFIG_PAGING_LEVELS >= 3
                     // 32-bit PAE mode guest...
                     v->arch.paging.mode =
-                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 3);
-#else
-                    SHADOW_ERROR("PAE not supported in 32-bit Xen\n");
-                    domain_crash(d);
-                    return;
-#endif
+                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
                 }
                 else
                 {
                     // 32-bit 2 level guest...
-#if CONFIG_PAGING_LEVELS >= 3
                     v->arch.paging.mode =
-                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 2);
-#else
-                    v->arch.paging.mode =
-                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 2, 2);
-#endif
+                        &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
                 }
         }
 
@@ -2227,7 +2201,7 @@ static void sh_update_paging_modes(struc
 
         if ( v->arch.paging.mode != old_mode )
         {
-            SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
+            SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d gl=%u "
                           "(was g=%u s=%u)\n",
                           d->domain_id, v->vcpu_id,
                           is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
@@ -3033,20 +3007,20 @@ void shadow_audit_tables(struct vcpu *v)
     /* Dispatch table for getting per-type functions */
     static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
-        SHADOW_INTERNAL_NAME(sh_audit_l1_table,3,2),  /* l1_32   */
-        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,3,2), /* fl1_32  */
-        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,2),  /* l2_32   */
-        SHADOW_INTERNAL_NAME(sh_audit_l1_table,3,3),  /* l1_pae  */
-        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,3,3), /* fl1_pae */
-        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,3),  /* l2_pae  */
-        SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,3),  /* l2h_pae */
+        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2),  /* l1_32   */
+        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32  */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2),  /* l2_32   */
+        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3),  /* l1_pae  */
+        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2_pae  */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2h_pae */
 #if CONFIG_PAGING_LEVELS >= 4
-        SHADOW_INTERNAL_NAME(sh_audit_l1_table,4,4),  /* l1_64   */
-        SHADOW_INTERNAL_NAME(sh_audit_fl1_table,4,4), /* fl1_64  */
-        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2_64   */
-        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2h_64   */
-        SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4),  /* l3_64   */
-        SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4),  /* l4_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4),  /* l1_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64  */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2h_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4),  /* l3_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),  /* l4_64   */
 #endif /* CONFIG_PAGING_LEVELS >= 4 */
         NULL  /* All the rest */
     };
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu May 08 16:58:33 2008 +0100
@@ -605,7 +605,7 @@ static void sh_audit_gw(struct vcpu *v, 
 #endif /* audit code */
 
 
-#if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS) && (CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS)
+#if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS)
 void *
 sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
                   unsigned long *gl1mfn)
@@ -643,7 +643,7 @@ sh_guest_get_eff_l1e(struct vcpu *v, uns
     (void) guest_walk_tables(v, addr, &gw, PFEC_page_present);
     *(guest_l1e_t *)eff_l1e = gw.l1e;
 }
-#endif /* CONFIG==SHADOW==GUEST */
+#endif /* CONFIG == GUEST (== SHADOW) */
 
 /**************************************************************************/
 /* Functions to compute the correct index into a shadow page, given an
@@ -678,7 +678,7 @@ static u32
 static u32
 shadow_l1_index(mfn_t *smfn, u32 guest_index)
 {
-#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
+#if (GUEST_PAGING_LEVELS == 2)
     *smfn = _mfn(mfn_x(*smfn) +
                  (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
     return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
@@ -690,14 +690,14 @@ static u32
 static u32
 shadow_l2_index(mfn_t *smfn, u32 guest_index)
 {
-#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
+#if (GUEST_PAGING_LEVELS == 2)
     // Because we use 2 shadow l2 entries for each guest entry, the number of
     // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
     //
     *smfn = _mfn(mfn_x(*smfn) +
                  (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));
 
-    // We multiple by two to get the index of the first of the two entries
+    // We multiply by two to get the index of the first of the two entries
     // used to shadow the specified guest entry.
     return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
 #else
@@ -721,12 +721,7 @@ shadow_l4_index(mfn_t *smfn, u32 guest_i
 
 #endif // GUEST_PAGING_LEVELS >= 4
 
-extern u32 get_pat_flags(struct vcpu *v,
-                  u32 gl1e_flags,
-                  paddr_t gpaddr,
-                  paddr_t spaddr);
-
-unsigned char pat_type_2_pte_flags(unsigned char pat_type);
+
 /**************************************************************************/
 /* Function which computes shadow entries from their corresponding guest
  * entries.  This is the "heart" of the shadow code. It operates using
@@ -996,7 +991,7 @@ static inline void safe_write_entry(void
     d[1] = s[1];
     d[0] = s[0];
 #else
-    /* In 32-bit and 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
+    /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
      * which will be an atomic write, since the entry is aligned. */
     BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
     *d = *s;
@@ -1204,7 +1199,7 @@ static int shadow_set_l2e(struct vcpu *v
     shadow_l2e_t old_sl2e;
     paddr_t paddr;
 
-#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
+#if GUEST_PAGING_LEVELS == 2
     /* In 2-on-3 we work with pairs of l2es pointing at two-page
      * shadows.  Reference counting and up-pointers track from the first
      * page of the shadow to the first l2e, so make sure that we're 
@@ -1232,7 +1227,7 @@ static int shadow_set_l2e(struct vcpu *v
         } 
 
     /* Write the new entry */
-#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
+#if GUEST_PAGING_LEVELS == 2
     {
         shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
         /* The l1 shadow is two pages long and need to be pointed to by
@@ -1418,7 +1413,7 @@ static inline void increment_ptr_to_gues
 #define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
 do {                                                                    \
     int _i;                                                             \
-    shadow_l1e_t *_sp = map_shadow_page((_sl1mfn));                     \
+    shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
     ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
            || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
     for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
@@ -1429,7 +1424,7 @@ do {                                    
         if ( _done ) break;                                             \
         increment_ptr_to_guest_entry(_gl1p);                            \
     }                                                                   \
-    unmap_shadow_page(_sp);                                             \
+    sh_unmap_domain_page(_sp);                                          \
 } while (0)
 
 /* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
@@ -1450,7 +1445,7 @@ do {                                    
 #endif
     
 
-#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
+#if GUEST_PAGING_LEVELS == 2
 
 /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
 #define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
@@ -1460,7 +1455,7 @@ do {                                    
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
     for ( _j = 0; _j < 4 && !__done; _j++ )                               \
     {                                                                     \
-        shadow_l2e_t *_sp = map_shadow_page(_sl2mfn);                     \
+        shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
         for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 )         \
             if ( (!(_xen))                                                \
                  || ((_j * SHADOW_L2_PAGETABLE_ENTRIES) + _i)             \
@@ -1472,32 +1467,9 @@ do {                                    
                 if ( (__done = (_done)) ) break;                          \
                 increment_ptr_to_guest_entry(_gl2p);                      \
             }                                                             \
-        unmap_shadow_page(_sp);                                           \
+        sh_unmap_domain_page(_sp);                                        \
         _sl2mfn = _mfn(mfn_x(_sl2mfn) + 1);                               \
     }                                                                     \
-} while (0)
-
-#elif GUEST_PAGING_LEVELS == 2
-
-/* 32-bit on 32-bit: avoid Xen entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
-do {                                                                       \
-    int _i;                                                                \
-    int _xen = !shadow_mode_external(_dom);                                \
-    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);     \
-    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
-        if ( (!(_xen))                                                     \
-             ||                                                            \
-             (_i < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
-        {                                                                  \
-            (_sl2e) = _sp + _i;                                            \
-            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )          \
-                {_code}                                                    \
-            if ( _done ) break;                                            \
-            increment_ptr_to_guest_entry(_gl2p);                           \
-        }                                                                  \
-    unmap_shadow_page(_sp);                                                \
 } while (0)
 
 #elif GUEST_PAGING_LEVELS == 3
@@ -1507,7 +1479,7 @@ do {                                    
 do {                                                                       \
     int _i;                                                                \
     int _xen = !shadow_mode_external(_dom);                                \
-    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
+    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
            || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
@@ -1522,7 +1494,7 @@ do {                                    
             if ( _done ) break;                                            \
             increment_ptr_to_guest_entry(_gl2p);                           \
         }                                                                  \
-    unmap_shadow_page(_sp);                                                \
+    sh_unmap_domain_page(_sp);                                             \
 } while (0)
 
 #else 
@@ -1532,7 +1504,7 @@ do {                                    
 do {                                                                        \
     int _i;                                                                 \
     int _xen = !shadow_mode_external(_dom);                                 \
-    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                         \
+    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
            mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
@@ -1549,7 +1521,7 @@ do {                                    
             increment_ptr_to_guest_entry(_gl2p);                            \
         }                                                                   \
     }                                                                       \
-    unmap_shadow_page(_sp);                                                 \
+    sh_unmap_domain_page(_sp);                                              \
 } while (0)
 
 #endif /* different kinds of l2 */
@@ -1560,7 +1532,7 @@ do {                                    
 #define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)         \
 do {                                                                    \
     int _i;                                                             \
-    shadow_l3e_t *_sp = map_shadow_page((_sl3mfn));                     \
+    shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
     ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
     for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
@@ -1570,13 +1542,13 @@ do {                                    
         if ( _done ) break;                                             \
         increment_ptr_to_guest_entry(_gl3p);                            \
     }                                                                   \
-    unmap_shadow_page(_sp);                                             \
+    sh_unmap_domain_page(_sp);                                          \
 } while (0)
 
 /* 64-bit l4: avoid Xen mappings */
 #define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
 do {                                                                    \
-    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
+    shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
     int _xen = !shadow_mode_external(_dom);                             \
     int _i;                                                             \
     ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
@@ -1591,7 +1563,7 @@ do {                                    
         }                                                               \
         increment_ptr_to_guest_entry(_gl4p);                            \
     }                                                                   \
-    unmap_shadow_page(_sp);                                             \
+    sh_unmap_domain_page(_sp);                                          \
 } while (0)
 
 #endif
@@ -2606,7 +2578,7 @@ sh_map_and_validate(struct vcpu *v, mfn_
     guest_idx = guest_index(new_gp);
     map_mfn = smfn;
     shadow_idx = shadow_index(&map_mfn, guest_idx);
-    sl1p = map_shadow_page(map_mfn);
+    sl1p = sh_map_domain_page(map_mfn);
 
     /* Validate one entry at a time */
     while ( size )
@@ -2618,8 +2590,8 @@ sh_map_and_validate(struct vcpu *v, mfn_
         {
             /* We have moved to another page of the shadow */
             map_mfn = smfn2;
-            unmap_shadow_page(sl1p);
-            sl1p = map_shadow_page(map_mfn);
+            sh_unmap_domain_page(sl1p);
+            sl1p = sh_map_domain_page(map_mfn);
         }
         result |= validate_ge(v,
                               new_gp,
@@ -2628,7 +2600,7 @@ sh_map_and_validate(struct vcpu *v, mfn_
         size -= sizeof(guest_l1e_t);
         new_gp += sizeof(guest_l1e_t);
     }
-    unmap_shadow_page(sl1p);
+    sh_unmap_domain_page(sl1p);
     return result;
 }
 
@@ -2875,7 +2847,7 @@ static int sh_page_fault(struct vcpu *v,
     //      bunch of 4K maps.
     //
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
+#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
     if ( (regs->error_code & PFEC_reserved_bit) )
     {
         /* The only reasons for reserved bits to be set in shadow entries 
@@ -3282,7 +3254,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
         if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
             return 0;
     }
-#elif SHADOW_PAGING_LEVELS == 3
+#else /* SHADOW_PAGING_LEVELS == 3 */
 if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
            & _PAGE_PRESENT) )
         // no need to flush anything if there's no SL2...
@@ -3827,10 +3799,6 @@ sh_update_cr3(struct vcpu *v, int do_loc
 #error this should never happen
 #endif
 
-#if 0
-    printk("%s %s %d gmfn=%05lx shadow.guest_vtable=%p\n",
-           __func__, __FILE__, __LINE__, gmfn, v->arch.paging.shadow.guest_vtable);
-#endif
 
     ////
     //// vcpu->arch.shadow_table[]
@@ -3891,8 +3859,6 @@ sh_update_cr3(struct vcpu *v, int do_loc
 #error This should never happen 
 #endif
 
-#if (CONFIG_PAGING_LEVELS == 3) && (GUEST_PAGING_LEVELS == 3)
-#endif
 
     /// 
     /// v->arch.paging.shadow.l3table
@@ -3937,7 +3903,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         ASSERT(virt_to_maddr(&v->arch.paging.shadow.l3table) <= 0xffffffe0ULL);
         v->arch.cr3 = virt_to_maddr(&v->arch.paging.shadow.l3table);
 #else
-        /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
+        /* 4-on-4: Just use the shadow top-level directly */
         make_cr3(v, pagetable_get_pfn(v->arch.shadow_table[0]));
 #endif
     }
@@ -3954,7 +3920,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         v->arch.hvm_vcpu.hw_cr[3] =
             virt_to_maddr(&v->arch.paging.shadow.l3table);
 #else
-        /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
+        /* 4-on-4: Just use the shadow top-level directly */
         v->arch.hvm_vcpu.hw_cr[3] =
             pagetable_get_paddr(v->arch.shadow_table[0]);
 #endif
@@ -3988,11 +3954,9 @@ static int sh_guess_wrmap(struct vcpu *v
 {
     shadow_l1e_t sl1e, *sl1p;
     shadow_l2e_t *sl2p;
-#if SHADOW_PAGING_LEVELS >= 3
     shadow_l3e_t *sl3p;
 #if SHADOW_PAGING_LEVELS >= 4
     shadow_l4e_t *sl4p;
-#endif
 #endif
     mfn_t sl1mfn;
     int r;
@@ -4005,7 +3969,7 @@ static int sh_guess_wrmap(struct vcpu *v
     sl3p = sh_linear_l3_table(v) + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
         return 0;
-#elif SHADOW_PAGING_LEVELS == 3
+#else /* SHADOW_PAGING_LEVELS == 3 */
     sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table) 
         + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
@@ -4536,7 +4500,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
 
         if ( sh_l1e_is_magic(*sl1e) ) 
         {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
+#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
             if ( sh_l1e_is_gnp(*sl1e) )
             {
                 if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/multi.h
--- a/xen/arch/x86/mm/shadow/multi.h    Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.h    Thu May 08 16:58:33 2008 +0100
@@ -22,98 +22,96 @@
  */
 
 extern int 
-SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl1mfn, void *new_gl1p, u32 size);
 extern int 
-SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
 extern int 
-SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
 extern int 
-SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl3mfn, void *new_gl3p, u32 size);
 extern int 
-SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl4mfn, void *new_gl4p, u32 size);
 
 extern void 
-SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
 extern void 
-SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
 extern void 
-SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
 extern void 
-SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, SHADOW_LEVELS, GUEST_LEVELS)(
+SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
 
 extern void 
-SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl2mfn);
 extern void 
-SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl3mfn);
 extern void 
-SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn);
 
 extern int
-SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t readonly_mfn);
 extern int
-SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn);
 
 extern void
-SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, GUEST_LEVELS)
     (struct vcpu *v, void *ep, mfn_t smfn);
 
 extern int
-SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl2mfn, mfn_t sl1mfn);
 extern int
-SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl3mfn, mfn_t sl2mfn);
 extern int
-SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn);
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
 int 
-SHADOW_INTERNAL_NAME(sh_audit_l1_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_audit_l1_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
 int 
-SHADOW_INTERNAL_NAME(sh_audit_fl1_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_audit_fl1_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
 int 
-SHADOW_INTERNAL_NAME(sh_audit_l2_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_audit_l2_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl2mfn, mfn_t x);
 int 
-SHADOW_INTERNAL_NAME(sh_audit_l3_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_audit_l3_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl3mfn, mfn_t x);
 int 
-SHADOW_INTERNAL_NAME(sh_audit_l4_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_audit_l4_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn, mfn_t x);
 #endif
 
 extern void *
-SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS, CONFIG_PAGING_LEVELS)
+SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS)
     (struct vcpu *v, unsigned long va, unsigned long *gl1mfn);
 extern void
-SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS, CONFIG_PAGING_LEVELS)
+SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS)
     (struct vcpu *v, unsigned long va, void *eff_l1e);
 
-#if SHADOW_LEVELS == GUEST_LEVELS
 extern mfn_t
-SHADOW_INTERNAL_NAME(sh_make_monitor_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_make_monitor_table, GUEST_LEVELS)
     (struct vcpu *v);
 extern void
-SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, SHADOW_LEVELS, GUEST_LEVELS)
+SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t mmfn);
-#endif
 
 extern struct paging_mode 
-SHADOW_INTERNAL_NAME(sh_paging_mode, SHADOW_LEVELS, GUEST_LEVELS);
+SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Thu May 08 16:58:33 2008 +0100
@@ -157,49 +157,23 @@ extern void shadow_audit_tables(struct v
  * Macro for dealing with the naming of the internal names of the
  * shadow code's external entry points.
  */
-#define SHADOW_INTERNAL_NAME_HIDDEN(name, shadow_levels, guest_levels) \
-    name ## __shadow_ ## shadow_levels ## _guest_ ## guest_levels
-#define SHADOW_INTERNAL_NAME(name, shadow_levels, guest_levels) \
-    SHADOW_INTERNAL_NAME_HIDDEN(name, shadow_levels, guest_levels)
-
-#if CONFIG_PAGING_LEVELS == 3
+#define SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels) \
+    name ## __guest_ ## guest_levels
+#define SHADOW_INTERNAL_NAME(name, guest_levels)        \
+    SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels)
+
 #define GUEST_LEVELS  2
-#define SHADOW_LEVELS 3
 #include "multi.h"
 #undef GUEST_LEVELS
-#undef SHADOW_LEVELS
 
 #define GUEST_LEVELS  3
-#define SHADOW_LEVELS 3
 #include "multi.h"
 #undef GUEST_LEVELS
-#undef SHADOW_LEVELS
-#endif /* CONFIG_PAGING_LEVELS == 3 */
 
 #if CONFIG_PAGING_LEVELS == 4
-#define GUEST_LEVELS  2
-#define SHADOW_LEVELS 3
+#define GUEST_LEVELS  4
 #include "multi.h"
 #undef GUEST_LEVELS
-#undef SHADOW_LEVELS
-
-#define GUEST_LEVELS  3
-#define SHADOW_LEVELS 3
-#include "multi.h"
-#undef GUEST_LEVELS
-#undef SHADOW_LEVELS
-
-#define GUEST_LEVELS  3
-#define SHADOW_LEVELS 4
-#include "multi.h"
-#undef GUEST_LEVELS
-#undef SHADOW_LEVELS
-
-#define GUEST_LEVELS  4
-#define SHADOW_LEVELS 4
-#include "multi.h"
-#undef GUEST_LEVELS
-#undef SHADOW_LEVELS
 #endif /* CONFIG_PAGING_LEVELS == 4 */
 
 /******************************************************************************
diff -r c99a88623eda -r 810d8c3ac992 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Thu May 08 14:33:31 2008 +0100
+++ b/xen/arch/x86/mm/shadow/types.h    Thu May 08 16:58:33 2008 +0100
@@ -23,47 +23,19 @@
 #ifndef _XEN_SHADOW_TYPES_H
 #define _XEN_SHADOW_TYPES_H
 
-// Map a shadow page
-static inline void *
-map_shadow_page(mfn_t smfn)
-{
-    // XXX -- Possible optimization/measurement question for 32-bit and PAE
-    //        hypervisors:
-    //        How often is this smfn already available in the shadow linear
-    //        table?  Might it be worth checking that table first,
-    //        presumably using the reverse map hint in the page_info of this
-    //        smfn, rather than calling map_domain_page()?
-    //
-    return sh_map_domain_page(smfn);
-}
-
-// matching unmap for map_shadow_page()
-static inline void
-unmap_shadow_page(void *p)
-{
-    sh_unmap_domain_page(p);
-}
+/* The number of levels in the shadow pagetable is entirely determined
+ * by the number of levels in the guest pagetable */
+#if GUEST_PAGING_LEVELS == 4
+#define SHADOW_PAGING_LEVELS 4
+#else
+#define SHADOW_PAGING_LEVELS 3
+#endif
 
 /* 
  * Define various types for handling pagetabels, based on these options:
  * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
  * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
  */
-
-#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS) 
-#error Cannot have more levels of shadow pagetables than host pagetables
-#endif
-
-#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS) 
-#error Cannot have more levels of guest pagetables than shadow pagetables
-#endif
-
-#if SHADOW_PAGING_LEVELS == 2
-#define SHADOW_L1_PAGETABLE_ENTRIES    1024
-#define SHADOW_L2_PAGETABLE_ENTRIES    1024
-#define SHADOW_L1_PAGETABLE_SHIFT        12
-#define SHADOW_L2_PAGETABLE_SHIFT        22
-#endif
 
 #if SHADOW_PAGING_LEVELS == 3
 #define SHADOW_L1_PAGETABLE_ENTRIES     512
@@ -72,9 +44,7 @@ unmap_shadow_page(void *p)
 #define SHADOW_L1_PAGETABLE_SHIFT        12
 #define SHADOW_L2_PAGETABLE_SHIFT        21
 #define SHADOW_L3_PAGETABLE_SHIFT        30
-#endif
-
-#if SHADOW_PAGING_LEVELS == 4
+#else /* SHADOW_PAGING_LEVELS == 4 */
 #define SHADOW_L1_PAGETABLE_ENTRIES     512
 #define SHADOW_L2_PAGETABLE_ENTRIES     512
 #define SHADOW_L3_PAGETABLE_ENTRIES     512
@@ -88,11 +58,9 @@ unmap_shadow_page(void *p)
 /* Types of the shadow page tables */
 typedef l1_pgentry_t shadow_l1e_t;
 typedef l2_pgentry_t shadow_l2e_t;
-#if SHADOW_PAGING_LEVELS >= 3
 typedef l3_pgentry_t shadow_l3e_t;
 #if SHADOW_PAGING_LEVELS >= 4
 typedef l4_pgentry_t shadow_l4e_t;
-#endif
 #endif
 
 /* Access functions for them */
@@ -100,39 +68,33 @@ static inline paddr_t shadow_l1e_get_pad
 { return l1e_get_paddr(sl1e); }
 static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
 { return l2e_get_paddr(sl2e); }
-#if SHADOW_PAGING_LEVELS >= 3
 static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
 { return l3e_get_paddr(sl3e); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
 { return l4e_get_paddr(sl4e); }
-#endif
 #endif
 
 static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
 { return _mfn(l1e_get_pfn(sl1e)); }
 static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
 { return _mfn(l2e_get_pfn(sl2e)); }
-#if SHADOW_PAGING_LEVELS >= 3
 static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
 { return _mfn(l3e_get_pfn(sl3e)); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
 { return _mfn(l4e_get_pfn(sl4e)); }
-#endif
 #endif
 
 static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
 { return l1e_get_flags(sl1e); }
 static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
 { return l2e_get_flags(sl2e); }
-#if SHADOW_PAGING_LEVELS >= 3
 static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
 { return l3e_get_flags(sl3e); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
 { return l4e_get_flags(sl4e); }
-#endif
 #endif
 
 static inline shadow_l1e_t
@@ -143,26 +105,22 @@ static inline shadow_l1e_t shadow_l1e_em
 { return l1e_empty(); }
 static inline shadow_l2e_t shadow_l2e_empty(void) 
 { return l2e_empty(); }
-#if SHADOW_PAGING_LEVELS >= 3
 static inline shadow_l3e_t shadow_l3e_empty(void) 
 { return l3e_empty(); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline shadow_l4e_t shadow_l4e_empty(void) 
 { return l4e_empty(); }
-#endif
 #endif
 
 static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
 { return l1e_from_pfn(mfn_x(mfn), flags); }
 static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
 { return l2e_from_pfn(mfn_x(mfn), flags); }
-#if SHADOW_PAGING_LEVELS >= 3
 static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
 { return l3e_from_pfn(mfn_x(mfn), flags); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
 { return l4e_from_pfn(mfn_x(mfn), flags); }
-#endif
 #endif
 
 #define shadow_l1_table_offset(a) l1_table_offset(a)
@@ -441,8 +399,7 @@ struct shadow_walk_t
 /* macros for dealing with the naming of the internal function names of the
  * shadow code's external entry points.
  */
-#define INTERNAL_NAME(name) \
-    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)
+#define INTERNAL_NAME(name) SHADOW_INTERNAL_NAME(name, GUEST_PAGING_LEVELS)
 
 /* macros for renaming the primary entry points, so that they are more
  * easily distinguished from a debugger
@@ -481,42 +438,24 @@ struct shadow_walk_t
 #define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
 #define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)
 
-/* The sh_guest_(map|get)_* functions only depends on the number of config
- * levels
- */
-#define sh_guest_map_l1e                                       \
-        SHADOW_INTERNAL_NAME(sh_guest_map_l1e,                \
-                              CONFIG_PAGING_LEVELS,             \
-                              CONFIG_PAGING_LEVELS)
-#define sh_guest_get_eff_l1e                                   \
-        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e,            \
-                              CONFIG_PAGING_LEVELS,             \
-                              CONFIG_PAGING_LEVELS)
-
-/* sh_make_monitor_table only depends on the number of shadow levels */
-#define sh_make_monitor_table                                  \
-        SHADOW_INTERNAL_NAME(sh_make_monitor_table,           \
-                              SHADOW_PAGING_LEVELS,             \
-                              SHADOW_PAGING_LEVELS)
-#define sh_destroy_monitor_table                               \
-        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table,        \
-                              SHADOW_PAGING_LEVELS,             \
-                              SHADOW_PAGING_LEVELS)
-
+
+/* The sh_guest_(map|get)_* functions depends on Xen's paging levels */
+#define sh_guest_map_l1e \
+        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, CONFIG_PAGING_LEVELS)
+#define sh_guest_get_eff_l1e \
+        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, CONFIG_PAGING_LEVELS)
+
+/* sh_make_monitor_table depends only on the number of shadow levels */
+#define sh_make_monitor_table \
+        SHADOW_INTERNAL_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)
+#define sh_destroy_monitor_table \
+        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, SHADOW_PAGING_LEVELS)
 
 #if SHADOW_PAGING_LEVELS == 3
 #define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
 #endif
 
-#if SHADOW_PAGING_LEVELS == 2
-#define SH_PRI_pte "08x"
-#else /* SHADOW_PAGING_LEVELS >= 3 */
-#ifndef __x86_64__
-#define SH_PRI_pte "016llx"
-#else
-#define SH_PRI_pte "016lx"
-#endif
-#endif /* SHADOW_PAGING_LEVELS >= 3 */
+#define SH_PRI_pte PRIpte
 
 #if GUEST_PAGING_LEVELS == 2
 #define SH_PRI_gpte "08x"
@@ -529,7 +468,7 @@ struct shadow_walk_t
 #endif /* GUEST_PAGING_LEVELS >= 3 */
 
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
+#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
 /******************************************************************************
  * We implement a "fast path" for two special cases: faults that require
  * MMIO emulation, and faults where the guest PTE is not present.  We
diff -r c99a88623eda -r 810d8c3ac992 xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h        Thu May 08 14:33:31 2008 +0100
+++ b/xen/include/asm-x86/mtrr.h        Thu May 08 16:58:33 2008 +0100
@@ -62,5 +62,8 @@ extern int mtrr_del(int reg, unsigned lo
 extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
+extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
+                  paddr_t spaddr);
+extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
 
 #endif /* __ASM_X86_MTRR_H__ */
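
For reference, the level mapping that remains after this cleanup is: 2-level and 3-level (PAE) guests are both shadowed with 3-level (PAE) tables, and 4-level guests with 4-level tables, so each of the three guest_N.o builds of multi.c fixes GUEST_PAGING_LEVELS and derives SHADOW_PAGING_LEVELS from it. A small host-compilable sketch (illustrative only, not part of the patch) showing that mapping and the symbol names the new naming scheme produces:

    #include <stdio.h>

    /* String analogue of SHADOW_INTERNAL_NAME(), for printing only. */
    #define NAME_HIDDEN(name, g) #name "__guest_" #g
    #define NAME(name, g)        NAME_HIDDEN(name, g)

    /* As in types.h: 4-level guests get 4-level shadows; all others use PAE. */
    static int shadow_levels(int guest_levels)
    {
        return (guest_levels == 4) ? 4 : 3;
    }

    int main(void)
    {
        printf("guest 2 -> shadow %d -> %s\n", shadow_levels(2), NAME(sh_paging_mode, 2));
        printf("guest 3 -> shadow %d -> %s\n", shadow_levels(3), NAME(sh_paging_mode, 3));
        printf("guest 4 -> shadow %d -> %s\n", shadow_levels(4), NAME(sh_paging_mode, 4));
        return 0;
    }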

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
