[Xen-devel] [patch] pagetable cleanups, next version

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [patch] pagetable cleanups, next version
From: Gerd Knorr <kraxel@xxxxxxxxxxx>
Date: Fri, 15 Apr 2005 14:56:20 +0200
Delivery-date: Fri, 15 Apr 2005 13:00:37 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.9i
  Hi,

Next version, hopefully final this time ;)

Macros are turned into inline functions, changed again as discussed on
the list yesterday (pagetable entries are now passed back as return
values).  I've also renamed them once more: they are l?e_create_{phys|pfn}
now, which I think describes better what they do under the new calling
convention.
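
For readers who want the flavour of the new interface before wading into
the diff: here is a minimal sketch of what the l1 constructors/accessors
might look like, assuming the usual non-PAE x86_32 layout and the existing
wrapper-struct entry type.  This is illustrative only; the real definitions
live in include/asm-x86/x86_32/page.h and the x86_64 counterpart.

    /* sketch only -- not the literal header contents */
    typedef struct { unsigned long l1; } l1_pgentry_t;

    static inline l1_pgentry_t l1e_empty(void)
    {
        l1_pgentry_t e = { 0 };
        return e;
    }

    /* build an entry from a page frame number plus flag bits */
    static inline l1_pgentry_t l1e_create_pfn(unsigned long pfn,
                                              unsigned long flags)
    {
        l1_pgentry_t e = { (pfn << PAGE_SHIFT) | flags };
        return e;
    }

    /* build an entry from a physical address plus flag bits */
    static inline l1_pgentry_t l1e_create_phys(unsigned long pa,
                                               unsigned long flags)
    {
        l1_pgentry_t e = { (pa & PAGE_MASK) | flags };
        return e;
    }

    static inline unsigned long l1e_get_pfn(l1_pgentry_t e)
        { return e.l1 >> PAGE_SHIFT; }
    static inline unsigned long l1e_get_phys(l1_pgentry_t e)
        { return e.l1 & PAGE_MASK; }
    static inline unsigned long l1e_get_flags(l1_pgentry_t e)
        { return e.l1 & ~PAGE_MASK; }
    static inline unsigned long l1e_get_value(l1_pgentry_t e)
        { return e.l1; }

The l2 variants are analogous.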

Fixed the flaw found by Christian Limpach.

The 64-bit changes are included; they compile fine but are untested.
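
The flag helpers and the l?e_has_changed() test that the diff leans on
everywhere follow the same pattern (same caveat: an illustrative sketch
under the assumptions above, not the literal header):

    static inline void l1e_add_flags(l1_pgentry_t *e, unsigned long flags)
    {
        e->l1 |= flags;
    }

    static inline void l1e_remove_flags(l1_pgentry_t *e, unsigned long flags)
    {
        e->l1 &= ~flags;
    }

    /* nonzero iff the entries differ in their frame address or in any of
     * the given flag bits; PAGE_FLAG_MASK compares all flag bits */
    static inline int l1e_has_changed(l1_pgentry_t *e1, l1_pgentry_t *e2,
                                      unsigned long flags)
    {
        return (e1->l1 ^ e2->l1) & (PAGE_MASK | flags);
    }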

Enjoy,

  Gerd

==============================[ cut here ]==============================
Subject: [patch] paging cleanup

Cleanup page table handling.  Add macros to access page table
entries, fixup plenty of places in the code to use the page
table types instead of "unsigned long".
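
A typical call-site conversion looks like this (schematic, not a literal
hunk from below):

    /* before: raw value plumbing through mk_l1_pgentry() */
    pl1e[i] = mk_l1_pgentry((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

    /* after: typed entry built via the new helper */
    pl1e[i] = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);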

Signed-off-by: Gerd Knorr <kraxel@xxxxxxxxxxx>
---
 arch/x86/dom0_ops.c           |    2 
 arch/x86/domain.c             |   10 
 arch/x86/domain_build.c       |   76 ++++---
 arch/x86/mm.c                 |  296 +++++++++++++--------------
 arch/x86/shadow.c             |  360 ++++++++++++++++++----------------
 arch/x86/vmx.c                |    7 
 arch/x86/vmx_platform.c       |    4 
 arch/x86/x86_32/mm.c          |   58 ++---
 arch/x86/x86_32/traps.c       |    2 
 arch/x86/x86_64/mm.c          |   75 +++----
 common/grant_table.c          |   10 
 include/asm-x86/mm.h          |   17 -
 include/asm-x86/page.h        |   16 -
 include/asm-x86/shadow.h      |  287 ++++++++++++++-------------
 include/asm-x86/x86_32/page.h |  108 +++++++---
 include/asm-x86/x86_64/page.h |  192 ++++++++++++++----
 16 files changed, 876 insertions(+), 644 deletions(-)

Index: xen/arch/x86/shadow.c
===================================================================
--- xen.orig/arch/x86/shadow.c  2005-04-15 13:53:11.000000000 +0200
+++ xen/arch/x86/shadow.c       2005-04-15 14:13:23.000000000 +0200
@@ -312,7 +312,7 @@ free_shadow_l1_table(struct domain *d, u
     for ( i = min; i <= max; i++ )
     {
         put_page_from_l1e(pl1e[i], d);
-        pl1e[i] = mk_l1_pgentry(0);
+        pl1e[i] = l1e_empty();
     }
 
     unmap_domain_mem(pl1e);
@@ -337,9 +337,8 @@ free_shadow_hl2_table(struct domain *d, 
 
     for ( i = 0; i < limit; i++ )
     {
-        unsigned long hl2e = l1_pgentry_val(hl2[i]);
-        if ( hl2e & _PAGE_PRESENT )
-            put_page(pfn_to_page(hl2e >> PAGE_SHIFT));
+        if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
+            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
     }
 
     unmap_domain_mem(hl2);
@@ -557,15 +556,16 @@ static void free_shadow_pages(struct dom
             l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
             l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
             l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
-            if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
+
+            if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
             {
-                put_shadow_ref(l2_pgentry_val(hl2e) >> PAGE_SHIFT);
-                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+                put_shadow_ref(l2e_get_pfn(hl2e));
+                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
             }
-            if ( l2_pgentry_val(smfn) & _PAGE_PRESENT )
+            if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
             {
-                put_shadow_ref(l2_pgentry_val(smfn) >> PAGE_SHIFT);
-                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+                put_shadow_ref(l2e_get_pfn(smfn));
+                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
             }
         }
     }
@@ -648,18 +648,19 @@ static void alloc_monitor_pagetable(stru
 #endif
 
     mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK) 
-                      | __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
+                        __PAGE_HYPERVISOR);
 
     // map the phys_to_machine map into the Read-Only MPT space for this domain
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
-        mk_l2_pgentry(pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
+        l2e_create_phys(pagetable_val(d->arch.phys_table),
+                        __PAGE_HYPERVISOR);
 
     // Don't (yet) have mappings for these...
     // Don't want to accidentally see the idle_pg_table's linear mapping.
     //
-    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
-    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
+    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
 
     ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
     ed->arch.monitor_vtable = mpl2e;
@@ -682,17 +683,17 @@ void free_monitor_pagetable(struct exec_
      * First get the mfn for hl2_table by looking at monitor_table
      */
     hl2e = mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
-    if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
+    if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
     {
-        mfn = l2_pgentry_val(hl2e) >> PAGE_SHIFT;
+        mfn = l2e_get_pfn(hl2e);
         ASSERT(mfn);
         put_shadow_ref(mfn);
     }
 
     sl2e = mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
-    if ( l2_pgentry_val(sl2e) & _PAGE_PRESENT )
+    if ( l2e_get_flags(sl2e) & _PAGE_PRESENT )
     {
-        mfn = l2_pgentry_val(sl2e) >> PAGE_SHIFT;
+        mfn = l2e_get_pfn(sl2e);
         ASSERT(mfn);
         put_shadow_ref(mfn);
     }
@@ -721,7 +722,8 @@ set_p2m_entry(struct domain *d, unsigned
     ASSERT( phystab );
 
     l2 = map_domain_mem(phystab);
-    if ( !l2_pgentry_val(l2e = l2[l2_table_offset(va)]) )
+    l2e = l2[l2_table_offset(va)];
+    if ( !l2e_get_value(l2e) ) /* FIXME: check present bit? */
     {
         l1page = alloc_domheap_page(NULL);
         if ( !l1page )
@@ -731,15 +733,13 @@ set_p2m_entry(struct domain *d, unsigned
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_mem(l1);
 
-        l2e = l2[l2_table_offset(va)] =
-            mk_l2_pgentry((page_to_pfn(l1page) << PAGE_SHIFT) |
-                          __PAGE_HYPERVISOR);
+        l2e = l2e_create_pfn(page_to_pfn(l1page), __PAGE_HYPERVISOR);
+        l2[l2_table_offset(va)] = l2e;
     }
     unmap_domain_mem(l2);
 
-    l1 = map_domain_mem(l2_pgentry_val(l2e) & PAGE_MASK);
-    l1[l1_table_offset(va)] = mk_l1_pgentry((mfn << PAGE_SHIFT) |
-                                            __PAGE_HYPERVISOR);
+    l1 = map_domain_mem(l2e_get_phys(l2e));
+    l1[l1_table_offset(va)] = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
     unmap_domain_mem(l1);
 
     return 1;
@@ -1015,13 +1015,12 @@ translate_l1pgtable(struct domain *d, l1
     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     {
         if ( is_guest_l1_slot(i) &&
-             (l1_pgentry_val(l1[i]) & _PAGE_PRESENT) )
+             (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
         {
-            unsigned long mfn = l1_pgentry_val(l1[i]) >> PAGE_SHIFT;
+            unsigned long mfn = l1e_get_pfn(l1[i]);
             unsigned long gpfn = __mfn_to_gpfn(d, mfn);
-            ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
-            l1[i] = mk_l1_pgentry((gpfn << PAGE_SHIFT) |
-                                  (l1_pgentry_val(l1[i]) & ~PAGE_MASK));
+            ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
+            l1[i] = l1e_create_pfn(gpfn, l1e_get_flags(l1[i]));
         }
     }
     unmap_domain_mem(l1);
@@ -1043,13 +1042,12 @@ translate_l2pgtable(struct domain *d, l1
     for (i = 0; i < L2_PAGETABLE_ENTRIES; i++)
     {
         if ( is_guest_l2_slot(i) &&
-             (l2_pgentry_val(l2[i]) & _PAGE_PRESENT) )
+             (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
         {
-            unsigned long mfn = l2_pgentry_val(l2[i]) >> PAGE_SHIFT;
+            unsigned long mfn = l2e_get_pfn(l2[i]);
             unsigned long gpfn = __mfn_to_gpfn(d, mfn);
-            ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
-            l2[i] = mk_l2_pgentry((gpfn << PAGE_SHIFT) |
-                                  (l2_pgentry_val(l2[i]) & ~PAGE_MASK));
+            ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
+            l2[i] = l2e_create_pfn(gpfn, l2e_get_flags(l2[i]));
             translate_l1pgtable(d, p2m, mfn);
         }
     }
@@ -1321,13 +1319,13 @@ gpfn_to_mfn_foreign(struct domain *d, un
     l2_pgentry_t *l2 = map_domain_mem(phystab);
     l2_pgentry_t l2e = l2[l2_table_offset(va)];
     unmap_domain_mem(l2);
-    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
         printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%p) => 0 l2e=%p\n",
-               d->id, gpfn, l2_pgentry_val(l2e));
+               d->id, gpfn, l2e_get_value(l2e));
         return INVALID_MFN;
     }
-    unsigned long l1tab = l2_pgentry_val(l2e) & PAGE_MASK;
+    unsigned long l1tab = l2e_get_phys(l2e);
     l1_pgentry_t *l1 = map_domain_mem(l1tab);
     l1_pgentry_t l1e = l1[l1_table_offset(va)];
     unmap_domain_mem(l1);
@@ -1337,14 +1335,14 @@ gpfn_to_mfn_foreign(struct domain *d, un
            d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
 #endif
 
-    if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
     {
         printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%p) => 0 l1e=%p\n",
-               d->id, gpfn, l1_pgentry_val(l1e));
+               d->id, gpfn, l1e_get_value(l1e));
         return INVALID_MFN;
     }
 
-    return l1_pgentry_val(l1e) >> PAGE_SHIFT;
+    return l1e_get_pfn(l1e);
 }
 
 static unsigned long
@@ -1388,11 +1386,11 @@ shadow_hl2_table(struct domain *d, unsig
         // Setup easy access to the GL2, SL2, and HL2 frames.
         //
         hl2[l2_table_offset(LINEAR_PT_VIRT_START)] =
-            mk_l1_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+            l1e_create_pfn(gmfn, __PAGE_HYPERVISOR);
         hl2[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
-            mk_l1_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+            l1e_create_pfn(smfn, __PAGE_HYPERVISOR);
         hl2[l2_table_offset(PERDOMAIN_VIRT_START)] =
-            mk_l1_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+            l1e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
     }
 
     unmap_domain_mem(hl2);
@@ -1441,20 +1439,19 @@ static unsigned long shadow_l2_table(
                HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
         spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
-            mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
 
         spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-            mk_l2_pgentry(__pa(page_get_owner(
-                &frame_table[gmfn])->arch.mm_perdomain_pt) |
-                          __PAGE_HYPERVISOR);
+            l2e_create_phys(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
+                            __PAGE_HYPERVISOR);
 
         if ( shadow_mode_translate(d) ) // NB: not external
         {
             unsigned long hl2mfn;
 
             spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
-                mk_l2_pgentry(pagetable_val(d->arch.phys_table) |
-                              __PAGE_HYPERVISOR);
+                l2e_create_phys(pagetable_val(d->arch.phys_table),
+                                __PAGE_HYPERVISOR);
 
             if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
                 hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
@@ -1466,11 +1463,11 @@ static unsigned long shadow_l2_table(
                 BUG();
             
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-                mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+                l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
         }
         else
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-                mk_l2_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+                l2e_create_pfn(gmfn, __PAGE_HYPERVISOR);
     }
     else
     {
@@ -1487,12 +1484,14 @@ void shadow_map_l1_into_current_l2(unsig
 { 
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
-    unsigned long    *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, gl1mfn, sl1mfn;
+    l1_pgentry_t *gpl1e, *spl1e;
+    l2_pgentry_t gl2e, sl2e;
+    unsigned long gl1pfn, gl1mfn, sl1mfn;
     int i, init_table = 0;
 
     __guest_get_l2e(ed, va, &gl2e);
-    ASSERT(gl2e & _PAGE_PRESENT);
-    gl1pfn = gl2e >> PAGE_SHIFT;
+    ASSERT(l2e_get_flags(gl2e) & _PAGE_PRESENT);
+    gl1pfn = l2e_get_pfn(gl2e);
 
     if ( !(sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow)) )
     {
@@ -1525,9 +1524,9 @@ void shadow_map_l1_into_current_l2(unsig
     }
 
 #ifndef NDEBUG
-    unsigned long old_sl2e;
+    l2_pgentry_t old_sl2e;
     __shadow_get_l2e(ed, va, &old_sl2e);
-    ASSERT( !(old_sl2e & _PAGE_PRESENT) );
+    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
 #endif
 
     if ( !get_shadow_ref(sl1mfn) )
@@ -1538,25 +1537,23 @@ void shadow_map_l1_into_current_l2(unsig
 
     if ( init_table )
     {
-        gpl1e = (unsigned long *)
-            &(linear_pg_table[l1_linear_offset(va) &
+        gpl1e = &(linear_pg_table[l1_linear_offset(va) &
                               ~(L1_PAGETABLE_ENTRIES-1)]);
 
-        spl1e = (unsigned long *)
-            &(shadow_linear_pg_table[l1_linear_offset(va) &
+        spl1e = &(shadow_linear_pg_table[l1_linear_offset(va) &
                                      ~(L1_PAGETABLE_ENTRIES-1)]);
 
-        unsigned long sl1e;
+        l1_pgentry_t sl1e;
         int index = l1_table_offset(va);
         int min = 1, max = 0;
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
-            if ( (sl1e & _PAGE_PRESENT) &&
-                 !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
-                sl1e = 0;
-            if ( sl1e == 0 )
+            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(sl1e, d) )
+                sl1e = l1e_empty();
+            if ( l1e_get_value(sl1e) == 0 ) /* FIXME: check flags? */
             {
                 // First copy entries from 0 until first invalid.
                 // Then copy entries from index until first invalid.
@@ -1582,7 +1579,7 @@ void shadow_map_l1_into_current_l2(unsig
 void shadow_invlpg(struct exec_domain *ed, unsigned long va)
 {
     struct domain *d = ed->domain;
-    unsigned long gpte, spte;
+    l1_pgentry_t gpte, spte;
 
     ASSERT(shadow_mode_enabled(d));
 
@@ -1595,8 +1592,8 @@ void shadow_invlpg(struct exec_domain *e
     // It's not strictly necessary to update the shadow here,
     // but it might save a fault later.
     //
-    if (__get_user(gpte, (unsigned long *)
-                   &linear_pg_table[va >> PAGE_SHIFT])) {
+    if (__copy_from_user(&gpte, &linear_pg_table[va >> PAGE_SHIFT],
+                         sizeof(gpte))) {
         perfc_incrc(shadow_invlpg_faults);
         return;
     }
@@ -1764,31 +1761,30 @@ void shadow_mark_va_out_of_sync(
 {
     struct out_of_sync_entry *entry =
         shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
-    unsigned long sl2e;
+    l2_pgentry_t sl2e;
 
     // We need the address of shadow PTE that maps @va.
     // It might not exist yet.  Make sure it's there.
     //
     __shadow_get_l2e(ed, va, &sl2e);
-    if ( !(sl2e & _PAGE_PRESENT) )
+    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
     {
         // either this L1 isn't shadowed yet, or the shadow isn't linked into
         // the current L2.
         shadow_map_l1_into_current_l2(va);
         __shadow_get_l2e(ed, va, &sl2e);
     }
-    ASSERT(sl2e & _PAGE_PRESENT);
+    ASSERT(l2e_get_flags(sl2e) & _PAGE_PRESENT);
 
     // NB: this is stored as a machine address.
     entry->writable_pl1e =
-        ((sl2e & PAGE_MASK) |
-         (sizeof(l1_pgentry_t) * l1_table_offset(va)));
+        l2e_get_phys(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
     ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
 
     // Increment shadow's page count to represent the reference
     // inherent in entry->writable_pl1e
     //
-    if ( !get_shadow_ref(sl2e >> PAGE_SHIFT) )
+    if ( !get_shadow_ref(l2e_get_pfn(sl2e)) )
         BUG();
 
     FSH_LOG("mark_out_of_sync(va=%p -> writable_pl1e=%p)",
@@ -1841,7 +1837,7 @@ int __shadow_out_of_sync(struct exec_dom
 {
     struct domain *d = ed->domain;
     unsigned long l2mfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
-    unsigned long l2e;
+    l2_pgentry_t l2e;
     unsigned long l1mfn;
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
@@ -1853,10 +1849,10 @@ int __shadow_out_of_sync(struct exec_dom
         return 1;
 
     __guest_get_l2e(ed, va, &l2e);
-    if ( !(l2e & _PAGE_PRESENT) )
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
         return 0;
 
-    l1mfn = __gpfn_to_mfn(d, l2e >> PAGE_SHIFT);
+    l1mfn = __gpfn_to_mfn(d, l2e_get_pfn(l2e));
 
     // If the l1 pfn is invalid, it can't be out of sync...
     if ( !VALID_MFN(l1mfn) )
@@ -1923,31 +1919,31 @@ static u32 remove_all_write_access_in_pt
     unsigned long readonly_gpfn, unsigned long readonly_gmfn,
     u32 max_refs_to_find, unsigned long prediction)
 {
-    unsigned long *pt = map_domain_mem(pt_mfn << PAGE_SHIFT);
-    unsigned long match =
-        (readonly_gmfn << PAGE_SHIFT) | _PAGE_RW | _PAGE_PRESENT;
-    unsigned long mask = PAGE_MASK | _PAGE_RW | _PAGE_PRESENT;
+    l1_pgentry_t *pt = map_domain_mem(pt_mfn << PAGE_SHIFT);
+    l1_pgentry_t match;
+    unsigned long flags = _PAGE_RW | _PAGE_PRESENT;
     int i;
     u32 found = 0;
     int is_l1_shadow =
         ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
-#define MATCH_ENTRY(_i) (((pt[_i] ^ match) & mask) == 0)
+    match = l1e_create_pfn(readonly_gmfn, flags);
 
     // returns true if all refs have been found and fixed.
     //
     int fix_entry(int i)
     {
-        unsigned long old = pt[i];
-        unsigned long new = old & ~_PAGE_RW;
+        l1_pgentry_t old = pt[i];
+        l1_pgentry_t new = old;
 
-        if ( is_l1_shadow && !shadow_get_page_from_l1e(mk_l1_pgentry(new), d) )
+        l1e_remove_flags(&new,_PAGE_RW);
+        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
             BUG();
         found++;
         pt[i] = new;
         if ( is_l1_shadow )
-            put_page_from_l1e(mk_l1_pgentry(old), d);
+            put_page_from_l1e(old, d);
 
 #if 0
         printk("removed write access to pfn=%p mfn=%p in smfn=%p entry %x "
@@ -1958,8 +1954,8 @@ static u32 remove_all_write_access_in_pt
         return (found == max_refs_to_find);
     }
 
-    if ( MATCH_ENTRY(readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1)) &&
-         fix_entry(readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1)) )
+    i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
+    if ( !l1e_has_changed(&pt[i], &match, flags) && fix_entry(i) )
     {
         perfc_incrc(remove_write_fast_exit);
         increase_writable_pte_prediction(d, readonly_gpfn, prediction);
@@ -1969,7 +1965,7 @@ static u32 remove_all_write_access_in_pt
  
     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(MATCH_ENTRY(i)) && fix_entry(i) )
+        if ( unlikely(!l1e_has_changed(&pt[i], &match, flags)) && fix_entry(i) )
             break;
     }
 
@@ -2066,25 +2062,27 @@ int shadow_remove_all_write_access(
 static u32 remove_all_access_in_page(
     struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn)
 {
-    unsigned long *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT);
-    unsigned long match = (forbidden_gmfn << PAGE_SHIFT) | _PAGE_PRESENT;
-    unsigned long mask  = PAGE_MASK | _PAGE_PRESENT;
+    l1_pgentry_t *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT);
+    l1_pgentry_t match;
+    unsigned long flags  = _PAGE_PRESENT;
     int i;
     u32 count = 0;
     int is_l1_shadow =
         ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
+    match = l1e_create_pfn(forbidden_gmfn, flags);
+    
     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     {
-        if ( unlikely(((pl1e[i] ^ match) & mask) == 0) )
+        if ( unlikely(!l1e_has_changed(&pl1e[i], &match, flags)) )
         {
-            unsigned long ol2e = pl1e[i];
-            pl1e[i] = 0;
+            l1_pgentry_t ol2e = pl1e[i];
+            pl1e[i] = l1e_empty();
             count++;
 
             if ( is_l1_shadow )
-                put_page_from_l1e(mk_l1_pgentry(ol2e), d);
+                put_page_from_l1e(ol2e, d);
             else /* must be an hl2 page */
                 put_page(&frame_table[forbidden_gmfn]);
         }
@@ -2138,7 +2136,7 @@ static int resync_all(struct domain *d, 
     struct out_of_sync_entry *entry;
     unsigned i;
     unsigned long smfn;
-    unsigned long *guest, *shadow, *snapshot;
+    void *guest, *shadow, *snapshot;
     int need_flush = 0, external = shadow_mode_external(d);
     int unshadow;
     int changed;
@@ -2176,14 +2174,18 @@ static int resync_all(struct domain *d, 
             int min_snapshot = SHADOW_MIN(min_max_snapshot);
             int max_snapshot = SHADOW_MAX(min_max_snapshot);
 
+            l1_pgentry_t *guest1 = guest;
+            l1_pgentry_t *shadow1 = shadow;
+            l1_pgentry_t *snapshot1 = snapshot;
+
             changed = 0;
 
             for ( i = min_shadow; i <= max_shadow; i++ )
             {
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
-                     (guest[i] != snapshot[i]) )
+                     l1e_has_changed(&guest1[i], &snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_pte_change(d, guest[i], &shadow[i]);
+                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
 
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
@@ -2202,16 +2204,20 @@ static int resync_all(struct domain *d, 
         {
             int max = -1;
 
+            l2_pgentry_t *guest2 = guest;
+            l2_pgentry_t *shadow2 = shadow;
+            l2_pgentry_t *snapshot2 = snapshot;
+
             changed = 0;
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
             {
                 if ( !is_guest_l2_slot(i) && !external )
                     continue;
 
-                unsigned long new_pde = guest[i];
-                if ( new_pde != snapshot[i] )
+                l2_pgentry_t new_pde = guest2[i];
+                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK))
                 {
-                    need_flush |= validate_pde_change(d, new_pde, &shadow[i]);
+                    need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
 
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
@@ -2220,12 +2226,13 @@ static int resync_all(struct domain *d, 
 
                     changed++;
                 }
-                if ( new_pde != 0 )
+                if ( l2e_get_value(new_pde) != 0 ) /* FIXME: check flags? */
                     max = i;
 
                 // XXX - This hack works for linux guests.
                 //       Need a better solution long term.
-                if ( !(new_pde & _PAGE_PRESENT) && unlikely(new_pde != 0) &&
+                if ( !(l2e_get_flags(new_pde) & _PAGE_PRESENT) &&
+                     unlikely(l2e_get_value(new_pde) != 0) &&
                      !unshadow &&
                      (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
                     unshadow = 1;
@@ -2237,16 +2244,21 @@ static int resync_all(struct domain *d, 
             break;
         }
         case PGT_hl2_shadow:
+        {
+            l2_pgentry_t *guest2 = guest;
+            l2_pgentry_t *snapshot2 = snapshot;
+            l1_pgentry_t *shadow2 = shadow;
+            
             changed = 0;
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
             {
                 if ( !is_guest_l2_slot(i) && !external )
                     continue;
 
-                unsigned long new_pde = guest[i];
-                if ( new_pde != snapshot[i] )
+                l2_pgentry_t new_pde = guest2[i];
+                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_hl2e_change(d, new_pde, &shadow[i]);
+                    need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
 
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
@@ -2259,6 +2271,7 @@ static int resync_all(struct domain *d, 
             perfc_incrc(resync_hl2);
             perfc_incr_histo(shm_hl2_updates, changed, PT_UPDATES);
             break;
+        }
         default:
             BUG();
         }
@@ -2304,15 +2317,16 @@ void __shadow_sync_all(struct domain *d)
         if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
             continue;
 
-        unsigned long *ppte = map_domain_mem(entry->writable_pl1e);
-        unsigned long opte = *ppte;
-        unsigned long npte = opte & ~_PAGE_RW;
+        l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e);
+        l1_pgentry_t opte = *ppte;
+        l1_pgentry_t npte = opte;
+        l1e_remove_flags(&npte, _PAGE_RW);
 
-        if ( (npte & _PAGE_PRESENT) &&
-             !shadow_get_page_from_l1e(mk_l1_pgentry(npte), d) )
+        if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(npte, d) )
             BUG();
         *ppte = npte;
-        put_page_from_l1e(mk_l1_pgentry(opte), d);
+        put_page_from_l1e(opte, d);
 
         unmap_domain_mem(ppte);
     }
@@ -2347,10 +2361,12 @@ void __shadow_sync_all(struct domain *d)
 
 int shadow_fault(unsigned long va, struct xen_regs *regs)
 {
-    unsigned long gpte, spte = 0, orig_gpte;
+    l1_pgentry_t gpte, spte, orig_gpte;
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
-    unsigned long gpde;
+    l2_pgentry_t gpde;
+
+    spte = l1e_empty();
 
     SH_VVLOG("shadow_fault( va=%p, code=%lu )", va, regs->error_code );
     perfc_incrc(shadow_fault_calls);
@@ -2373,7 +2389,7 @@ int shadow_fault(unsigned long va, struc
      * STEP 2. Check the guest PTE.
      */
     __guest_get_l2e(ed, va, &gpde);
-    if ( unlikely(!(gpde & _PAGE_PRESENT)) )
+    if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
     {
         SH_VVLOG("shadow_fault - EXIT: L1 not present" );
         perfc_incrc(shadow_fault_bail_pde_not_present);
@@ -2384,8 +2400,8 @@ int shadow_fault(unsigned long va, struc
     // the mapping is in-sync, so the check of the PDE's present bit, above,
     // covers this access.
     //
-    orig_gpte = gpte = l1_pgentry_val(linear_pg_table[l1_linear_offset(va)]);
-    if ( unlikely(!(gpte & _PAGE_PRESENT)) )
+    orig_gpte = gpte = linear_pg_table[l1_linear_offset(va)];
+    if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
     {
         SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
         perfc_incrc(shadow_fault_bail_pte_not_present);
@@ -2395,7 +2411,7 @@ int shadow_fault(unsigned long va, struc
     /* Write fault? */
     if ( regs->error_code & 2 )  
     {
-        if ( unlikely(!(gpte & _PAGE_RW)) )
+        if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_RW)) )
         {
             /* Write fault on a read-only mapping. */
             SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
@@ -2427,8 +2443,8 @@ int shadow_fault(unsigned long va, struc
      */
 
     /* XXX Watch out for read-only L2 entries! (not used in Linux). */
-    if ( unlikely(__put_user(gpte, (unsigned long *)
-                             &linear_pg_table[l1_linear_offset(va)])) )
+    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
+                                 &gpte, sizeof(gpte))) )
     {
         printk("shadow_fault() failed, crashing domain %d "
                "due to a read-only L2 page table (gpde=%p), va=%p\n",
@@ -2437,8 +2453,9 @@ int shadow_fault(unsigned long va, struc
     }
 
     // if necessary, record the page table page as dirty
-    if ( unlikely(shadow_mode_log_dirty(d)) && (orig_gpte != gpte) )
-        mark_dirty(d, __gpfn_to_mfn(d, gpde >> PAGE_SHIFT));
+    if ( unlikely(shadow_mode_log_dirty(d)) &&
+         l1e_has_changed(&orig_gpte, &gpte, PAGE_FLAG_MASK))
+        mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
 
     shadow_set_l1e(va, spte, 1);
 
@@ -2560,16 +2577,16 @@ void __update_pagetables(struct exec_dom
         if ( !get_shadow_ref(hl2mfn) )
             BUG();
         mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-        if ( l2_pgentry_val(old_hl2e) & _PAGE_PRESENT )
-            put_shadow_ref(l2_pgentry_val(old_hl2e) >> PAGE_SHIFT);
+            l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
+        if ( l2e_get_flags(old_hl2e) & _PAGE_PRESENT )
+            put_shadow_ref(l2e_get_pfn(old_hl2e));
 
         if ( !get_shadow_ref(smfn) )
             BUG();
         mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
-            mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-        if ( l2_pgentry_val(old_sl2e) & _PAGE_PRESENT )
-            put_shadow_ref(l2_pgentry_val(old_sl2e) >> PAGE_SHIFT);
+            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
+        if ( l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
+            put_shadow_ref(l2e_get_pfn(old_sl2e));
 
         // XXX - maybe this can be optimized somewhat??
         local_flush_tlb();
@@ -2590,10 +2607,9 @@ char * sh_check_name;
 int shadow_status_noswap;
 
 #define v2m(adr) ({                                                      \
-    unsigned long _a = (unsigned long)(adr);                             \
-    unsigned long _pte = l1_pgentry_val(                                 \
-                            shadow_linear_pg_table[_a >> PAGE_SHIFT]);   \
-    unsigned long _pa = _pte & PAGE_MASK;                                \
+    unsigned long _a  = (unsigned long)(adr);                            \
+    l1_pgentry_t _pte = shadow_linear_pg_table[_a >> PAGE_SHIFT];        \
+    unsigned long _pa = l1e_get_phys(_pte);                              \
     _pa | (_a & ~PAGE_MASK);                                             \
 })
 
@@ -2611,49 +2627,55 @@ int shadow_status_noswap;
     } while ( 0 )
 
 static int check_pte(
-    struct domain *d, unsigned long *pgpte, unsigned long *pspte, 
+    struct domain *d, l1_pgentry_t *pgpte, l1_pgentry_t *pspte, 
     int level, int l2_idx, int l1_idx, int oos_ptes)
 {
-    unsigned gpte = *pgpte;
-    unsigned spte = *pspte;
+    l1_pgentry_t gpte = *pgpte;
+    l1_pgentry_t spte = *pspte;
     unsigned long mask, gpfn, smfn, gmfn;
     int errors = 0;
     int page_table_page;
 
-    if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
+    if ( (l1e_get_value(spte) == 0) ||
+         (l1e_get_value(spte) == 0xdeadface) ||
+         (l1e_get_value(spte) == 0x00000E00) )
         return errors;  /* always safe */
 
-    if ( !(spte & _PAGE_PRESENT) )
+    if ( !(l1e_get_flags(spte) & _PAGE_PRESENT) )
         FAIL("Non zero not present spte");
 
     if ( level == 2 ) sh_l2_present++;
     if ( level == 1 ) sh_l1_present++;
 
-    if ( !(gpte & _PAGE_PRESENT) )
+    if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
         FAIL("Guest not present yet shadow is");
 
-    mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|PAGE_MASK);
+    mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW);
 
-    if ( (spte & mask) != (gpte & mask) )
+    if ( l1e_has_changed(&spte, &gpte, mask) )
         FAIL("Corrupt?");
 
     if ( (level == 1) &&
-         (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) && !oos_ptes )
+         (l1e_get_flags(spte) & _PAGE_DIRTY ) &&
+         !(l1e_get_flags(gpte) & _PAGE_DIRTY) && !oos_ptes )
         FAIL("Dirty coherence");
 
-    if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) && !oos_ptes )
+    if ( (l1e_get_flags(spte) & _PAGE_ACCESSED ) &&
+         !(l1e_get_flags(gpte) & _PAGE_ACCESSED) && !oos_ptes )
         FAIL("Accessed coherence");
 
-    smfn = spte >> PAGE_SHIFT;
-    gpfn = gpte >> PAGE_SHIFT;
+    smfn = l1e_get_pfn(spte);
+    gpfn = l1e_get_pfn(gpte);
     gmfn = __gpfn_to_mfn(d, gpfn);
 
     if ( !VALID_MFN(gmfn) )
-        FAIL("invalid gpfn=%p gpte=%p\n", __func__, gpfn, gpte);
+        FAIL("invalid gpfn=%p gpte=%p\n", __func__, gpfn,
+             l1e_get_value(gpte));
 
     page_table_page = mfn_is_page_table(gmfn);
 
-    if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) && !oos_ptes )
+    if ( (l1e_get_flags(spte) & _PAGE_RW ) &&
+         !(l1e_get_flags(gpte) & _PAGE_RW) && !oos_ptes )
     {
         printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
                "oos_ptes=%d\n",
@@ -2664,8 +2686,9 @@ static int check_pte(
     }
 
     if ( (level == 1) &&
-         (spte & _PAGE_RW ) &&
-         !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) &&
+         (l1e_get_flags(spte) & _PAGE_RW ) &&
+         !((l1e_get_flags(gpte) & _PAGE_RW) &&
+           (l1e_get_flags(gpte) & _PAGE_DIRTY)) &&
          !oos_ptes )
     {
         printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
@@ -2704,7 +2727,7 @@ static int check_l1_table(
     unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
 {
     int i;
-    unsigned long *gpl1e, *spl1e;
+    l1_pgentry_t *gpl1e, *spl1e;
     int errors = 0, oos_ptes = 0;
 
     if ( page_out_of_sync(pfn_to_page(gmfn)) )
@@ -2737,6 +2760,7 @@ int check_l2_table(
 {
     l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
     l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+    l2_pgentry_t match;
     int i;
     int errors = 0;
     int limit;
@@ -2768,25 +2792,26 @@ int check_l2_table(
         FAILPT("hypervisor linear map inconsistent");
 #endif
 
+    match = l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
     if ( !shadow_mode_external(d) &&
-         (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> 
-                               L2_PAGETABLE_SHIFT]) != 
-          ((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
+         l2e_has_changed(&spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
+                         &match, PAGE_FLAG_MASK))
     {
         FAILPT("hypervisor shadow linear map inconsistent %p %p",
-               l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
-                                    L2_PAGETABLE_SHIFT]),
-               (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+               l2e_get_value(spl2e[SH_LINEAR_PT_VIRT_START >>
+                                   L2_PAGETABLE_SHIFT]),
+               l2e_get_value(match));
     }
 
+    match = l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
     if ( !shadow_mode_external(d) &&
-         (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
-              ((__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR))) )
+         l2e_has_changed(&spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
+                         &match, PAGE_FLAG_MASK))
     {
         FAILPT("hypervisor per-domain map inconsistent saw %p, expected 
(va=%p) %p",
-               l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> 
L2_PAGETABLE_SHIFT]),
+               l2e_get_value(spl2e[PERDOMAIN_VIRT_START >> 
L2_PAGETABLE_SHIFT]),
                d->arch.mm_perdomain_pt,
-               (__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR));
+               l2e_get_value(match));
     }
 
 #ifdef __i386__
@@ -2800,7 +2825,10 @@ int check_l2_table(
 
     /* Check the whole L2. */
     for ( i = 0; i < limit; i++ )
-        errors += check_pte(d, &l2_pgentry_val(gpl2e[i]), &l2_pgentry_val(spl2e[i]), 2, i, 0, 0);
+        errors += check_pte(d,
+                            (l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
+                            (l1_pgentry_t*)(&spl2e[i]),
+                            2, i, 0, 0);
 
     unmap_domain_mem(spl2e);
     unmap_domain_mem(gpl2e);
@@ -2864,11 +2892,11 @@ int _check_pagetable(struct exec_domain 
 
     for ( i = 0; i < limit; i++ )
     {
-        unsigned long gl1pfn = l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT;
+        unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
         unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
-        unsigned long sl1mfn = l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT;
+        unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
 
-        if ( l2_pgentry_val(spl2e[i]) != 0 )
+        if ( l2e_get_value(spl2e[i]) != 0 )  /* FIXME: check flags? */
         {
             errors += check_l1_table(d, gl1pfn, gl1mfn, sl1mfn, i);
         }
Index: xen/include/asm-x86/shadow.h
===================================================================
--- xen.orig/include/asm-x86/shadow.h   2005-04-15 13:53:10.000000000 +0200
+++ xen/include/asm-x86/shadow.h        2005-04-15 14:07:22.000000000 +0200
@@ -270,18 +270,22 @@ extern int shadow_status_noswap;
 static inline int
 shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
-    l1_pgentry_t nl1e = mk_l1_pgentry(l1_pgentry_val(l1e) & ~_PAGE_GLOBAL);
-    int res = get_page_from_l1e(nl1e, d);
+    l1_pgentry_t nl1e;
+    int res;
     unsigned long mfn;
     struct domain *owner;
 
-    ASSERT( l1_pgentry_val(nl1e) & _PAGE_PRESENT );
+    ASSERT(l1e_get_flags(l1e) & _PAGE_PRESENT);
 
-    if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
-         !(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
-         (mfn = l1_pgentry_to_pfn(nl1e)) &&
+    nl1e = l1e;
+    l1e_remove_flags(&nl1e, _PAGE_GLOBAL);
+    res = get_page_from_l1e(nl1e, d);
+
+    if (unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
+         !(l1e_get_flags(l1e) & L1_DISALLOW_MASK) &&
+         (mfn = l1e_get_pfn(l1e)) &&
          pfn_valid(mfn) &&
-         (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
+         (owner = page_get_owner(pfn_to_page(l1e_get_pfn(l1e)))) &&
          (d != owner) )
     {
         res = get_page_from_l1e(nl1e, owner);
@@ -293,7 +297,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
     if ( unlikely(!res) )
     {
         perfc_incrc(shadow_get_page_fail);
-        FSH_LOG("%s failed to get ref l1e=%p", __func__, l1_pgentry_val(l1e));
+        FSH_LOG("%s failed to get ref l1e=%p\n", __func__, l1e_get_value(l1e));
     }
 
     return res;
@@ -303,34 +307,34 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
 
 static inline void
 __shadow_get_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long *psl2e)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t *psl2e)
 {
     ASSERT(shadow_mode_enabled(ed->domain));
 
-    *psl2e = l2_pgentry_val( ed->arch.shadow_vtable[l2_table_offset(va)]);
+    *psl2e = ed->arch.shadow_vtable[l2_table_offset(va)];
 }
 
 static inline void
 __shadow_set_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long value)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
 {
     ASSERT(shadow_mode_enabled(ed->domain));
 
-    ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+    ed->arch.shadow_vtable[l2_table_offset(va)] = value;
 }
 
 static inline void
 __guest_get_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long *pl2e)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t *pl2e)
 {
-    *pl2e = l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]);
+    *pl2e = ed->arch.guest_vtable[l2_table_offset(va)];
 }
 
 static inline void
 __guest_set_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long value)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
 {
-    ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+    ed->arch.guest_vtable[l2_table_offset(va)] = value;
 
     if ( unlikely(shadow_mode_translate(ed->domain)) )
         update_hl2e(ed, va);
@@ -340,36 +344,36 @@ static inline void
 update_hl2e(struct exec_domain *ed, unsigned long va)
 {
     int index = l2_table_offset(va);
-    unsigned long gl2e = l2_pgentry_val(ed->arch.guest_vtable[index]);
     unsigned long mfn;
-    unsigned long old_hl2e, new_hl2e;
+    l2_pgentry_t gl2e = ed->arch.guest_vtable[index];
+    l1_pgentry_t old_hl2e, new_hl2e;
     int need_flush = 0;
 
     ASSERT(shadow_mode_translate(ed->domain));
 
-    old_hl2e = l1_pgentry_val(ed->arch.hl2_vtable[index]);
+    old_hl2e = ed->arch.hl2_vtable[index];
 
-    if ( (gl2e & _PAGE_PRESENT) &&
-         VALID_MFN(mfn = phys_to_machine_mapping(gl2e >> PAGE_SHIFT)) )
-        new_hl2e = (mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR;
+    if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
+         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e)) ))
+        new_hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
     else
-        new_hl2e = 0;
+        new_hl2e = l1e_empty();
 
     // only do the ref counting if something important changed.
     //
-    if ( (old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT) )
+    if ( (l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT)) )
     {
-        if ( (new_hl2e & _PAGE_PRESENT) &&
-             !get_page(pfn_to_page(new_hl2e >> PAGE_SHIFT), ed->domain) )
-            new_hl2e = 0;
-        if ( old_hl2e & _PAGE_PRESENT )
+        if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
+             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), ed->domain) )
+            new_hl2e = l1e_empty();
+        if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
         {
-            put_page(pfn_to_page(old_hl2e >> PAGE_SHIFT));
+            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
             need_flush = 1;
         }
     }
 
-    ed->arch.hl2_vtable[l2_table_offset(va)] = mk_l1_pgentry(new_hl2e);
+    ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
 
     if ( need_flush )
     {
@@ -564,13 +568,13 @@ extern void shadow_mark_va_out_of_sync(
     unsigned long va);
 
 static inline int l1pte_write_fault(
-    struct exec_domain *ed, unsigned long *gpte_p, unsigned long *spte_p,
+    struct exec_domain *ed, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
     unsigned long va)
 {
     struct domain *d = ed->domain;
-    unsigned long gpte = *gpte_p;
-    unsigned long spte;
-    unsigned long gpfn = gpte >> PAGE_SHIFT;
+    l1_pgentry_t gpte = *gpte_p;
+    l1_pgentry_t spte;
+    unsigned long gpfn = l1e_get_pfn(gpte);
     unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
 
     //printk("l1pte_write_fault gmfn=%p\n", gmfn);
@@ -578,15 +582,16 @@ static inline int l1pte_write_fault(
     if ( unlikely(!VALID_MFN(gmfn)) )
     {
         SH_LOG("l1pte_write_fault: invalid gpfn=%p", gpfn);
-        *spte_p = 0;
+        *spte_p = l1e_empty();
         return 0;
     }
 
-    ASSERT(gpte & _PAGE_RW);
-    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
-    spte = (gmfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+    ASSERT(l1e_get_flags(gpte) & _PAGE_RW);
+    l1e_add_flags(&gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
+    spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte));
 
-    SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
+    SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p",
+             l1e_get_value(spte), l1e_get_value(gpte));
 
     if ( shadow_mode_log_dirty(d) )
         __mark_dirty(d, gmfn);
@@ -601,30 +606,31 @@ static inline int l1pte_write_fault(
 }
 
 static inline int l1pte_read_fault(
-    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
+    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
 { 
-    unsigned long gpte = *gpte_p;
-    unsigned long spte = *spte_p;
-    unsigned long pfn = gpte >> PAGE_SHIFT;
+    l1_pgentry_t gpte = *gpte_p;
+    l1_pgentry_t spte = *spte_p;
+    unsigned long pfn = l1e_get_pfn(gpte);
     unsigned long mfn = __gpfn_to_mfn(d, pfn);
 
     if ( unlikely(!VALID_MFN(mfn)) )
     {
         SH_LOG("l1pte_read_fault: invalid gpfn=%p", pfn);
-        *spte_p = 0;
+        *spte_p = l1e_empty();
         return 0;
     }
 
-    gpte |= _PAGE_ACCESSED;
-    spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+    l1e_add_flags(&gpte, _PAGE_ACCESSED);
+    spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
 
-    if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) ||
+    if ( shadow_mode_log_dirty(d) || !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
          mfn_is_page_table(mfn) )
     {
-        spte &= ~_PAGE_RW;
+        l1e_remove_flags(&spte, _PAGE_RW);
     }
 
-    SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
+    SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p",
+             l1e_get_value(spte), l1e_get_value(gpte));
     *gpte_p = gpte;
     *spte_p = spte;
 
@@ -632,23 +638,24 @@ static inline int l1pte_read_fault(
 }
 
 static inline void l1pte_propagate_from_guest(
-    struct domain *d, unsigned long gpte, unsigned long *spte_p)
+    struct domain *d, l1_pgentry_t gpte, l1_pgentry_t *spte_p)
 { 
-    unsigned long mfn, spte;
+    unsigned long mfn;
+    l1_pgentry_t spte;
 
-    spte = 0;
+    spte = l1e_empty();
 
-    if ( ((gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+    if ( ((l1e_get_flags(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
           (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
-         VALID_MFN(mfn = __gpfn_to_mfn(d, gpte >> PAGE_SHIFT)) )
+         VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
     {
-        spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+        spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
         
         if ( shadow_mode_log_dirty(d) ||
-             !(gpte & _PAGE_DIRTY) ||
+             !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
              mfn_is_page_table(mfn) )
         {
-            spte &= ~_PAGE_RW;
+            l1e_remove_flags(&spte, _PAGE_RW);
         }
     }
 
@@ -661,14 +668,15 @@ static inline void l1pte_propagate_from_
 }
 
 static inline void hl2e_propagate_from_guest(
-    struct domain *d, unsigned long gpde, unsigned long *hl2e_p)
+    struct domain *d, l2_pgentry_t gpde, l1_pgentry_t *hl2e_p)
 {
-    unsigned long pfn = gpde >> PAGE_SHIFT;
-    unsigned long mfn, hl2e;
-
-    hl2e = 0;
+    unsigned long pfn = l2e_get_pfn(gpde);
+    unsigned long mfn;
+    l1_pgentry_t hl2e;
+    
+    hl2e = l1e_empty();
 
-    if ( gpde & _PAGE_PRESENT )
+    if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
     {
         if ( unlikely((current->domain != d) && !shadow_mode_external(d)) )
         {
@@ -683,30 +691,31 @@ static inline void hl2e_propagate_from_g
             mfn = __gpfn_to_mfn(d, pfn);
 
         if ( VALID_MFN(mfn) && (mfn < max_page) )
-            hl2e = (mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR;
+            hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
     }
 
-    if ( hl2e || gpde )
-        SH_VVLOG("%s: gpde=%p hl2e=%p", __func__, gpde, hl2e);
+    if ( l1e_get_value(hl2e) || l2e_get_value(gpde) )
+        SH_VVLOG("%s: gpde=%p hl2e=%p", __func__,
+                 l2e_get_value(gpde), l1e_get_value(hl2e));
 
     *hl2e_p = hl2e;
 }
 
 static inline void l2pde_general(
     struct domain *d,
-    unsigned long *gpde_p,
-    unsigned long *spde_p,
+    l2_pgentry_t *gpde_p,
+    l2_pgentry_t *spde_p,
     unsigned long sl1mfn)
 {
-    unsigned long gpde = *gpde_p;
-    unsigned long spde;
+    l2_pgentry_t gpde = *gpde_p;
+    l2_pgentry_t spde;
 
-    spde = 0;
-    if ( (gpde & _PAGE_PRESENT) && (sl1mfn != 0) )
+    spde = l2e_empty();
+    if ( (l2e_get_flags(gpde) & _PAGE_PRESENT) && (sl1mfn != 0) )
     {
-        spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) | 
-            _PAGE_RW | _PAGE_ACCESSED;
-        gpde |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
+        spde = l2e_create_pfn(sl1mfn, 
+                              l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED);
+        l2e_add_flags(&gpde, _PAGE_ACCESSED); /* N.B. PDEs do not have a dirty bit. */
 
         // XXX mafetter: Hmm...
         //     Shouldn't the dirty log be checked/updated here?
@@ -715,19 +724,21 @@ static inline void l2pde_general(
         *gpde_p = gpde;
     }
 
-    if ( spde || gpde )
-        SH_VVLOG("%s: gpde=%p, new spde=%p", __func__, gpde, spde);
+    if ( l2e_get_value(spde) || l2e_get_value(gpde) )
+        SH_VVLOG("%s: gpde=%p, new spde=%p", __func__,
+                 l2e_get_value(gpde), l2e_get_value(spde));
 
     *spde_p = spde;
 }
 
 static inline void l2pde_propagate_from_guest(
-    struct domain *d, unsigned long *gpde_p, unsigned long *spde_p)
+    struct domain *d, l2_pgentry_t *gpde_p, l2_pgentry_t *spde_p)
 {
-    unsigned long gpde = *gpde_p, sl1mfn = 0;
+    l2_pgentry_t gpde = *gpde_p;
+    unsigned long sl1mfn = 0;
 
-    if ( gpde & _PAGE_PRESENT )
-        sl1mfn =  __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
+    if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
+        sl1mfn =  __shadow_status(d, l2e_get_pfn(gpde), PGT_l1_shadow);
     l2pde_general(d, gpde_p, spde_p, sl1mfn);
 }
     
@@ -738,10 +749,10 @@ static inline void l2pde_propagate_from_
 static int inline
 validate_pte_change(
     struct domain *d,
-    unsigned long new_pte,
-    unsigned long *shadow_pte_p)
+    l1_pgentry_t new_pte,
+    l1_pgentry_t *shadow_pte_p)
 {
-    unsigned long old_spte, new_spte;
+    l1_pgentry_t old_spte, new_spte;
 
     perfc_incrc(validate_pte_calls);
 
@@ -754,16 +765,16 @@ validate_pte_change(
 
     // only do the ref counting if something important changed.
     //
-    if ( ((old_spte | new_spte) & _PAGE_PRESENT ) &&
-         ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT)) )
+    if ( ((l1e_get_value(old_spte) | l1e_get_value(new_spte)) & _PAGE_PRESENT ) &&
+         l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
     {
         perfc_incrc(validate_pte_changes);
 
-        if ( (new_spte & _PAGE_PRESENT) &&
-             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
-            new_spte = 0;
-        if ( old_spte & _PAGE_PRESENT )
-            put_page_from_l1e(mk_l1_pgentry(old_spte), d);
+        if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(new_spte, d) )
+            new_spte = l1e_empty();
+        if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
+            put_page_from_l1e(old_spte, d);
     }
 
     *shadow_pte_p = new_spte;
@@ -777,10 +788,10 @@ validate_pte_change(
 static int inline
 validate_hl2e_change(
     struct domain *d,
-    unsigned long new_gpde,
-    unsigned long *shadow_hl2e_p)
+    l2_pgentry_t new_gpde,
+    l1_pgentry_t *shadow_hl2e_p)
 {
-    unsigned long old_hl2e, new_hl2e;
+    l1_pgentry_t old_hl2e, new_hl2e;
 
     perfc_incrc(validate_hl2e_calls);
 
@@ -789,16 +800,16 @@ validate_hl2e_change(
 
     // Only do the ref counting if something important changed.
     //
-    if ( ((old_hl2e | new_hl2e) & _PAGE_PRESENT) &&
-         ((old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT)) )
+    if ( ((l1e_get_flags(old_hl2e) | l1e_get_flags(new_hl2e)) & _PAGE_PRESENT) &&
+         l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT) )
     {
         perfc_incrc(validate_hl2e_changes);
 
-        if ( (new_hl2e & _PAGE_PRESENT) &&
-             !get_page(pfn_to_page(new_hl2e >> PAGE_SHIFT), d) )
-            new_hl2e = 0;
-        if ( old_hl2e & _PAGE_PRESENT )
-            put_page(pfn_to_page(old_hl2e >> PAGE_SHIFT));
+        if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
+             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), d) )
+            new_hl2e = l1e_empty();
+        if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
+            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
     }
 
     *shadow_hl2e_p = new_hl2e;
@@ -813,10 +824,10 @@ validate_hl2e_change(
 static int inline
 validate_pde_change(
     struct domain *d,
-    unsigned long new_gpde,
-    unsigned long *shadow_pde_p)
+    l2_pgentry_t new_gpde,
+    l2_pgentry_t *shadow_pde_p)
 {
-    unsigned long old_spde, new_spde;
+    l2_pgentry_t old_spde, new_spde;
 
     perfc_incrc(validate_pde_calls);
 
@@ -828,16 +839,16 @@ validate_pde_change(
 
     // Only do the ref counting if something important changed.
     //
-    if ( ((old_spde | new_spde) & _PAGE_PRESENT) &&
-         ((old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT)) )
+    if ( ((l2e_get_value(old_spde) | l2e_get_value(new_spde)) & _PAGE_PRESENT) &&
+         l2e_has_changed(&old_spde, &new_spde, _PAGE_PRESENT) )
     {
         perfc_incrc(validate_pde_changes);
 
-        if ( (new_spde & _PAGE_PRESENT) &&
-             !get_shadow_ref(new_spde >> PAGE_SHIFT) )
+        if ( (l2e_get_flags(new_spde) & _PAGE_PRESENT) &&
+             !get_shadow_ref(l2e_get_pfn(new_spde)) )
             BUG();
-        if ( old_spde & _PAGE_PRESENT )
-            put_shadow_ref(old_spde >> PAGE_SHIFT);
+        if ( l2e_get_flags(old_spde) & _PAGE_PRESENT )
+            put_shadow_ref(l2e_get_pfn(old_spde));
     }
 
     *shadow_pde_p = new_spde;
@@ -1347,19 +1358,20 @@ shadow_update_min_max(unsigned long smfn
 extern void shadow_map_l1_into_current_l2(unsigned long va);
 
 void static inline
-shadow_set_l1e(unsigned long va, unsigned long new_spte, int create_l1_shadow)
+shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
 {
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
-    unsigned long sl2e, old_spte;
+    l2_pgentry_t sl2e;
+    l1_pgentry_t old_spte;
 
 #if 0
     printk("shadow_set_l1e(va=%p, new_spte=%p, create=%d)\n",
-           va, new_spte, create_l1_shadow);
+           va, l1e_get_value(new_spte), create_l1_shadow);
 #endif
 
     __shadow_get_l2e(ed, va, &sl2e);
-    if ( !(sl2e & _PAGE_PRESENT) )
+    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
     {
         /*
          * Either the L1 is not shadowed, or the shadow isn't linked into
@@ -1372,12 +1384,11 @@ shadow_set_l1e(unsigned long va, unsigne
         }
         else /* check to see if it exists; if so, link it in */
         {
-            unsigned long gpde =
-                l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]);
-            unsigned long gl1pfn = gpde >> PAGE_SHIFT;
+            l2_pgentry_t gpde = linear_l2_table(ed)[l2_table_offset(va)];
+            unsigned long gl1pfn = l2e_get_pfn(gpde);
             unsigned long sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow);
 
-            ASSERT( gpde & _PAGE_PRESENT );
+            ASSERT( l2e_get_flags(gpde) & _PAGE_PRESENT );
 
             if ( sl1mfn )
             {
@@ -1397,47 +1408,49 @@ shadow_set_l1e(unsigned long va, unsigne
         }
     }
 
-    old_spte = l1_pgentry_val(shadow_linear_pg_table[l1_linear_offset(va)]);
+    old_spte = shadow_linear_pg_table[l1_linear_offset(va)];
 
     // only do the ref counting if something important changed.
     //
-    if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
+    if ( l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
     {
-        if ( (new_spte & _PAGE_PRESENT) &&
-             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
-            new_spte = 0;
-        if ( old_spte & _PAGE_PRESENT )
-            put_page_from_l1e(mk_l1_pgentry(old_spte), d);
+        if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(new_spte, d) )
+            new_spte = l1e_empty();
+        if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
+            put_page_from_l1e(old_spte, d);
     }
 
-    shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
+    shadow_linear_pg_table[l1_linear_offset(va)] = new_spte;
 
-    shadow_update_min_max(sl2e >> PAGE_SHIFT, l1_table_offset(va));
+    shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
 }
 
 /************************************************************************/
 
-static inline unsigned long gva_to_gpte(unsigned long gva)
+static inline l1_pgentry_t gva_to_gpte(unsigned long gva)
 {
-    unsigned long gpde, gpte;
+    l2_pgentry_t gpde;
+    l1_pgentry_t gpte;
     struct exec_domain *ed = current;
 
     ASSERT( shadow_mode_translate(current->domain) );
 
     __guest_get_l2e(ed, gva, &gpde);
-    if ( unlikely(!(gpde & _PAGE_PRESENT)) )
-        return 0;
+    if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
+        return l1e_empty();
 
     // This is actually overkill - we only need to make sure the hl2
     // is in-sync.
     //
     shadow_sync_va(ed, gva);
 
-    if ( unlikely(__get_user(gpte, (unsigned long *)
-                             &linear_pg_table[gva >> PAGE_SHIFT])) )
+    if ( unlikely(__copy_from_user(&gpte,
+                                   &linear_pg_table[gva >> PAGE_SHIFT],
+                                   sizeof(gpte))) )
     {
         FSH_LOG("gva_to_gpte got a fault on gva=%p", gva);
-        return 0;
+        return l1e_empty();
     }
 
     return gpte;
@@ -1445,13 +1458,13 @@ static inline unsigned long gva_to_gpte(
 
 static inline unsigned long gva_to_gpa(unsigned long gva)
 {
-    unsigned long gpte;
+    l1_pgentry_t gpte;
 
     gpte = gva_to_gpte(gva);
-    if ( !(gpte & _PAGE_PRESENT) )
+    if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
         return 0;
 
-    return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
+    return l1e_get_phys(gpte) + (gva & ~PAGE_MASK); 
 }
 
 /************************************************************************/
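
[shadow.c note] gva_to_gpte() used to signal "no translation" with a plain
0; it now returns l1e_empty(), so callers must test the present flag with
l1e_get_flags() instead of comparing raw values.  A minimal caller, for
illustration only (gva and gpa are hypothetical locals):

    l1_pgentry_t gpte = gva_to_gpte(gva);

    if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
        return 0;                               /* no valid mapping */
    gpa = l1e_get_phys(gpte) + (gva & ~PAGE_MASK);

This is exactly the pattern gva_to_gpa() above and vmx_do_page_fault()
below follow.
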
Index: xen/arch/x86/vmx.c
===================================================================
--- xen.orig/arch/x86/vmx.c     2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/vmx.c  2005-04-15 13:53:33.000000000 +0200
@@ -109,7 +109,8 @@ static int vmx_do_page_fault(unsigned lo
 {
     struct exec_domain *ed = current;
     unsigned long eip;
-    unsigned long gpte, gpa;
+    l1_pgentry_t gpte;
+    unsigned long gpa; /* FIXME: PAE */
     int result;
 
 #if VMX_DEBUG
@@ -132,9 +133,9 @@ static int vmx_do_page_fault(unsigned lo
     }
 
     gpte = gva_to_gpte(va);
-    if (!(gpte & _PAGE_PRESENT) )
+    if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
             return 0;
-    gpa = (gpte & PAGE_MASK) + (va & ~PAGE_MASK);
+    gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
 
     /* Use 1:1 page table to identify MMIO address space */
     if (mmio_space(gpa))
Index: xen/arch/x86/mm.c
===================================================================
--- xen.orig/arch/x86/mm.c      2005-04-15 13:53:11.000000000 +0200
+++ xen/arch/x86/mm.c   2005-04-15 13:56:01.000000000 +0200
@@ -244,9 +244,9 @@ void invalidate_shadow_ldt(struct exec_d
 
     for ( i = 16; i < 32; i++ )
     {
-        pfn = l1_pgentry_to_pfn(d->arch.perdomain_ptes[i]);
+        pfn = l1e_get_pfn(d->arch.perdomain_ptes[i]);
         if ( pfn == 0 ) continue;
-        d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
+        d->arch.perdomain_ptes[i] = l1e_empty();
         page = &frame_table[pfn];
         ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
         ASSERT_PAGE_IS_DOMAIN(page, d->domain);
@@ -283,7 +283,8 @@ int map_ldt_shadow_page(unsigned int off
 {
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
-    unsigned long l1e, nl1e, gpfn, gmfn;
+    unsigned long gpfn, gmfn;
+    l1_pgentry_t l1e, nl1e;
     unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
     int res;
 
@@ -301,13 +302,14 @@ int map_ldt_shadow_page(unsigned int off
     shadow_sync_va(ed, gva);
 
     TOGGLE_MODE();
-    __get_user(l1e, (unsigned long *)&linear_pg_table[l1_linear_offset(gva)]);
+    __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
+                     sizeof(l1e));
     TOGGLE_MODE();
 
-    if ( unlikely(!(l1e & _PAGE_PRESENT)) )
+    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
         return 0;
 
-    gpfn = l1_pgentry_to_pfn(mk_l1_pgentry(l1e));
+    gpfn = l1e_get_pfn(l1e);
     gmfn = __gpfn_to_mfn(d, gpfn);
     if ( unlikely(!VALID_MFN(gmfn)) )
         return 0;
@@ -325,9 +327,9 @@ int map_ldt_shadow_page(unsigned int off
     if ( unlikely(!res) )
         return 0;
 
-    nl1e = (l1e & ~PAGE_MASK) | (gmfn << PAGE_SHIFT) | _PAGE_RW;
+    nl1e = l1e_create_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
 
-    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(nl1e);
+    ed->arch.perdomain_ptes[off + 16] = nl1e;
     ed->arch.shadow_ldt_mapcnt++;
 
     return 1;
@@ -392,13 +394,13 @@ get_linear_pagetable(
 
     ASSERT( !shadow_mode_enabled(d) );
 
-    if ( (root_pgentry_val(re) & _PAGE_RW) )
+    if ( (root_get_flags(re) & _PAGE_RW) )
     {
         MEM_LOG("Attempt to create linear p.t. with write perms");
         return 0;
     }
 
-    if ( (pfn = root_pgentry_to_pfn(re)) != re_pfn )
+    if ( (pfn = root_get_pfn(re)) != re_pfn )
     {
         /* Make sure the mapped frame belongs to the correct domain. */
         if ( unlikely(!get_page_from_pagenr(pfn, d)) )
@@ -431,17 +433,17 @@ int
 get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *d)
 {
-    unsigned long l1v = l1_pgentry_val(l1e);
-    unsigned long mfn = l1_pgentry_to_pfn(l1e);
+    unsigned long mfn = l1e_get_pfn(l1e);
     struct pfn_info *page = &frame_table[mfn];
     extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
 
-    if ( !(l1v & _PAGE_PRESENT) )
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1v & L1_DISALLOW_MASK) )
+    if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
     {
-        MEM_LOG("Bad L1 type settings %p %p", l1v, l1v & L1_DISALLOW_MASK);
+        MEM_LOG("Bad L1 type settings %p %p", l1e_get_value(l1e),
+                l1e_get_value(l1e) & L1_DISALLOW_MASK);
         return 0;
     }
 
@@ -466,7 +468,7 @@ get_page_from_l1e(
         d = dom_io;
     }
 
-    return ((l1v & _PAGE_RW) ?
+    return ((l1e_get_flags(l1e) & _PAGE_RW) ?
             get_page_and_type(page, d, PGT_writable_page) :
             get_page(page, d));
 }
@@ -482,18 +484,18 @@ get_page_from_l2e(
 
     ASSERT( !shadow_mode_enabled(d) );
 
-    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely((l2_pgentry_val(l2e) & L2_DISALLOW_MASK)) )
+    if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) )
     {
         MEM_LOG("Bad L2 page type settings %p",
-                l2_pgentry_val(l2e) & L2_DISALLOW_MASK);
+                l2e_get_value(l2e) & L2_DISALLOW_MASK);
         return 0;
     }
 
     rc = get_page_and_type_from_pagenr(
-        l2_pgentry_to_pfn(l2e), 
+        l2e_get_pfn(l2e), 
         PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
 
 #if defined(__i386__)
@@ -510,18 +512,18 @@ static int 
 get_page_from_l3e(
     l3_pgentry_t l3e, unsigned long pfn, struct domain *d)
 {
-    if ( !(l3_pgentry_val(l3e) & _PAGE_PRESENT) )
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely((l3_pgentry_val(l3e) & L3_DISALLOW_MASK)) )
+    if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) )
     {
         MEM_LOG("Bad L3 page type settings %p",
-                l3_pgentry_val(l3e) & L3_DISALLOW_MASK);
+                l3e_get_value(l3e) & L3_DISALLOW_MASK);
         return 0;
     }
 
     return get_page_and_type_from_pagenr(
-        l3_pgentry_to_pfn(l3e), PGT_l2_page_table, d);
+        l3e_get_pfn(l3e), PGT_l2_page_table, d);
 }
 
 
@@ -531,18 +533,18 @@ get_page_from_l4e(
 {
     int rc;
 
-    if ( !(l4_pgentry_val(l4e) & _PAGE_PRESENT) )
+    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely((l4_pgentry_val(l4e) & L4_DISALLOW_MASK)) )
+    if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) )
     {
         MEM_LOG("Bad L4 page type settings %p",
-                l4_pgentry_val(l4e) & L4_DISALLOW_MASK);
+                l4e_get_value(l4e) & L4_DISALLOW_MASK);
         return 0;
     }
 
     rc = get_page_and_type_from_pagenr(
-        l4_pgentry_to_pfn(l4e), PGT_l3_page_table, d);
+        l4e_get_pfn(l4e), PGT_l3_page_table, d);
 
     if ( unlikely(!rc) )
         return get_linear_pagetable(l4e, pfn, d);
@@ -555,12 +557,11 @@ get_page_from_l4e(
 
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
-    unsigned long    l1v  = l1_pgentry_val(l1e);
-    unsigned long    pfn  = l1_pgentry_to_pfn(l1e);
+    unsigned long    pfn  = l1e_get_pfn(l1e);
     struct pfn_info *page = &frame_table[pfn];
     struct domain   *e;
 
-    if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) )
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
         return;
 
     e = page_get_owner(page);
@@ -577,12 +578,13 @@ void put_page_from_l1e(l1_pgentry_t l1e,
          * mappings and which unmappings are counted via the grant entry, but
          * really it doesn't matter as privileged domains have carte blanche.
          */
-        if ( likely(gnttab_check_unmap(e, d, pfn, !(l1v & _PAGE_RW))) )
+        if (likely(gnttab_check_unmap(e, d, pfn,
+                                      !(l1e_get_flags(l1e) & _PAGE_RW))))
             return;
         /* Assume this mapping was made via MMUEXT_SET_FOREIGNDOM... */
     }
 
-    if ( l1v & _PAGE_RW )
+    if ( l1e_get_flags(l1e) & _PAGE_RW )
     {
         put_page_and_type(page);
     }
@@ -606,9 +608,9 @@ void put_page_from_l1e(l1_pgentry_t l1e,
  */
 static void put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
 {
-    if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) && 
-         (l2_pgentry_to_pfn(l2e) != pfn) )
-        put_page_and_type(&frame_table[l2_pgentry_to_pfn(l2e)]);
+    if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
+         (l2e_get_pfn(l2e) != pfn) )
+        put_page_and_type(&frame_table[l2e_get_pfn(l2e)]);
 }
 
 
@@ -616,17 +618,17 @@ static void put_page_from_l2e(l2_pgentry
 
 static void put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn)
 {
-    if ( (l3_pgentry_val(l3e) & _PAGE_PRESENT) && 
-         (l3_pgentry_to_pfn(l3e) != pfn) )
-        put_page_and_type(&frame_table[l3_pgentry_to_pfn(l3e)]);
+    if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
+         (l3e_get_pfn(l3e) != pfn) )
+        put_page_and_type(&frame_table[l3e_get_pfn(l3e)]);
 }
 
 
 static void put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn)
 {
-    if ( (l4_pgentry_val(l4e) & _PAGE_PRESENT) && 
-         (l4_pgentry_to_pfn(l4e) != pfn) )
-        put_page_and_type(&frame_table[l4_pgentry_to_pfn(l4e)]);
+    if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
+         (l4e_get_pfn(l4e) != pfn) )
+        put_page_and_type(&frame_table[l4e_get_pfn(l4e)]);
 }
 
 #endif /* __x86_64__ */
@@ -686,10 +688,10 @@ static int alloc_l2_table(struct pfn_inf
            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            ROOT_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
     pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+        l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
     pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) | 
-                      __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_pt),
+                        __PAGE_HYPERVISOR);
 #endif
 
     unmap_domain_mem(pl2e);
@@ -753,11 +755,11 @@ static int alloc_l4_table(struct pfn_inf
     memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
            ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-    pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l4_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+    pl4e[l4_table_offset(LINEAR_PT_VIRT_START)]
+        = l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
     pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l4_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_l3) | 
-                      __PAGE_HYPERVISOR);
+        l4e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_l3),
+                        __PAGE_HYPERVISOR);
 
     return 1;
 
@@ -837,14 +839,15 @@ static inline int update_l1e(l1_pgentry_
                              l1_pgentry_t  ol1e, 
                              l1_pgentry_t  nl1e)
 {
-    unsigned long o = l1_pgentry_val(ol1e);
-    unsigned long n = l1_pgentry_val(nl1e);
+    /* FIXME: breaks with PAE */
+    unsigned long o = l1e_get_value(ol1e);
+    unsigned long n = l1e_get_value(nl1e);
 
     if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
-         unlikely(o != l1_pgentry_val(ol1e)) )
+         unlikely(o != l1e_get_value(ol1e)) )
     {
         MEM_LOG("Failed to update %p -> %p: saw %p",
-                l1_pgentry_val(ol1e), l1_pgentry_val(nl1e), o);
+                l1e_get_value(ol1e), l1e_get_value(nl1e), o);
         return 0;
     }
 
@@ -856,27 +859,24 @@ static inline int update_l1e(l1_pgentry_
 static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
 {
     l1_pgentry_t ol1e;
-    unsigned long _ol1e;
     struct domain *d = current->domain;
 
     ASSERT( !shadow_mode_enabled(d) );
 
-    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
         return 0;
-    ol1e = mk_l1_pgentry(_ol1e);
 
-    if ( l1_pgentry_val(nl1e) & _PAGE_PRESENT )
+    if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
     {
-        if ( unlikely(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) )
+        if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) )
         {
             MEM_LOG("Bad L1 type settings %p", 
-                    l1_pgentry_val(nl1e) & L1_DISALLOW_MASK);
+                    l1e_get_value(nl1e) & L1_DISALLOW_MASK);
             return 0;
         }
 
         /* Fast path for identical mapping, r/w and presence. */
-        if ( ((l1_pgentry_val(ol1e) ^ l1_pgentry_val(nl1e)) & 
-              ((PADDR_MASK & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT)) == 0 )
+        if ( !l1e_has_changed(&ol1e, &nl1e, _PAGE_RW | _PAGE_PRESENT))
             return update_l1e(pl1e, ol1e, nl1e);
 
         if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
@@ -901,12 +901,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
 
 #define UPDATE_ENTRY(_t,_p,_o,_n) ({                                    \
     unsigned long __o = cmpxchg((unsigned long *)(_p),                  \
-                                _t ## _pgentry_val(_o),                 \
-                                _t ## _pgentry_val(_n));                \
-    if ( __o != _t ## _pgentry_val(_o) )                                \
+                                _t ## e_get_value(_o),                  \
+                                _t ## e_get_value(_n));                 \
+    if ( __o != _t ## e_get_value(_o) )                                 \
         MEM_LOG("Failed to update %p -> %p: saw %p",                    \
-                _t ## _pgentry_val(_o), _t ## _pgentry_val(_n), __o);   \
-    (__o == _t ## _pgentry_val(_o)); })
+                _t ## e_get_value(_o), _t ## e_get_value(_n), __o);     \
+    (__o == _t ## e_get_value(_o)); })
 
 
 /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
@@ -915,7 +915,6 @@ static int mod_l2_entry(l2_pgentry_t *pl
                         unsigned long pfn)
 {
     l2_pgentry_t ol2e;
-    unsigned long _ol2e;
 
     if ( unlikely(!is_guest_l2_slot(pgentry_ptr_to_slot(pl2e))) )
     {
@@ -923,22 +922,20 @@ static int mod_l2_entry(l2_pgentry_t *pl
         return 0;
     }
 
-    if ( unlikely(__get_user(_ol2e, (unsigned long *)pl2e) != 0) )
+    if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
         return 0;
-    ol2e = mk_l2_pgentry(_ol2e);
 
-    if ( l2_pgentry_val(nl2e) & _PAGE_PRESENT )
+    if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
     {
-        if ( unlikely(l2_pgentry_val(nl2e) & L2_DISALLOW_MASK) )
+        if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
         {
             MEM_LOG("Bad L2 type settings %p", 
-                    l2_pgentry_val(nl2e) & L2_DISALLOW_MASK);
+                    l2e_get_value(nl2e) & L2_DISALLOW_MASK);
             return 0;
         }
 
         /* Fast path for identical mapping and presence. */
-        if ( ((l2_pgentry_val(ol2e) ^ l2_pgentry_val(nl2e)) & 
-              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
+        if ( !l2e_has_changed(&ol2e, &nl2e, _PAGE_PRESENT))
             return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
 
         if ( unlikely(!get_page_from_l2e(nl2e, pfn, current->domain,
@@ -971,7 +968,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
                         unsigned long pfn)
 {
     l3_pgentry_t ol3e;
-    unsigned long _ol3e;
 
     if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
     {
@@ -979,22 +975,20 @@ static int mod_l3_entry(l3_pgentry_t *pl
         return 0;
     }
 
-    if ( unlikely(__get_user(_ol3e, (unsigned long *)pl3e) != 0) )
+    if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
         return 0;
-    ol3e = mk_l3_pgentry(_ol3e);
 
-    if ( l3_pgentry_val(nl3e) & _PAGE_PRESENT )
+    if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
     {
-        if ( unlikely(l3_pgentry_val(nl3e) & L3_DISALLOW_MASK) )
+        if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) )
         {
             MEM_LOG("Bad L3 type settings %p", 
-                    l3_pgentry_val(nl3e) & L3_DISALLOW_MASK);
+                    l3e_get_value(nl3e) & L3_DISALLOW_MASK);
             return 0;
         }
 
         /* Fast path for identical mapping and presence. */
-        if ( ((l3_pgentry_val(ol3e) ^ l3_pgentry_val(nl3e)) & 
-              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
+        if (!l3e_has_changed(&ol3e, &nl3e, _PAGE_PRESENT))
             return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
 
         if ( unlikely(!get_page_from_l3e(nl3e, pfn, current->domain)) )
@@ -1024,7 +1018,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
                         unsigned long pfn)
 {
     l4_pgentry_t ol4e;
-    unsigned long _ol4e;
 
     if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
     {
@@ -1032,22 +1025,20 @@ static int mod_l4_entry(l4_pgentry_t *pl
         return 0;
     }
 
-    if ( unlikely(__get_user(_ol4e, (unsigned long *)pl4e) != 0) )
+    if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
         return 0;
-    ol4e = mk_l4_pgentry(_ol4e);
 
-    if ( l4_pgentry_val(nl4e) & _PAGE_PRESENT )
+    if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
     {
-        if ( unlikely(l4_pgentry_val(nl4e) & L4_DISALLOW_MASK) )
+        if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
         {
             MEM_LOG("Bad L4 type settings %p", 
-                    l4_pgentry_val(nl4e) & L4_DISALLOW_MASK);
+                    l4e_get_value(nl4e) & L4_DISALLOW_MASK);
             return 0;
         }
 
         /* Fast path for identical mapping and presence. */
-        if ( ((l4_pgentry_val(ol4e) ^ l4_pgentry_val(nl4e)) & 
-              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
+        if (!l4e_has_changed(&ol4e, &nl4e, _PAGE_PRESENT))
             return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
 
         if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
@@ -1878,8 +1869,11 @@ int do_mmu_update(
                 if ( likely(get_page_type(
                     page, type_info & (PGT_type_mask|PGT_va_mask))) )
                 {
-                    okay = mod_l1_entry((l1_pgentry_t *)va, 
-                                        mk_l1_pgentry(req.val));
+                    l1_pgentry_t pte;
+
+                    /* FIXME: doesn't work with PAE */
+                    pte = l1e_create_phys(req.val, req.val);
+                    okay = mod_l1_entry((l1_pgentry_t *)va, pte);
                     put_page_type(page);
                 }
                 break;
@@ -1887,9 +1881,11 @@ int do_mmu_update(
                 ASSERT(!shadow_mode_enabled(d));
                 if ( likely(get_page_type(page, PGT_l2_page_table)) )
                 {
-                    okay = mod_l2_entry((l2_pgentry_t *)va, 
-                                        mk_l2_pgentry(req.val),
-                                        mfn);
+                    l2_pgentry_t l2e;
+
+                    /* FIXME: doesn't work with PAE */
+                    l2e = l2e_create_phys(req.val, req.val);
+                    okay = mod_l2_entry((l2_pgentry_t *)va, l2e, mfn);
                     put_page_type(page);
                 }
                 break;
@@ -1898,9 +1894,11 @@ int do_mmu_update(
                 ASSERT(!shadow_mode_enabled(d));
                 if ( likely(get_page_type(page, PGT_l3_page_table)) )
                 {
-                    okay = mod_l3_entry((l3_pgentry_t *)va, 
-                                        mk_l3_pgentry(req.val),
-                                        mfn);
+                    l3_pgentry_t l3e;
+
+                    /* FIXME: doesn't work with PAE */
+                    l3e = l3e_create_phys(req.val,req.val);
+                    okay = mod_l3_entry((l3_pgentry_t *)va, l3e, mfn);
                     put_page_type(page);
                 }
                 break;
@@ -1908,9 +1906,10 @@ int do_mmu_update(
                 ASSERT(!shadow_mode_enabled(d));
                 if ( likely(get_page_type(page, PGT_l4_page_table)) )
                 {
-                    okay = mod_l4_entry((l4_pgentry_t *)va, 
-                                        mk_l4_pgentry(req.val),
-                                        mfn);
+                    l4_pgentry_t l4e;
+
+                    l4e = l4e_create_phys(req.val,req.val);
+                    okay = mod_l4_entry((l4_pgentry_t *)va, l4e, mfn);
                     put_page_type(page);
                 }
                 break;
@@ -2025,12 +2024,12 @@ int do_mmu_update(
  * and is running in a shadow mode
  */
 int update_shadow_va_mapping(unsigned long va,
-                             unsigned long val,
+                             l1_pgentry_t val,
                              struct exec_domain *ed,
                              struct domain *d)
 {
     unsigned long l1mfn;
-    unsigned long spte;
+    l1_pgentry_t spte;
     int rc = 0;
 
     check_pagetable(ed, "pre-va"); /* debug */
@@ -2056,8 +2055,7 @@ int update_shadow_va_mapping(unsigned lo
      *    to teach it about this boundary case.
      * So we flush this L1 page, if it's out of sync.
      */
-    l1mfn = (l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]) >>
-             PAGE_SHIFT);
+    l1mfn = l2e_get_pfn(linear_l2_table(ed)[l2_table_offset(va)]);
     if ( mfn_out_of_sync(l1mfn) )
     {
         perfc_incrc(extra_va_update_sync);
@@ -2065,8 +2063,8 @@ int update_shadow_va_mapping(unsigned lo
     }
 #endif /* keep check_pagetables() happy */
 
-    if ( unlikely(__put_user(val, &l1_pgentry_val(
-                                 linear_pg_table[l1_linear_offset(va)]))) )
+    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
+                                 &val, sizeof(val))))
     {
         rc = -EINVAL;
         goto out;
@@ -2093,7 +2091,7 @@ int update_shadow_va_mapping(unsigned lo
 }
 
 int update_grant_va_mapping(unsigned long va,
-                            unsigned long _nl1e, 
+                            l1_pgentry_t _nl1e, 
                             struct domain *d,
                             struct exec_domain *ed)
 {
@@ -2106,22 +2104,20 @@ int update_grant_va_mapping(unsigned lon
 
     int             rc = 0;
     l1_pgentry_t   *pl1e;
-    unsigned long   _ol1e;
-
+    l1_pgentry_t    ol1e;
+
     cleanup_writable_pagetable(d);
 
     pl1e = &linear_pg_table[l1_linear_offset(va)];
 
-    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
         rc = -EINVAL;
     else
     {
-        l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
-
-        if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
+        if ( update_l1e(pl1e, ol1e, _nl1e) )
         {
             put_page_from_l1e(ol1e, d);
-            if ( _ol1e & _PAGE_PRESENT )
+            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
                 rc = 0; /* Caller needs to invalidate TLB entry */
             else
                 rc = 1; /* Caller need not invalidate TLB entry */
@@ -2138,7 +2134,7 @@ int update_grant_va_mapping(unsigned lon
 
 
 int do_update_va_mapping(unsigned long va,
-                         unsigned long val, 
+                         l1_pgentry_t  val, 
                          unsigned long flags)
 {
     struct exec_domain *ed  = current;
@@ -2172,7 +2168,7 @@ int do_update_va_mapping(unsigned long v
         rc = update_shadow_va_mapping(va, val, ed, d);
     }
     else if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
-                                     mk_l1_pgentry(val))) )
+                                     val)) )
         rc = -EINVAL;
 
     switch ( flags & UVMF_FLUSHTYPE_MASK )
@@ -2229,7 +2225,7 @@ int do_update_va_mapping(unsigned long v
 }
 
 int do_update_va_mapping_otherdomain(unsigned long va,
-                                     unsigned long val, 
+                                     l1_pgentry_t  val, 
                                      unsigned long flags,
                                      domid_t domid)
 {
@@ -2265,9 +2261,9 @@ void destroy_gdt(struct exec_domain *ed)
 
     for ( i = 0; i < 16; i++ )
     {
-        if ( (pfn = l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i])) != 0 )
+        if ( (pfn = l1e_get_pfn(ed->arch.perdomain_ptes[i])) != 0 )
             put_page_and_type(&frame_table[pfn]);
-        ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
+        ed->arch.perdomain_ptes[i] = l1e_empty();
     }
 }
 
@@ -2324,7 +2320,7 @@ long set_gdt(struct exec_domain *ed, 
     /* Install the new GDT. */
     for ( i = 0; i < nr_pages; i++ )
         ed->arch.perdomain_ptes[i] =
-            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+            l1e_create_pfn(frames[i], __PAGE_HYPERVISOR);
 
     SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
     SET_GDT_ENTRIES(ed, entries);
@@ -2400,7 +2396,7 @@ long do_update_descriptor(unsigned long 
     case PGT_gdt_page:
         /* Disallow updates of Xen-reserved descriptors in the current GDT. */
         for_each_exec_domain(dom, ed) {
-            if ( (l1_pgentry_to_pfn(ed->arch.perdomain_ptes[0]) == mfn) &&
+            if ( (l1e_get_pfn(ed->arch.perdomain_ptes[0]) == mfn) &&
                  (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
                  (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
                 goto out;
@@ -2523,7 +2519,7 @@ void ptwr_flush(struct domain *d, const 
         ol1e = d->arch.ptwr[which].page[i];
         nl1e = pl1e[i];
 
-        if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
+        if ( likely(l1e_get_value(ol1e) == l1e_get_value(nl1e)) )
             continue;
 
         /* Update number of entries modified. */
@@ -2533,10 +2529,10 @@ void ptwr_flush(struct domain *d, const 
          * Fast path for PTEs that have merely been write-protected
          * (e.g., during a Unix fork()). A strict reduction in privilege.
          */
-        if ( likely(l1_pgentry_val(ol1e) == (l1_pgentry_val(nl1e)|_PAGE_RW)) )
+        if ( likely(l1e_get_value(ol1e) == (l1e_get_value(nl1e)|_PAGE_RW)) )
         {
-            if ( likely(l1_pgentry_val(nl1e) & _PAGE_PRESENT) )
-                put_page_type(&frame_table[l1_pgentry_to_pfn(nl1e)]);
+            if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
+                put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
             continue;
         }
 
@@ -2567,7 +2563,7 @@ void ptwr_flush(struct domain *d, const 
     if ( which == PTWR_PT_ACTIVE )
     {
         pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
-        *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
+        l2e_add_flags(pl2e, _PAGE_PRESENT); 
     }
 
     /*
@@ -2584,9 +2580,9 @@ static int ptwr_emulated_update(
     unsigned int bytes,
     unsigned int do_cmpxchg)
 {
-    unsigned long pte, pfn;
+    unsigned long pfn;
     struct pfn_info *page;
-    l1_pgentry_t ol1e, nl1e, *pl1e;
+    l1_pgentry_t pte, ol1e, nl1e, *pl1e;
     struct domain *d = current->domain;
 
     /* Aligned access only, thank you. */
@@ -2598,6 +2594,7 @@ static int ptwr_emulated_update(
     }
 
     /* Turn a sub-word access into a full-word access. */
+    /* FIXME: needs tweaks for PAE */
     if ( (addr & ((BITS_PER_LONG/8)-1)) != 0 )
     {
         int           rc;
@@ -2616,18 +2613,18 @@ static int ptwr_emulated_update(
     }
 
     /* Read the PTE that maps the page being updated. */
-    if ( __get_user(pte, (unsigned long *)
-                    &linear_pg_table[l1_linear_offset(addr)]) )
+    if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
+                         sizeof(pte)))
     {
         MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n");
         return X86EMUL_UNHANDLEABLE;
     }
 
-    pfn  = pte >> PAGE_SHIFT;
+    pfn  = l1e_get_pfn(pte);
     page = &frame_table[pfn];
 
     /* We are looking only for read-only mappings of p.t. pages. */
-    if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
+    if ( ((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
          ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
          (page_get_owner(page) != d) )
     {
@@ -2637,7 +2634,7 @@ static int ptwr_emulated_update(
     }
 
     /* Check the new PTE. */
-    nl1e = mk_l1_pgentry(val);
+    nl1e = l1e_create_phys(val, val & ~PAGE_MASK);
     if ( unlikely(!get_page_from_l1e(nl1e, d)) )
         return X86EMUL_UNHANDLEABLE;
 
@@ -2645,7 +2642,7 @@ static int ptwr_emulated_update(
     pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
-        ol1e = mk_l1_pgentry(old);
+        ol1e = l1e_create_phys(old, old & ~PAGE_MASK);
         if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
         {
             unmap_domain_mem(pl1e);
@@ -2670,8 +2667,7 @@ static int ptwr_emulated_update(
         {
             sl1e = map_domain_mem(
                 ((sstat & PSH_pfn_mask) << PAGE_SHIFT) + (addr & ~PAGE_MASK));
-            l1pte_propagate_from_guest(
-                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(*sl1e));
+            l1pte_propagate_from_guest(d, &nl1e, sl1e);
             unmap_domain_mem(sl1e);
         }
 #endif
@@ -2711,8 +2707,9 @@ static struct x86_mem_emulator ptwr_mem_
 /* Write page fault handler: check if guest is trying to modify a PTE. */
 int ptwr_do_page_fault(struct domain *d, unsigned long addr)
 {
-    unsigned long    pte, pfn, l2e;
+    unsigned long    pfn;
     struct pfn_info *page;
+    l1_pgentry_t     pte;
     l2_pgentry_t    *pl2e;
     int              which;
     u32              l2_idx;
@@ -2724,19 +2721,19 @@ int ptwr_do_page_fault(struct domain *d,
      * Attempt to read the PTE that maps the VA being accessed. By checking for
      * PDE validity in the L2 we avoid many expensive fixups in __get_user().
      */
-    if ( !(l2_pgentry_val(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
+    if ( !(l2e_get_flags(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
            _PAGE_PRESENT) ||
-         __get_user(pte, (unsigned long *)
-                    &linear_pg_table[l1_linear_offset(addr)]) )
+         __copy_from_user(&pte,&linear_pg_table[l1_linear_offset(addr)],
+                          sizeof(pte)) )
     {
         return 0;
     }
 
-    pfn  = pte >> PAGE_SHIFT;
+    pfn  = l1e_get_pfn(pte);
     page = &frame_table[pfn];
 
     /* We are looking only for read-only mappings of p.t. pages. */
-    if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
+    if ( ((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
          ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
          (page_get_owner(page) != d) )
     {
@@ -2766,9 +2763,8 @@ int ptwr_do_page_fault(struct domain *d,
      * an ACTIVE p.t., otherwise it is INACTIVE.
      */
     pl2e = &__linear_l2_table[l2_idx];
-    l2e  = l2_pgentry_val(*pl2e);
     which = PTWR_PT_INACTIVE;
-    if ( (l2e >> PAGE_SHIFT) == pfn )
+    if ( (l2e_get_pfn(*pl2e)) == pfn )
     {
         /*
          * Check the PRESENT bit to set ACTIVE mode.
@@ -2776,7 +2772,7 @@ int ptwr_do_page_fault(struct domain *d,
          * ACTIVE p.t. (it may be the same p.t. mapped at another virt addr).
          * The ptwr_flush call below will restore the PRESENT bit.
          */
-        if ( likely(l2e & _PAGE_PRESENT) ||
+        if ( likely(l2e_get_flags(*pl2e) & _PAGE_PRESENT) ||
              (d->arch.ptwr[PTWR_PT_ACTIVE].l1va &&
               (l2_idx == d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx)) )
             which = PTWR_PT_ACTIVE;
@@ -2806,7 +2802,7 @@ int ptwr_do_page_fault(struct domain *d,
     /* For safety, disconnect the L1 p.t. page from current space. */
     if ( which == PTWR_PT_ACTIVE )
     {
-        *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
+        l2e_remove_flags(pl2e, _PAGE_PRESENT);
         local_flush_tlb(); /* XXX Multi-CPU guests? */
     }
     
@@ -2817,11 +2813,11 @@ int ptwr_do_page_fault(struct domain *d,
            L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
     
     /* Finally, make the p.t. page writable by the guest OS. */
-    pte |= _PAGE_RW;
+    l1e_add_flags(&pte, _PAGE_RW);
     PTWR_PRINTK("[%c] update %p pte to %p\n", PTWR_PRINT_WHICH,
                 &linear_pg_table[addr>>PAGE_SHIFT], pte);
-    if ( unlikely(__put_user(pte, (unsigned long *)
-                             &linear_pg_table[addr>>PAGE_SHIFT])) )
+    if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
+                                 &pte, sizeof(pte))) )
     {
         MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                 &linear_pg_table[addr>>PAGE_SHIFT]);
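
[mm.c note] The l?e_has_changed() predicates fold the old open-coded XOR
tests.  A sketch of the equivalence on x86_32, where PADDR_MASK is ~0UL and
thus (PADDR_MASK & PAGE_MASK) == PAGE_MASK (illustration only, not part of
the patch):

    /* old test, raw words */
    static int old_changed(l1_pgentry_t o, l1_pgentry_t n)
    {
        return ((l1e_get_value(o) ^ l1e_get_value(n)) &
                ((PADDR_MASK & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT)) != 0;
    }

    /* new test, same result as long as an entry fits in one word */
    static int new_changed(l1_pgentry_t o, l1_pgentry_t n)
    {
        return l1e_has_changed(&o, &n, _PAGE_RW | _PAGE_PRESENT);
    }
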
Index: xen/arch/x86/vmx_platform.c
===================================================================
--- xen.orig/arch/x86/vmx_platform.c    2005-04-15 13:53:11.000000000 +0200
+++ xen/arch/x86/vmx_platform.c 2005-04-15 13:53:33.000000000 +0200
@@ -408,7 +408,7 @@ static int vmx_decode(const unsigned cha
 
 static int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
-    unsigned long gpte;
+    l1_pgentry_t gpte;
     unsigned long mfn;
     unsigned long ma;
     unsigned char * inst_start;
@@ -419,7 +419,7 @@ static int inst_copy_from_guest(unsigned
 
     if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
         gpte = gva_to_gpte(guest_eip);
-        mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
+        mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
         ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
         inst_start = (unsigned char *)map_domain_mem(ma);
                 
Index: xen/include/asm-x86/x86_32/page.h
===================================================================
--- xen.orig/include/asm-x86/x86_32/page.h      2005-04-15 13:53:10.000000000 +0200
+++ xen/include/asm-x86/x86_32/page.h   2005-04-15 13:53:33.000000000 +0200
@@ -19,36 +19,93 @@
 #define PADDR_MASK              (~0UL)
 #define VADDR_MASK              (~0UL)
 
+#define _PAGE_NX                0UL
+#define PAGE_FLAG_MASK          0xfff
+
 #ifndef __ASSEMBLY__
 #include <xen/config.h>
-typedef struct { unsigned long l1_lo; } l1_pgentry_t;
-typedef struct { unsigned long l2_lo; } l2_pgentry_t;
+#include <asm/types.h>
+typedef struct { u32 l1_lo; } l1_pgentry_t;
+typedef struct { u32 l2_lo; } l2_pgentry_t;
 typedef l2_pgentry_t root_pgentry_t;
-#endif /* !__ASSEMBLY__ */
 
-/* Strip type from a table entry. */
-#define l1_pgentry_val(_x)   ((_x).l1_lo)
-#define l2_pgentry_val(_x)   ((_x).l2_lo)
-#define root_pgentry_val(_x) (l2_pgentry_val(_x))
+/* read access (deprecated) */
+#define l1e_get_value(_x)         ((_x).l1_lo)
+#define l2e_get_value(_x)         ((_x).l2_lo)
 
-/* Add type to a table entry. */
-#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
-#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
-#define mk_root_pgentry(_x) (mk_l2_pgentry(_x))
+/* read access */
+#define l1e_get_pfn(_x)           ((_x).l1_lo >> PAGE_SHIFT)
+#define l1e_get_phys(_x)          ((_x).l1_lo &  PAGE_MASK)
+#define l1e_get_flags(_x)         ((_x).l1_lo &  PAGE_FLAG_MASK)
 
-/* Turn a typed table entry into a physical address. */
-#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & PAGE_MASK)
-#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & PAGE_MASK)
-#define root_pgentry_to_phys(_x) (l2_pgentry_to_phys(_x))
+#define l2e_get_pfn(_x)           ((_x).l2_lo >> PAGE_SHIFT)
+#define l2e_get_phys(_x)          ((_x).l2_lo &  PAGE_MASK)
+#define l2e_get_flags(_x)         ((_x).l2_lo &  PAGE_FLAG_MASK)
 
-/* Turn a typed table entry into a page index. */
-#define l1_pgentry_to_pfn(_x)   (l1_pgentry_val(_x) >> PAGE_SHIFT) 
-#define l2_pgentry_to_pfn(_x)   (l2_pgentry_val(_x) >> PAGE_SHIFT)
-#define root_pgentry_to_pfn(_x) (l2_pgentry_to_pfn(_x))
+/* write access */
+static inline l1_pgentry_t l1e_empty(void)
+{
+    l1_pgentry_t e = { .l1_lo = 0 };
+    return e;
+}
+static inline l1_pgentry_t l1e_create_pfn(u32 pfn, u32 flags)
+{
+    l1_pgentry_t e = { .l1_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l1_pgentry_t l1e_create_phys(u32 addr, u32 flags)
+{
+    l1_pgentry_t e = { .l1_lo = (addr & PAGE_MASK) | flags };
+    return e;
+}
+static inline void l1e_add_flags(l1_pgentry_t *e, u32 flags)
+{
+    e->l1_lo |= flags;
+}
+static inline void l1e_remove_flags(l1_pgentry_t *e, u32 flags)
+{
+    e->l1_lo &= ~flags;
+}
+
+static inline l2_pgentry_t l2e_empty(void)
+{
+    l2_pgentry_t e = { .l2_lo = 0 };
+    return e;
+}
+static inline l2_pgentry_t l2e_create_pfn(u32 pfn, u32 flags)
+{
+    l2_pgentry_t e = { .l2_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l2_pgentry_t l2e_create_phys(u32 addr, u32 flags)
+{
+    l2_pgentry_t e = { .l2_lo = (addr & PAGE_MASK) | flags };
+    return e;
+}
+static inline void l2e_add_flags(l2_pgentry_t *e, u32 flags)
+{
+    e->l2_lo |= flags;
+}
+static inline void l2e_remove_flags(l2_pgentry_t *e, u32 flags)
+{
+    e->l2_lo &= ~flags;
+}
+
+/* check entries */
+static inline int l1e_has_changed(l1_pgentry_t *e1, l1_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l1_lo ^ e2->l1_lo) & (PAGE_MASK | flags)) != 0;
+}
+static inline int l2e_has_changed(l2_pgentry_t *e1, l2_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l2_lo ^ e2->l2_lo) & (PAGE_MASK | flags)) != 0;
+}
+
+#endif /* !__ASSEMBLY__ */
 
 /* Pagetable walking. */
-#define l2_pgentry_to_l1(_x) \
-  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
+#define l2e_to_l1e(_x) \
+  ((l1_pgentry_t *)__va(l2e_get_phys(_x)))
 
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(_a) \
@@ -62,9 +119,12 @@ typedef l2_pgentry_t root_pgentry_t;
 #define is_guest_l1_slot(_s) (1)
 #define is_guest_l2_slot(_s) ((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT)
 
-#define PGT_root_page_table PGT_l2_page_table
-
-#define _PAGE_NX         0UL
+#define root_get_pfn              l2e_get_pfn
+#define root_get_flags            l2e_get_flags
+#define root_get_value            l2e_get_value
+#define root_empty                l2e_empty
+#define root_create_phys          l2e_create_phys
+#define PGT_root_page_table       PGT_l2_page_table
 
 #define L1_DISALLOW_MASK (3UL << 7)
 #define L2_DISALLOW_MASK (7UL << 7)
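
[x86_32/page.h note] To summarize the new interface in one place, a usage
sketch (use_frame() and refcount_update() are hypothetical helpers, not
part of the patch):

    static void example(unsigned long mfn, l1_pgentry_t old)
    {
        l1_pgentry_t e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);

        l1e_remove_flags(&e, _PAGE_RW);          /* drop write access */

        if ( l1e_get_flags(e) & _PAGE_PRESENT )  /* query the flag bits */
            use_frame(l1e_get_pfn(e));           /* query the frame number */

        /* "changed" == the frame differs, or one of the listed flags does */
        if ( l1e_has_changed(&old, &e, _PAGE_RW | _PAGE_PRESENT) )
            refcount_update(&old, &e);
    }
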
Index: xen/include/asm-x86/mm.h
===================================================================
--- xen.orig/include/asm-x86/mm.h       2005-04-15 13:53:11.000000000 +0200
+++ xen/include/asm-x86/mm.h    2005-04-15 13:53:33.000000000 +0200
@@ -263,13 +263,14 @@ static inline unsigned long phys_to_mach
     unsigned long mfn;
     l1_pgentry_t pte;
 
-   if ( !__get_user(l1_pgentry_val(pte), (__phys_to_machine_mapping + pfn)) &&
-        (l1_pgentry_val(pte) & _PAGE_PRESENT) )
-       mfn = l1_pgentry_to_phys(pte) >> PAGE_SHIFT;
-   else
-       mfn = INVALID_MFN;
-
-   return mfn; 
+    if (!__copy_from_user(&pte, (__phys_to_machine_mapping + pfn),
+                         sizeof(pte))
+       && (l1e_get_flags(pte) & _PAGE_PRESENT) )
+       mfn = l1e_get_pfn(pte);
+    else
+       mfn = INVALID_MFN;
+    
+    return mfn; 
 }
 #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
@@ -352,7 +353,7 @@ void propagate_page_fault(unsigned long 
  * hold a reference to the page.
  */
 int update_grant_va_mapping(unsigned long va,
-                            unsigned long val,
+                            l1_pgentry_t _nl1e, 
                             struct domain *d,
                             struct exec_domain *ed);
 #endif /* __ASM_X86_MM_H__ */
Index: xen/arch/x86/domain_build.c
===================================================================
--- xen.orig/arch/x86/domain_build.c    2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/domain_build.c 2005-04-15 13:53:33.000000000 +0200
@@ -244,9 +244,9 @@ int construct_dom0(struct domain *d,
     l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
     l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
+        l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
     l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
     ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
 
     l2tab += l2_table_offset(dsi.v_start);
@@ -257,12 +257,14 @@ int construct_dom0(struct domain *d,
         {
             l1start = l1tab = (l1_pgentry_t *)mpt_alloc; 
             mpt_alloc += PAGE_SIZE;
-            *l2tab++ = mk_l2_pgentry((unsigned long)l1start | L2_PROT);
+            *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
+            l2tab++;
             clear_page(l1tab);
             if ( count == 0 )
                 l1tab += l1_table_offset(dsi.v_start);
         }
-        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
+        *l1tab = l1e_create_pfn(mfn, L1_PROT);
+        l1tab++;
         
         page = &frame_table[mfn];
         if ( !get_page_and_type(page, d, PGT_writable_page) )
@@ -273,13 +275,13 @@ int construct_dom0(struct domain *d,
 
     /* Pages that are part of page tables must be read only. */
     l2tab = l2start + l2_table_offset(vpt_start);
-    l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*l2tab);
+    l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*l2tab);
     l1tab += l1_table_offset(vpt_start);
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
-        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
+        page = &frame_table[l1e_get_pfn(*l1tab)];
         if ( !opt_dom0_shadow )
-            *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
+            l1e_remove_flags(l1tab, _PAGE_RW);
         else
             if ( !get_page_type(page, PGT_writable_page) )
                 BUG();
@@ -317,7 +319,7 @@ int construct_dom0(struct domain *d,
             get_page(page, d); /* an extra ref because of readable mapping */
         }
         if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
-            l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*++l2tab);
+            l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*++l2tab);
     }
 
 #elif defined(__x86_64__)
@@ -335,9 +337,9 @@ int construct_dom0(struct domain *d,
     l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
     l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
+        l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l4_pgentry(__pa(d->arch.mm_perdomain_l3) | __PAGE_HYPERVISOR);
+        l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     ed->arch.guest_table = mk_pagetable(__pa(l4start));
 
     l4tab += l4_table_offset(dsi.v_start);
@@ -366,13 +368,17 @@ int construct_dom0(struct domain *d,
                     clear_page(l3tab);
                     if ( count == 0 )
                         l3tab += l3_table_offset(dsi.v_start);
-                    *l4tab++ = mk_l4_pgentry(__pa(l3start) | L4_PROT);
+                    *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
+                    l4tab++;
                 }
-                *l3tab++ = mk_l3_pgentry(__pa(l2start) | L3_PROT);
+                *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
+                l3tab++;
             }
-            *l2tab++ = mk_l2_pgentry(__pa(l1start) | L2_PROT);
+            *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
+            l2tab++;
         }
-        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
+        *l1tab = l1e_create_pfn(mfn, L1_PROT);
+        l1tab++;
 
         page = &frame_table[mfn];
         if ( (page->u.inuse.type_info == 0) &&
@@ -384,16 +390,16 @@ int construct_dom0(struct domain *d,
 
     /* Pages that are part of page tables must be read only. */
     l4tab = l4start + l4_table_offset(vpt_start);
-    l3start = l3tab = l4_pgentry_to_l3(*l4tab);
+    l3start = l3tab = l4e_to_l3e(*l4tab);
     l3tab += l3_table_offset(vpt_start);
-    l2start = l2tab = l3_pgentry_to_l2(*l3tab);
+    l2start = l2tab = l3e_to_l2e(*l3tab);
     l2tab += l2_table_offset(vpt_start);
-    l1start = l1tab = l2_pgentry_to_l1(*l2tab);
+    l1start = l1tab = l2e_to_l1e(*l2tab);
     l1tab += l1_table_offset(vpt_start);
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
-        *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
-        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
+        l1e_remove_flags(l1tab, _PAGE_RW);
+        page = &frame_table[l1e_get_pfn(*l1tab)];
 
         /* Read-only mapping + PGC_allocated + page-table page. */
         page->count_info         = PGC_allocated | 3;
@@ -412,10 +418,10 @@ int construct_dom0(struct domain *d,
             if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
             {
                 if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
-                    l3start = l3tab = l4_pgentry_to_l3(*++l4tab); 
-                l2start = l2tab = l3_pgentry_to_l2(*l3tab);
+                    l3start = l3tab = l4e_to_l3e(*++l4tab); 
+                l2start = l2tab = l3e_to_l2e(*l3tab);
             }
-            l1start = l1tab = l2_pgentry_to_l1(*l2tab);
+            l1start = l1tab = l2e_to_l1e(*l2tab);
         }
     }
 
@@ -525,8 +531,8 @@ int construct_dom0(struct domain *d,
 #if defined(__i386__)
     /* Destroy low mappings - they were only for our convenience. */
     for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
-        if ( l2_pgentry_val(l2start[i]) & _PAGE_PSE )
-            l2start[i] = mk_l2_pgentry(0);
+        if ( l2e_get_flags(l2start[i]) & _PAGE_PSE )
+            l2start[i] = l2e_empty();
     zap_low_mappings(); /* Do the same for the idle page tables. */
 #endif
     
@@ -544,17 +550,27 @@ int construct_dom0(struct domain *d,
                                : SHM_enable));
         if ( opt_dom0_translate )
         {
+            /* Hmm, what does this do?
+               Looks like it isn't portable across 32/64-bit and PAE/non-PAE ...
+               -- kraxel */
+
+            /* mafetter: This code is mostly a hack in order to be able to
+             * test with dom0's which are running with shadow translate.
+             * I expect we'll rip this out once we have a stable set of
+             * domU clients which use the various shadow modes, but it's
+             * useful to leave this here for now...
+             */
+
             // map this domain's p2m table into current page table,
             // so that we can easily access it.
             //
-            ASSERT( root_pgentry_val(idle_pg_table[1]) == 0 );
+            ASSERT( root_get_value(idle_pg_table[1]) == 0 );
             ASSERT( pagetable_val(d->arch.phys_table) );
-            idle_pg_table[1] = mk_root_pgentry(
-                pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
+            idle_pg_table[1] = root_create_phys(pagetable_val(d->arch.phys_table),
+                                                __PAGE_HYPERVISOR);
             translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
-                                pagetable_val(ed->arch.guest_table)
-                                >> PAGE_SHIFT);
-            idle_pg_table[1] = mk_root_pgentry(0);
+                                pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT);
+            idle_pg_table[1] = root_empty();
             local_flush_tlb();
         }
 
Index: xen/arch/x86/dom0_ops.c
===================================================================
--- xen.orig/arch/x86/dom0_ops.c        2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/dom0_ops.c     2005-04-15 13:53:33.000000000 +0200
@@ -425,7 +425,7 @@ void arch_getdomaininfo_ctxt(
     {
         for ( i = 0; i < 16; i++ )
             c->gdt_frames[i] = 
-                l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
+                l1e_get_pfn(ed->arch.perdomain_ptes[i]);
         c->gdt_ents = GET_GDT_ENTRIES(ed);
     }
     c->kernel_ss  = ed->arch.kernel_ss;
Index: xen/include/asm-x86/page.h
===================================================================
--- xen.orig/include/asm-x86/page.h     2005-04-15 13:53:10.000000000 +0200
+++ xen/include/asm-x86/page.h  2005-04-15 13:53:33.000000000 +0200
@@ -2,6 +2,13 @@
 #ifndef __X86_PAGE_H__
 #define __X86_PAGE_H__
 
+#ifndef __ASSEMBLY__
+#define PAGE_SIZE           (1UL << PAGE_SHIFT)
+#else
+#define PAGE_SIZE           (1 << PAGE_SHIFT)
+#endif
+#define PAGE_MASK           (~(PAGE_SIZE-1))
+
 #if defined(__i386__)
 #include <asm/x86_32/page.h>
 #elif defined(__x86_64__)
@@ -19,13 +26,6 @@ typedef struct { unsigned long pt_lo; } 
 #define mk_pagetable(_x)    ( (pagetable_t) { (_x) } )
 #endif
 
-#ifndef __ASSEMBLY__
-#define PAGE_SIZE           (1UL << PAGE_SHIFT)
-#else
-#define PAGE_SIZE           (1 << PAGE_SHIFT)
-#endif
-#define PAGE_MASK           (~(PAGE_SIZE-1))
-
 #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
 #define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
 
@@ -70,7 +70,7 @@ typedef struct { unsigned long pt_lo; } 
 #define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
 
 #define va_to_l1mfn(_ed, _va) \
-    (l2_pgentry_val(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
+    (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
 
 extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
 
Index: xen/arch/x86/x86_32/traps.c
===================================================================
--- xen.orig/arch/x86/x86_32/traps.c    2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/x86_32/traps.c 2005-04-15 13:53:33.000000000 +0200
@@ -163,7 +163,7 @@ void show_page_walk(unsigned long addr)
 
     printk("Pagetable walk from %p:\n", addr);
     
-    page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
+    page = l2e_get_value(idle_pg_table[l2_table_offset(addr)]);
     printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ? "(4MB)" : "");
     if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
         return;
Index: xen/arch/x86/x86_32/mm.c
===================================================================
--- xen.orig/arch/x86/x86_32/mm.c       2005-04-15 13:53:11.000000000 +0200
+++ xen/arch/x86/x86_32/mm.c    2005-04-15 13:53:33.000000000 +0200
@@ -47,9 +47,9 @@ int map_pages(
         if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
         {
             /* Super-page mapping. */
-            if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+            if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
                 local_flush_tlb_pge();
-            *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
+            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
 
             v += 1 << L2_PAGETABLE_SHIFT;
             p += 1 << L2_PAGETABLE_SHIFT;
@@ -58,16 +58,16 @@ int map_pages(
         else
         {
             /* Normal page mapping. */
-            if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+            if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
             {
                 newpg = (void *)alloc_xenheap_page();
                 clear_page(newpg);
-                *pl2e = mk_l2_pgentry(__pa(newpg) | (flags & __PTE_MASK));
+                *pl2e = l2e_create_phys(__pa(newpg), flags & __PTE_MASK);
             }
-            pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
-            if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
+            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
+            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
                 local_flush_tlb_one(v);
-            *pl1e = mk_l1_pgentry(p|flags);
+            *pl1e = l1e_create_phys(p, flags);
 
             v += 1 << L1_PAGETABLE_SHIFT;
             p += 1 << L1_PAGETABLE_SHIFT;
@@ -90,14 +90,14 @@ void __set_fixmap(
 void __init paging_init(void)
 {
     void *ioremap_pt;
-    unsigned long v, l2e;
+    unsigned long v;
     struct pfn_info *pg;
 
     /* Allocate and map the machine-to-phys table. */
     if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
         panic("Not enough memory to bootstrap Xen.\n");
     idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)] =
-        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
+        l2e_create_phys(page_to_phys(pg), __PAGE_HYPERVISOR | _PAGE_PSE);
     memset((void *)RDWR_MPT_VIRT_START, 0x55, 4UL << 20);
 
     /* Xen 4MB mappings can all be GLOBAL. */
@@ -105,10 +105,9 @@ void __init paging_init(void)
     {
         for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
         {
-             l2e = l2_pgentry_val(idle_pg_table[l2_table_offset(v)]);
-             if ( l2e & _PAGE_PSE )
-                 l2e |= _PAGE_GLOBAL;
-             idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
+            if (l2e_get_flags(idle_pg_table[l2_table_offset(v)]) & _PAGE_PSE)
+                l2e_add_flags(&idle_pg_table[v >> L2_PAGETABLE_SHIFT],
+                              _PAGE_GLOBAL);
         }
     }
 
@@ -116,33 +115,33 @@ void __init paging_init(void)
     ioremap_pt = (void *)alloc_xenheap_page();
     clear_page(ioremap_pt);
     idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] =
-        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(ioremap_pt), __PAGE_HYPERVISOR);
 
     /* Create read-only mapping of MPT for guest-OS use.
      * NB. Remove the global bit so that shadow_mode_translate()==true domains
      *     can reused this address space for their phys-to-machine mapping.
      */
     idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] =
-        mk_l2_pgentry(l2_pgentry_val(
-                          idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]) &
-                      ~(_PAGE_RW | _PAGE_GLOBAL));
+        l2e_create_pfn(l2e_get_pfn(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]),
+                       l2e_get_flags(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)])
+                       & ~(_PAGE_RW | _PAGE_GLOBAL));
 
     /* Set up mapping cache for domain pages. */
     mapcache = (unsigned long *)alloc_xenheap_page();
     clear_page(mapcache);
     idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)] =
-        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(mapcache), __PAGE_HYPERVISOR);
 
     /* Set up linear page table mapping. */
     idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
+        l2e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
 }
 
 void __init zap_low_mappings(void)
 {
     int i;
     for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
-        idle_pg_table[i] = mk_l2_pgentry(0);
+        idle_pg_table[i] = l2e_empty();
     flush_tlb_all_pge();
 }
 
@@ -168,7 +167,7 @@ void subarch_init_memory(struct domain *
     }
 
     /* M2P table is mappable read-only by privileged domains. */
-    m2p_start_mfn = l2_pgentry_to_pfn(
+    m2p_start_mfn = l2e_get_pfn(
         idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]);
     for ( i = 0; i < 1024; i++ )
     {
@@ -318,11 +317,11 @@ void *memguard_init(void *heap_start)
         l1 = (l1_pgentry_t *)heap_start;
         heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
         for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
-            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
-                                   (j << L1_PAGETABLE_SHIFT) | 
-                                  __PAGE_HYPERVISOR);
+            l1[j] = l1e_create_phys((i << L2_PAGETABLE_SHIFT) |
+                                    (j << L1_PAGETABLE_SHIFT),
+                                    __PAGE_HYPERVISOR);
         idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
-            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
+            l2e_create_phys(virt_to_phys(l1), __PAGE_HYPERVISOR);
     }
 
     return heap_start;
@@ -344,11 +341,11 @@ static void __memguard_change_range(void
     while ( _l != 0 )
     {
         l2  = &idle_pg_table[l2_table_offset(_p)];
-        l1  = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
-        if ( guard )
-            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
-        else
-            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
+        l1  = l2e_to_l1e(*l2) + l1_table_offset(_p);
+        if ( guard )
+            l1e_remove_flags(l1, _PAGE_PRESENT);
+        else
+            l1e_add_flags(l1, _PAGE_PRESENT);
         _p += PAGE_SIZE;
         _l -= PAGE_SIZE;
     }
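
For reviewers: the conversions in this file all follow the same few
patterns.  A minimal side-by-side sketch (illustrative only, not part
of the patch; pt / pl1e / pl2e are placeholder variables):

    /* old: read-modify-write through unsigned long */
    *pl1e = mk_l1_pgentry(l1_pgentry_val(*pl1e) & ~_PAGE_PRESENT);
    /* new: typed helper, same effect */
    l1e_remove_flags(pl1e, _PAGE_PRESENT);

    /* old: build an entry from a physical address or-ed with flags */
    *pl2e = mk_l2_pgentry(__pa(pt) | __PAGE_HYPERVISOR);
    /* new: the flags are a separate argument, no manual or-ing */
    *pl2e = l2e_create_phys(__pa(pt), __PAGE_HYPERVISOR);
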
Index: xen/common/grant_table.c
===================================================================
--- xen.orig/common/grant_table.c       2005-04-15 13:53:11.000000000 +0200
+++ xen/common/grant_table.c    2005-04-15 13:53:33.000000000 +0200
@@ -253,12 +253,12 @@ __gnttab_activate_grant_ref(
     {
         /* Write update into the pagetable
          */
+        l1_pgentry_t pte;
 
-        rc = update_grant_va_mapping( host_virt_addr,
-                                (frame << PAGE_SHIFT) | _PAGE_PRESENT  |
-                                                        _PAGE_ACCESSED |
-                                                        _PAGE_DIRTY    |
-                       ((dev_hst_ro_flags & GNTMAP_readonly) ? 0 : _PAGE_RW),
+        pte = l1e_create_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+        if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
+            l1e_add_flags(&pte, _PAGE_RW);
+        rc = update_grant_va_mapping( host_virt_addr, pte, 
                        mapping_d, mapping_ed );
 
         /* IMPORTANT: (rc == 0) => must flush / invalidate entry in TLB.
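
An equivalent single-expression form of the pte construction above, for
comparison (a sketch only, not what the patch does; frame and
dev_hst_ro_flags are the variables from the hunk above):

    pte = l1e_create_pfn(frame,
                         _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY |
                         ((dev_hst_ro_flags & GNTMAP_readonly) ? 0 : _PAGE_RW));
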
Index: xen/include/asm-x86/x86_64/page.h
===================================================================
--- xen.orig/include/asm-x86/x86_64/page.h      2005-04-15 13:53:11.000000000 +0200
+++ xen/include/asm-x86/x86_64/page.h   2005-04-15 13:53:33.000000000 +0200
@@ -24,50 +24,165 @@
 #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
 #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
 
+#define _PAGE_NX                (cpu_has_nx ? (1UL<<63) : 0UL)
+#define PAGE_FLAG_MASK          0xfff
+
 #ifndef __ASSEMBLY__
 #include <xen/config.h>
-typedef struct { unsigned long l1_lo; } l1_pgentry_t;
-typedef struct { unsigned long l2_lo; } l2_pgentry_t;
-typedef struct { unsigned long l3_lo; } l3_pgentry_t;
-typedef struct { unsigned long l4_lo; } l4_pgentry_t;
+#include <asm/types.h>
+typedef struct { u64 l1_lo; } l1_pgentry_t;
+typedef struct { u64 l2_lo; } l2_pgentry_t;
+typedef struct { u64 l3_lo; } l3_pgentry_t;
+typedef struct { u64 l4_lo; } l4_pgentry_t;
 typedef l4_pgentry_t root_pgentry_t;
-#endif /* !__ASSEMBLY__ */
 
-/* Strip type from a table entry. */
-#define l1_pgentry_val(_x)   ((_x).l1_lo)
-#define l2_pgentry_val(_x)   ((_x).l2_lo)
-#define l3_pgentry_val(_x)   ((_x).l3_lo)
-#define l4_pgentry_val(_x)   ((_x).l4_lo)
-#define root_pgentry_val(_x) (l4_pgentry_val(_x))
+/* read access (deprecated) */
+#define l1e_get_value(_x)         ((_x).l1_lo)
+#define l2e_get_value(_x)         ((_x).l2_lo)
+#define l3e_get_value(_x)         ((_x).l3_lo)
+#define l4e_get_value(_x)         ((_x).l4_lo)
 
-/* Add type to a table entry. */
-#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
-#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
-#define mk_l3_pgentry(_x)   ( (l3_pgentry_t) { (_x) } )
-#define mk_l4_pgentry(_x)   ( (l4_pgentry_t) { (_x) } )
-#define mk_root_pgentry(_x) (mk_l4_pgentry(_x))
+/* read access */
+#define l1e_get_pfn(_x)           (((_x).l1_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
+#define l1e_get_phys(_x)          (((_x).l1_lo & (PADDR_MASK&PAGE_MASK)))
+#define l1e_get_flags(_x)         ((_x).l1_lo  &  PAGE_FLAG_MASK)
 
-/* Turn a typed table entry into a physical address. */
-#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
-#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
-#define l3_pgentry_to_phys(_x)   (l3_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
-#define l4_pgentry_to_phys(_x)   (l4_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
-#define root_pgentry_to_phys(_x) (l4_pgentry_to_phys(_x))
+#define l2e_get_pfn(_x)           (((_x).l2_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
+#define l2e_get_phys(_x)          (((_x).l2_lo & (PADDR_MASK&PAGE_MASK)))
+#define l2e_get_flags(_x)         ((_x).l2_lo  &  PAGE_FLAG_MASK)
 
-/* Turn a typed table entry into a page index. */
-#define l1_pgentry_to_pfn(_x)   (l1_pgentry_to_phys(_x) >> PAGE_SHIFT) 
-#define l2_pgentry_to_pfn(_x)   (l2_pgentry_to_phys(_x) >> PAGE_SHIFT)
-#define l3_pgentry_to_pfn(_x)   (l3_pgentry_to_phys(_x) >> PAGE_SHIFT)
-#define l4_pgentry_to_pfn(_x)   (l4_pgentry_to_phys(_x) >> PAGE_SHIFT)
-#define root_pgentry_to_pfn(_x) (l4_pgentry_to_pfn(_x))
+#define l3e_get_pfn(_x)           (((_x).l3_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
+#define l3e_get_phys(_x)          (((_x).l3_lo & (PADDR_MASK&PAGE_MASK)))
+#define l3e_get_flags(_x)         ((_x).l3_lo  &  PAGE_FLAG_MASK)
+
+#define l4e_get_pfn(_x)           (((_x).l4_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
+#define l4e_get_phys(_x)          (((_x).l4_lo & (PADDR_MASK&PAGE_MASK)))
+#define l4e_get_flags(_x)         ((_x).l4_lo  &  PAGE_FLAG_MASK)
+
+/* write access */
+static inline l1_pgentry_t l1e_empty(void)
+{
+    l1_pgentry_t e = { .l1_lo = 0 };
+    return e;
+}
+static inline l1_pgentry_t l1e_create_pfn(u64 pfn, u64 flags)
+{
+    l1_pgentry_t e = { .l1_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l1_pgentry_t l1e_create_phys(u64 addr, u64 flags)
+{
+    l1_pgentry_t e = { .l1_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
+    return e;
+}
+static inline void l1e_add_flags(l1_pgentry_t *e, u64 flags)
+{
+    e->l1_lo |= flags;
+}
+static inline void l1e_remove_flags(l1_pgentry_t *e, u64 flags)
+{
+    e->l1_lo &= ~flags;
+}
+
+static inline l2_pgentry_t l2e_empty(void)
+{
+    l2_pgentry_t e = { .l2_lo = 0 };
+    return e;
+}
+static inline l2_pgentry_t l2e_create_pfn(u64 pfn, u64 flags)
+{
+    l2_pgentry_t e = { .l2_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l2_pgentry_t l2e_create_phys(u64 addr, u64 flags)
+{
+    l2_pgentry_t e = { .l2_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
+    return e;
+}
+static inline void l2e_add_flags(l2_pgentry_t *e, u64 flags)
+{
+    e->l2_lo |= flags;
+}
+static inline void l2e_remove_flags(l2_pgentry_t *e, u64 flags)
+{
+    e->l2_lo &= ~flags;
+}
+
+static inline l3_pgentry_t l3e_empty(void)
+{
+    l3_pgentry_t e = { .l3_lo = 0 };
+    return e;
+}
+static inline l3_pgentry_t l3e_create_pfn(u64 pfn, u64 flags)
+{
+    l3_pgentry_t e = { .l3_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l3_pgentry_t l3e_create_phys(u64 addr, u64 flags)
+{
+    l3_pgentry_t e = { .l3_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
+    return e;
+}
+static inline void l3e_add_flags(l3_pgentry_t *e, u64 flags)
+{
+    e->l3_lo |= flags;
+}
+static inline void l3e_remove_flags(l3_pgentry_t *e, u64 flags)
+{
+    e->l3_lo &= ~flags;
+}
+
+static inline l4_pgentry_t l4e_empty(void)
+{
+    l4_pgentry_t e = { .l4_lo = 0 };
+    return e;
+}
+static inline l4_pgentry_t l4e_create_pfn(u64 pfn, u64 flags)
+{
+    l4_pgentry_t e = { .l4_lo = (pfn << PAGE_SHIFT) | flags };
+    return e;
+}
+static inline l4_pgentry_t l4e_create_phys(u64 addr, u64 flags)
+{
+    l4_pgentry_t e = { .l4_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
+    return e;
+}
+static inline void l4e_add_flags(l4_pgentry_t *e, u64 flags)
+{
+    e->l4_lo |= flags;
+}
+static inline void l4e_remove_flags(l4_pgentry_t *e, u64 flags)
+{
+    e->l4_lo &= ~flags;
+}
+
+/* check entries */
+static inline int l1e_has_changed(l1_pgentry_t *e1, l1_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l1_lo ^ e2->l1_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
+}
+static inline int l2e_has_changed(l2_pgentry_t *e1, l2_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l2_lo ^ e2->l2_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
+}
+static inline int l3e_has_changed(l3_pgentry_t *e1, l3_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l3_lo ^ e2->l3_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
+}
+static inline int l4e_has_changed(l4_pgentry_t *e1, l4_pgentry_t *e2, u32 flags)
+{
+    return ((e1->l4_lo ^ e2->l4_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
+}
+
+#endif /* !__ASSEMBLY__ */
 
 /* Pagetable walking. */
-#define l2_pgentry_to_l1(_x) \
-  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
-#define l3_pgentry_to_l2(_x) \
-  ((l2_pgentry_t *)__va(l3_pgentry_to_phys(_x)))
-#define l4_pgentry_to_l3(_x) \
-  ((l3_pgentry_t *)__va(l4_pgentry_to_phys(_x)))
+#define l2e_to_l1e(_x) \
+  ((l1_pgentry_t *)__va(l2e_get_phys(_x)))
+#define l3e_to_l2e(_x) \
+  ((l2_pgentry_t *)__va(l3e_get_phys(_x)))
+#define l4e_to_l3e(_x) \
+  ((l3_pgentry_t *)__va(l4e_get_phys(_x)))
 
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(_a) \
@@ -89,10 +204,13 @@ typedef l4_pgentry_t root_pgentry_t;
     (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
      ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))
 
+#define root_get_pfn              l4e_get_pfn
+#define root_get_flags            l4e_get_flags
+#define root_get_value            l4e_get_value
+#define root_empty                l4e_empty
+#define root_create_phys          l4e_create_phys
 #define PGT_root_page_table PGT_l4_page_table
 
-#define _PAGE_NX         (cpu_has_nx ? (1UL<<63) : 0UL)
-
 #define L1_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (3UL << 7))
 #define L2_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
 #define L3_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
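
To summarise the new interface in one place, here is how the accessors
above compose (a usage sketch, not code from the tree; mfn and e are
placeholder variables):

    l1_pgentry_t e, old;
    unsigned long mfn = 0x1234;

    e   = l1e_create_pfn(mfn, __PAGE_HYPERVISOR); /* build from a frame number */
    old = e;
    l1e_add_flags(&e, _PAGE_GLOBAL);              /* set flag bits in place    */
    l1e_remove_flags(&e, _PAGE_RW);               /* clear flag bits in place  */

    /* query side: frame number, physical address, flag bits */
    ASSERT(l1e_get_pfn(e)  == mfn);
    ASSERT(l1e_get_phys(e) == (mfn << PAGE_SHIFT));
    if ( l1e_get_flags(e) & _PAGE_PRESENT )
        /* mapping is present */;

    /* differs in the address bits or in any of the given flag bits? */
    if ( l1e_has_changed(&old, &e, _PAGE_RW | _PAGE_GLOBAL) )
        /* flush needed, etc. */;
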
Index: xen/arch/x86/domain.c
===================================================================
--- xen.orig/arch/x86/domain.c  2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/domain.c       2005-04-15 13:53:33.000000000 +0200
@@ -259,12 +259,14 @@ void arch_do_createdomain(struct exec_do
 
         d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
         memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
-        d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
-            mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
+        d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
+            l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
+                            __PAGE_HYPERVISOR);
         d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
         memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
-        d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
-            mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
+        d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
+            l3e_create_phys(__pa(d->arch.mm_perdomain_l2),
+                            __PAGE_HYPERVISOR);
 #endif
 
         (void)ptwr_init(d);
Index: xen/arch/x86/x86_64/mm.c
===================================================================
--- xen.orig/arch/x86/x86_64/mm.c       2005-04-15 13:53:10.000000000 +0200
+++ xen/arch/x86/x86_64/mm.c    2005-04-15 13:53:33.000000000 +0200
@@ -69,29 +69,29 @@ int map_pages(
     while ( s != 0 )
     {
         pl4e = &pt[l4_table_offset(v)];
-        if ( !(l4_pgentry_val(*pl4e) & _PAGE_PRESENT) )
+        if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
         {
             newpg = safe_page_alloc();
             clear_page(newpg);
-            *pl4e = mk_l4_pgentry(__pa(newpg) | (flags & __PTE_MASK));
+            *pl4e = l4e_create_phys(__pa(newpg), flags & __PTE_MASK);
         }
 
-        pl3e = l4_pgentry_to_l3(*pl4e) + l3_table_offset(v);
-        if ( !(l3_pgentry_val(*pl3e) & _PAGE_PRESENT) )
+        pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(v);
+        if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
         {
             newpg = safe_page_alloc();
             clear_page(newpg);
-            *pl3e = mk_l3_pgentry(__pa(newpg) | (flags & __PTE_MASK));
+            *pl3e = l3e_create_phys(__pa(newpg), flags & __PTE_MASK);
         }
 
-        pl2e = l3_pgentry_to_l2(*pl3e) + l2_table_offset(v);
+        pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
 
         if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
         {
             /* Super-page mapping. */
-            if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+            if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
                 local_flush_tlb_pge();
-            *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
+            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
 
             v += 1 << L2_PAGETABLE_SHIFT;
             p += 1 << L2_PAGETABLE_SHIFT;
@@ -100,16 +100,16 @@ int map_pages(
         else
         {
             /* Normal page mapping. */
-            if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+            if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
             {
                 newpg = safe_page_alloc();
                 clear_page(newpg);
-                *pl2e = mk_l2_pgentry(__pa(newpg) | (flags & __PTE_MASK));
+                *pl2e = l2e_create_phys(__pa(newpg), flags & __PTE_MASK);
             }
-            pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
-            if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
+            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
+            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
                 local_flush_tlb_one(v);
-            *pl1e = mk_l1_pgentry(p|flags);
+            *pl1e = l1e_create_phys(p, flags);
 
             v += 1 << L1_PAGETABLE_SHIFT;
             p += 1 << L1_PAGETABLE_SHIFT;
@@ -161,19 +161,18 @@ void __init paging_init(void)
      * Above we mapped the M2P table as user-accessible and read-writable.
      * Fix security by denying user access at the top level of the page table.
      */
-    idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)] =
-        mk_l4_pgentry(l4_pgentry_val(
-            idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]) & 
-                      ~_PAGE_USER);
+    l4e_remove_flags(&idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)],
+                     _PAGE_USER);
 
     /* Create read-only mapping of MPT for guest-OS use. */
     l3ro = (l3_pgentry_t *)alloc_xenheap_page();
     clear_page(l3ro);
     idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
-        mk_l4_pgentry((__pa(l3ro) | __PAGE_HYPERVISOR | _PAGE_USER) &
-                      ~_PAGE_RW);
+        l4e_create_phys(__pa(l3ro),
+                        (__PAGE_HYPERVISOR | _PAGE_USER) & ~_PAGE_RW);
+
     /* Copy the L3 mappings from the RDWR_MPT area. */
-    l3rw = l4_pgentry_to_l3(
+    l3rw = l4e_to_l3e(
         idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
     l3rw += l3_table_offset(RDWR_MPT_VIRT_START);
     l3ro += l3_table_offset(RO_MPT_VIRT_START);
@@ -182,12 +181,12 @@ void __init paging_init(void)
 
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
+        l4e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
 }
 
 void __init zap_low_mappings(void)
 {
-    idle_pg_table[0] = mk_l4_pgentry(0);
+    idle_pg_table[0] = l4e_empty();
     flush_tlb_all_pge();
 }
 
@@ -217,14 +216,14 @@ void subarch_init_memory(struct domain *
           v != RDWR_MPT_VIRT_END;
           v += 1 << L2_PAGETABLE_SHIFT )
     {
-        l3e = l4_pgentry_to_l3(idle_pg_table[l4_table_offset(v)])[
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
             l3_table_offset(v)];
-        if ( !(l3_pgentry_val(l3e) & _PAGE_PRESENT) )
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
             continue;
-        l2e = l3_pgentry_to_l2(l3e)[l2_table_offset(v)];
-        if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
             continue;
-        m2p_start_mfn = l2_pgentry_to_pfn(l2e);
+        m2p_start_mfn = l2e_get_pfn(l2e);
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
@@ -361,20 +360,20 @@ void *memguard_init(void *heap_start)
     {
         ALLOC_PT(l1);
         for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
-            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
-                                   (j << L1_PAGETABLE_SHIFT) | 
-                                  __PAGE_HYPERVISOR);
+            l1[j] = l1e_create_phys((i << L2_PAGETABLE_SHIFT) |
+                                    (j << L1_PAGETABLE_SHIFT),
+                                    __PAGE_HYPERVISOR);
         if ( !((unsigned long)l2 & (PAGE_SIZE-1)) )
         {
             ALLOC_PT(l2);
             if ( !((unsigned long)l3 & (PAGE_SIZE-1)) )
             {
                 ALLOC_PT(l3);
-                *l4++ = mk_l4_pgentry(virt_to_phys(l3) | __PAGE_HYPERVISOR);
+                *l4++ = l4e_create_phys(virt_to_phys(l3), __PAGE_HYPERVISOR);
             }
-            *l3++ = mk_l3_pgentry(virt_to_phys(l2) | __PAGE_HYPERVISOR);
+            *l3++ = l3e_create_phys(virt_to_phys(l2), __PAGE_HYPERVISOR);
         }
-        *l2++ = mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
+        *l2++ = l2e_create_phys(virt_to_phys(l1), __PAGE_HYPERVISOR);
     }
 
     return heap_start;
@@ -398,13 +397,13 @@ static void __memguard_change_range(void
     while ( _l != 0 )
     {
         l4 = &idle_pg_table[l4_table_offset(_p)];
-        l3 = l4_pgentry_to_l3(*l4) + l3_table_offset(_p);
-        l2 = l3_pgentry_to_l2(*l3) + l2_table_offset(_p);
-        l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
+        l3 = l4e_to_l3e(*l4) + l3_table_offset(_p);
+        l2 = l3e_to_l2e(*l3) + l2_table_offset(_p);
+        l1 = l2e_to_l1e(*l2) + l1_table_offset(_p);
         if ( guard )
-            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
+            l1e_remove_flags(l1, _PAGE_PRESENT);
         else
-            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
+            l1e_add_flags(l1, _PAGE_PRESENT);
         _p += PAGE_SIZE;
         _l -= PAGE_SIZE;
     }
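
For completeness, the walking macros chain up on x86_64 exactly as in
__memguard_change_range() above -- a four-level lookup of a virtual
address looks like this (sketch only; va is a placeholder and all
intermediate entries are assumed present):

    unsigned long va = 0;   /* placeholder: address to look up */
    l4_pgentry_t *pl4e = &idle_pg_table[l4_table_offset(va)];
    l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
    l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
    l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    unsigned long mfn  = l1e_get_pfn(*pl1e);
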

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
