[Xen-devel] [patch/unstable] page table cleanups

To: xen-devel@xxxxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [patch/unstable] page table cleanups
From: Gerd Knorr <kraxel@xxxxxxxxxxx>
Date: Mon, 14 Mar 2005 13:07:31 +0100
Delivery-date: Mon, 14 Mar 2005 12:11:40 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-archive: <http://sourceforge.net/mailarchive/forum.php?forum=xen-devel>
List-help: <mailto:xen-devel-request@lists.sourceforge.net?subject=help>
List-id: List for Xen developers <xen-devel.lists.sourceforge.net>
List-post: <mailto:xen-devel@lists.sourceforge.net>
List-subscribe: <https://lists.sourceforge.net/lists/listinfo/xen-devel>, <mailto:xen-devel-request@lists.sourceforge.net?subject=subscribe>
List-unsubscribe: <https://lists.sourceforge.net/lists/listinfo/xen-devel>, <mailto:xen-devel-request@lists.sourceforge.net?subject=unsubscribe>
Sender: xen-devel-admin@xxxxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.6i
  Hi,

In many places Xen uses "unsigned long" instead of the l*_pgentry_t
types to pass around page table entries.  Here is a patch which fixes
this in a number of places (mostly in the shadow mode code).  That's what
I've tripped over so far; more of these patches may follow.
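
For anyone not familiar with these types: l1_pgentry_t and friends are thin
struct wrappers around the raw entry word, accessed through macros such as
l1_pgentry_val() and mk_l1_pgentry(), which is what the patch below switches
to.  Roughly like this (a simplified sketch only, with made-up member names,
not copied from the real Xen headers):

#define PAGE_SHIFT 12

/* Sketch: each level gets its own wrapper type so entries of different
 * levels cannot be mixed up, and so the width can change under PAE. */
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
typedef struct { unsigned long l2_lo; } l2_pgentry_t;

#define l1_pgentry_val(_x)    ((_x).l1_lo)
#define l2_pgentry_val(_x)    ((_x).l2_lo)
#define mk_l1_pgentry(_x)     ((l1_pgentry_t) { (_x) })
#define mk_l2_pgentry(_x)     ((l2_pgentry_t) { (_x) })
#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)

Under PAE the wrapped member simply becomes a 64-bit quantity, so code that
goes through the accessors keeps working, while code that stuffs the entry
into an "unsigned long" does not.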

Fixing this is needed for adding PAE support to Xen: in PAE paging mode
the page table entries are 64 bits wide and thus will not fit into "unsigned
long".
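
To illustrate the problem (a standalone example; nothing Xen-specific is
assumed beyond "unsigned long" being 32 bits on a 32-bit build):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A PAE page table entry is 64 bits.  Point it at the frame starting
     * exactly at the 4GB boundary, with Present|RW|Accessed|Dirty set. */
    uint64_t pae_entry = ((uint64_t)0x100000 << 12) | 0x63;

    /* On a 32-bit build "unsigned long" is 32 bits wide, so storing the
     * entry there silently drops the upper half, i.e. the frame number.
     * (On an LP64 host unsigned long is 64 bits and this does not show.) */
    unsigned long truncated = (unsigned long)pae_entry;

    printf("64-bit entry : %#llx\n", (unsigned long long)pae_entry);
    printf("unsigned long: %#lx\n", truncated);
    return 0;
}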

cheers,

  Gerd

Index: xen/arch/x86/shadow.c
===================================================================
--- xen.orig/arch/x86/shadow.c  2005-03-10 11:55:10.000000000 +0100
+++ xen/arch/x86/shadow.c       2005-03-14 12:34:26.000000000 +0100
@@ -532,13 +532,15 @@ static void shadow_map_l1_into_current_l
 { 
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
-    unsigned long    *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, sl1mfn, sl1ss;
+    l1_pgentry_t     *gpl1e, *spl1e;
+    l2_pgentry_t     gl2e, sl2e;
+    unsigned long    gl1pfn, sl1mfn, sl1ss;
     struct pfn_info  *sl1mfn_info;
     int               i;
 
     __guest_get_l2e(ed, va, &gl2e);
 
-    gl1pfn = gl2e >> PAGE_SHIFT;
+    gl1pfn = l2_pgentry_val(gl2e) >> PAGE_SHIFT;
 
     sl1ss = __shadow_status(d, gl1pfn);
     if ( !(sl1ss & PSH_shadowed) )
@@ -561,10 +563,10 @@ static void shadow_map_l1_into_current_l
         __guest_set_l2e(ed, va, gl2e);
         __shadow_set_l2e(ed, va, sl2e);
 
-        gpl1e = (unsigned long *) &(linear_pg_table[
+        gpl1e = &(linear_pg_table[
             (va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
 
-        spl1e = (unsigned long *) &(shadow_linear_pg_table[
+        spl1e = &(shadow_linear_pg_table[
             (va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -584,7 +586,8 @@ static void shadow_map_l1_into_current_l
 
 void shadow_invlpg(struct exec_domain *ed, unsigned long va)
 {
-    unsigned long gpte, spte;
+    l1_pgentry_t gpte, spte;
+    l1_pgentry_t zero = mk_l1_pgentry(0);
 
     ASSERT(shadow_mode_enabled(ed->domain));
 
@@ -592,21 +595,22 @@ void shadow_invlpg(struct exec_domain *e
      * XXX KAF: Why is this set-to-zero required?
      *          Why, on failure, must we bin all our shadow state?
      */
-    if (__put_user(0L, (unsigned long *)
-                   &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
+    if (__copy_to_user(&shadow_linear_pg_table[va >> PAGE_SHIFT],
+                      &zero, sizeof(zero))) {
         vmx_shadow_clear_state(ed->domain);
         return;
     }
 
-    if (__get_user(gpte, (unsigned long *)
-                   &linear_pg_table[va >> PAGE_SHIFT])) {
+    if (__copy_from_user(&gpte,
+                        &linear_pg_table[va >> PAGE_SHIFT],
+                        sizeof(gpte))) {
         return;
     }
 
     l1pte_propagate_from_guest(ed->domain, &gpte, &spte);
 
-    if (__put_user(spte, (unsigned long *)
-                   &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
+    if (__copy_to_user(&shadow_linear_pg_table[va >> PAGE_SHIFT],
+                      &spte, sizeof(spte))) {
         return;
     }
 }
@@ -715,17 +719,18 @@ int shadow_fault(unsigned long va, struc
 
 
 void shadow_l1_normal_pt_update(
-    unsigned long pa, unsigned long gpte,
+    unsigned long pa, l1_pgentry_t gpte,
     unsigned long *prev_smfn_ptr,
     l1_pgentry_t **prev_spl1e_ptr)
 {
-    unsigned long smfn, spte, prev_smfn = *prev_smfn_ptr;    
+    l1_pgentry_t spte;
+    unsigned long smfn, prev_smfn = *prev_smfn_ptr;    
     l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
 
     /* N.B. To get here, we know the l1 page *must* be shadowed. */
     SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%p, "
              "prev_smfn=%p, prev_spl1e=%p",
-             pa, gpte, prev_smfn, prev_spl1e);
+             pa, l1_pgentry_val(gpte), prev_smfn, prev_spl1e);
 
     smfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
 
@@ -737,23 +742,24 @@ void shadow_l1_normal_pt_update(
     {
         if ( prev_spl1e != NULL )
             unmap_domain_mem( prev_spl1e );
-        spl1e = (l1_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+        spl1e = map_domain_mem(smfn << PAGE_SHIFT);
         *prev_smfn_ptr  = smfn;
         *prev_spl1e_ptr = spl1e;
     }
 
     l1pte_propagate_from_guest(current->domain, &gpte, &spte);
-    spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
+    spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte;
 }
 
-void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde)
+void shadow_l2_normal_pt_update(unsigned long pa, l2_pgentry_t gpde)
 {
-    unsigned long sl2mfn, spde = 0;
-    l2_pgentry_t *spl2e;
+    unsigned long sl2mfn;
+    l2_pgentry_t *spl2e, spde = mk_l2_pgentry(0);
     unsigned long sl1mfn;
 
     /* N.B. To get here, we know the l2 page *must* be shadowed. */
-    SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%p",pa,gpde);
+    SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%p",pa,
+            l2_pgentry_val(gpde));
 
     sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
 
@@ -761,15 +767,15 @@ void shadow_l2_normal_pt_update(unsigned
      * Only propagate to shadow if _PAGE_ACCESSED is set in the guest.
      * Otherwise, to ensure coherency, we blow away the existing shadow value.
      */
-    if ( gpde & _PAGE_ACCESSED )
+    if ( l2_pgentry_val(gpde) & _PAGE_ACCESSED )
     {
-        sl1mfn = (gpde & _PAGE_PRESENT) ?
-            __shadow_status(current->domain, gpde >> PAGE_SHIFT) : 0;
+        sl1mfn = (l2_pgentry_val(gpde) & _PAGE_PRESENT) ?
+            __shadow_status(current->domain, l2_pgentry_val(gpde) >> PAGE_SHIFT) : 0;
         l2pde_general(current->domain, &gpde, &spde, sl1mfn);
     }
 
-    spl2e = (l2_pgentry_t *)map_domain_mem(sl2mfn << PAGE_SHIFT);
-    spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spde);
+    spl2e = map_domain_mem(sl2mfn << PAGE_SHIFT);
+    spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = spde;
     unmap_domain_mem(spl2e);
 }
 
Index: xen/include/asm-x86/shadow.h
===================================================================
--- xen.orig/include/asm-x86/shadow.h   2005-03-10 11:55:10.000000000 +0100
+++ xen/include/asm-x86/shadow.h        2005-03-14 12:35:11.000000000 +0100
@@ -40,9 +40,9 @@ extern void shadow_mode_init(void);
 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
 extern int shadow_fault(unsigned long va, struct xen_regs *regs);
 extern void shadow_l1_normal_pt_update(
-    unsigned long pa, unsigned long gpte, 
+    unsigned long pa, l1_pgentry_t gpte, 
     unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
-extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
+extern void shadow_l2_normal_pt_update(unsigned long pa, l2_pgentry_t gpde);
 extern void unshadow_table(unsigned long gpfn, unsigned int type);
 extern int shadow_mode_enable(struct domain *p, unsigned int mode);
 extern void free_shadow_state(struct domain *d);
@@ -176,55 +176,53 @@ struct shadow_status {
 
 // BUG: mafetter: this assumes ed == current, so why pass ed?
 static inline void __shadow_get_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t *sl2e)
 {
     if ( !likely(shadow_mode_enabled(ed->domain)) )
         BUG();
 
     if ( shadow_mode_translate(ed->domain) )
-        *sl2e = l2_pgentry_val(
-            ed->arch.shadow_vtable[l2_table_offset(va)]);       
+        *sl2e = ed->arch.shadow_vtable[l2_table_offset(va)]; 
     else 
-        *sl2e = l2_pgentry_val(
-            shadow_linear_l2_table[l2_table_offset(va)]);
+        *sl2e = shadow_linear_l2_table[l2_table_offset(va)];
 }
 
 static inline void __shadow_set_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long value)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
 {
     if ( !likely(shadow_mode_enabled(ed->domain)) )
         BUG();
 
     if ( shadow_mode_translate(ed->domain) ) 
-        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+        ed->arch.shadow_vtable[l2_table_offset(va)] = value;
     else 
-        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+        shadow_linear_l2_table[l2_table_offset(va)] = value;
 }
 
 static inline void __guest_get_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long *l2e)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t *l2e)
 {
     *l2e = ( shadow_mode_translate(ed->domain) ) ?
-        l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]) :
-        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
+        ed->arch.guest_vtable[l2_table_offset(va)] :
+        linear_l2_table[l2_table_offset(va)];
 }
 
 static inline void __guest_set_l2e(
-    struct exec_domain *ed, unsigned long va, unsigned long value)
+    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
 {
     if ( shadow_mode_translate(ed->domain) )
     {
         unsigned long pfn;
 
-        pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
+        pfn = phys_to_machine_mapping(l2_pgentry_val(value) >> PAGE_SHIFT);
         ed->arch.hl2_vtable[l2_table_offset(va)] =
             mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
 
-        ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+        ed->arch.guest_vtable[l2_table_offset(va)] = value;
     }
     else
     {
-        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+        linear_l2_table[l2_table_offset(va)] = value;
     }
 }
 
@@ -326,36 +324,38 @@ static inline void l1pte_read_fault(
 }
 
 static inline void l1pte_propagate_from_guest(
-    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
+    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
 { 
-    unsigned long gpte = *gpte_p;
-    unsigned long spte = *spte_p;
-    unsigned long pfn = gpte >> PAGE_SHIFT;
+    l1_pgentry_t gpte = *gpte_p;
+    l1_pgentry_t spte = *spte_p;
+    unsigned long pfn = l1_pgentry_val(gpte) >> PAGE_SHIFT;
     unsigned long mfn = __gpfn_to_mfn(d, pfn);
 
 #if SHADOW_VERBOSE_DEBUG
-    unsigned long old_spte = spte;
+    l1_pgentry_t old_spte = spte;
 #endif
 
     /* Use 1:1 page table to identify MMIO address space */
-    if ( shadow_mode_external(d) && mmio_space(gpte) ) {
-        *spte_p = 0;
+    if ( shadow_mode_external(d) && mmio_space(l1_pgentry_val(gpte)) ) {
+        *spte_p = mk_l1_pgentry(0);
         return;
     }
     
-    spte = 0;
-    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
+    spte = mk_l1_pgentry(0);
+    if ( (l1_pgentry_val(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
          (_PAGE_PRESENT|_PAGE_ACCESSED) ) {
         
-        spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+        spte = mk_l1_pgentry((mfn << PAGE_SHIFT) |
+           (l1_pgentry_val(gpte) & ~PAGE_MASK));
         
-        if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
-            spte &= ~_PAGE_RW;
+        if ( shadow_mode_log_dirty(d) || !(l1_pgentry_val(gpte) & _PAGE_DIRTY) )
+            l1_pgentry_val(spte) &= ~_PAGE_RW;
     }
         
 #if SHADOW_VERBOSE_DEBUG
-    if ( old_spte || spte || gpte )
-        SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
+    if ( l1_pgentry_val(old_spte) || l1_pgentry_val(spte) || l1_pgentry_val(gpte) )
+        SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ",
+                l1_pgentry_val(gpte), l1_pgentry_val(old_spte), l1_pgentry_val(spte));
 #endif
 
     *gpte_p = gpte;
@@ -366,27 +366,28 @@ static inline void l1pte_propagate_from_
 
 static inline void l2pde_general(
     struct domain *d,
-    unsigned long *gpde_p,
-    unsigned long *spde_p,
+    l2_pgentry_t *gpde_p,
+    l2_pgentry_t *spde_p,
     unsigned long sl1mfn)
 {
-    unsigned long gpde = *gpde_p;
-    unsigned long spde = *spde_p;
+    l2_pgentry_t gpde = *gpde_p;
+    l2_pgentry_t spde = *spde_p;
 
-    spde = 0;
+    spde = mk_l2_pgentry(0);
 
     if ( sl1mfn != 0 )
     {
-        spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) | 
-            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
-        gpde |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
+        spde = mk_l2_pgentry((l2_pgentry_val(gpde) & ~PAGE_MASK)
+           | (sl1mfn << PAGE_SHIFT)
+           | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY);
+        l2_pgentry_val(gpde) |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
 
         /* Detect linear p.t. mappings and write-protect them. */
         if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
              PGT_l2_page_table ) 
         {
             if ( !shadow_mode_translate(d) )
-                spde = gpde & ~_PAGE_RW;
+                spde = mk_l2_pgentry(l2_pgentry_val(gpde) & ~_PAGE_RW);
 
         }
     }
@@ -723,39 +724,42 @@ static inline void set_shadow_status(
     shadow_audit(d, 0);
 }
   
-static inline unsigned long gva_to_gpte(unsigned long gva)
+static inline l1_pgentry_t gva_to_gpte(unsigned long gva)
 {
-    unsigned long gpde, gpte, pfn, index;
+    l1_pgentry_t gpte;
+    l2_pgentry_t gpde;
+    unsigned long pfn, index;
     struct exec_domain *ed = current;
 
     __guest_get_l2e(ed, gva, &gpde);
-    if (!(gpde & _PAGE_PRESENT))
-        return 0;
+    if (!(l2_pgentry_val(gpde) & _PAGE_PRESENT))
+        return mk_l1_pgentry(0);
 
     index = l2_table_offset(gva);
 
     if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
-        pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
+        pfn = phys_to_machine_mapping(l2_pgentry_val(gpde) >> PAGE_SHIFT);
         ed->arch.hl2_vtable[index] = 
             mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     }
 
-    if ( unlikely(__get_user(gpte, (unsigned long *)
-                             &linear_pg_table[gva >> PAGE_SHIFT])) )
-        return 0;
+    if (unlikely(__copy_from_user(&gpte,
+                       &linear_pg_table[gva >> PAGE_SHIFT],
+                       sizeof(gpte))))
+        return mk_l1_pgentry(0);
 
     return gpte;
 }
 
 static inline unsigned long gva_to_gpa(unsigned long gva)
 {
-    unsigned long gpte;
+    l1_pgentry_t gpte;
 
     gpte = gva_to_gpte(gva);
-    if ( !(gpte & _PAGE_PRESENT) )
+    if ( !(l1_pgentry_val(gpte) & _PAGE_PRESENT) )
         return 0;
 
-    return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
+    return (l1_pgentry_val(gpte) & PAGE_MASK) + (gva & ~PAGE_MASK); 
 }
 
 static inline void hl2_table_invalidate(struct exec_domain *ed)
Index: xen/arch/x86/vmx.c
===================================================================
--- xen.orig/arch/x86/vmx.c     2005-03-10 11:55:09.000000000 +0100
+++ xen/arch/x86/vmx.c  2005-03-12 14:39:26.000000000 +0100
@@ -107,7 +107,8 @@ static void inline __update_guest_eip(un
 static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) 
 {
     unsigned long eip;
-    unsigned long gpte, gpa;
+    l1_pgentry_t gpte;
+    unsigned long gpa; /* FIXME: PAE */
     int result;
 
 #if VMX_DEBUG
@@ -130,9 +131,9 @@ static int vmx_do_page_fault(unsigned lo
     }
 
     gpte = gva_to_gpte(va);
-    if (!(gpte & _PAGE_PRESENT) )
+    if (!(l1_pgentry_val(gpte) & _PAGE_PRESENT) )
             return 0;
-    gpa = (gpte & PAGE_MASK) + (va & ~PAGE_MASK);
+    gpa = (l1_pgentry_val(gpte) & PAGE_MASK) + (va & ~PAGE_MASK);
 
     /* Use 1:1 page table to identify MMIO address space */
     if (mmio_space(gpa))
Index: xen/arch/x86/mm.c
===================================================================
--- xen.orig/arch/x86/mm.c      2005-03-10 11:55:11.000000000 +0100
+++ xen/arch/x86/mm.c   2005-03-14 12:33:09.000000000 +0100
@@ -1758,7 +1758,9 @@ int do_mmu_update(
                           PSH_shadowed) )
                     {
                         shadow_l1_normal_pt_update(
-                            req.ptr, req.val, &prev_smfn, &prev_spl1e);
+                            req.ptr,
+                           mk_l1_pgentry(req.val), /* FIXME: breaks for PAE + memory above 4GB */
+                           &prev_smfn, &prev_spl1e);
                         put_shadow_status(d);
                     }
 
@@ -1776,7 +1778,8 @@ int do_mmu_update(
                          (get_shadow_status(d, page-frame_table) & 
                           PSH_shadowed) )
                     {
-                        shadow_l2_normal_pt_update(req.ptr, req.val);
+                        shadow_l2_normal_pt_update(req.ptr,
+                               mk_l2_pgentry(req.val)); /* FIXME: breaks for PAE + memory above 4GB */
                         put_shadow_status(d);
                     }
 
@@ -1912,20 +1915,19 @@ int do_mmu_update(
 }
 
 void update_shadow_va_mapping(unsigned long va,
-                              unsigned long val,
+                              l1_pgentry_t val,
                               struct exec_domain *ed,
                               struct domain *d)
 {
     /* This function assumes the caller is holding the domain's BIGLOCK
      * and is running in a shadow mode
      */
-
-    unsigned long   sval = 0;
+    l1_pgentry_t sval = mk_l1_pgentry(0);
 
     l1pte_propagate_from_guest(d, &val, &sval);
 
-    if ( unlikely(__put_user(sval, ((unsigned long *)(
-        &shadow_linear_pg_table[l1_linear_offset(va)])))) )
+    if ( unlikely(__copy_to_user(&shadow_linear_pg_table[l1_linear_offset(va)],
+                 &sval, sizeof(sval))))
     {
         /*
          * Since L2's are guranteed RW, failure indicates either that the
@@ -1941,7 +1943,7 @@ void update_shadow_va_mapping(unsigned l
         if (get_shadow_status(d, gpfn))
         {
             unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
-            unsigned long *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
+            l1_pgentry_t *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
             unsigned l1_idx = l1_table_offset(va);
             gl1e[l1_idx] = sval;
             unmap_domain_mem(gl1e);
@@ -1965,7 +1967,7 @@ void update_shadow_va_mapping(unsigned l
 }
 
 int update_grant_va_mapping(unsigned long va,
-                            unsigned long _nl1e, 
+                            l1_pgentry_t _nl1e, 
                             struct domain *d,
                             struct exec_domain *ed)
 {
@@ -1984,30 +1986,32 @@ int update_grant_va_mapping(unsigned lon
      */
 
     int             rc = 0;
-    l1_pgentry_t   *pl1e;
-    unsigned long   _ol1e;
+    l1_pgentry_t    *pl1e;
+    l1_pgentry_t    _ol1e;
 
     cleanup_writable_pagetable(d);
 
     pl1e = &linear_pg_table[l1_linear_offset(va)];
 
-    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+    if ( unlikely(__copy_from_user(&_ol1e, pl1e, sizeof(_ol1e)) != 0) )
         rc = -EINVAL;
     else
     {
-        l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
+        l1_pgentry_t ol1e = _ol1e;
 
-        if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
+        if ( update_l1e(pl1e, ol1e, _nl1e) )
         {
             /* overwrote different mfn?  */
-            if (((_ol1e ^ _nl1e) & (PADDR_MASK & PAGE_MASK)) != 0)
+            if (((l1_pgentry_val(_ol1e) ^ l1_pgentry_val(_nl1e))
+                 & (PADDR_MASK & PAGE_MASK)) != 0)
             {
                 rc = 0;
                 put_page_from_l1e(ol1e, d);
             }
             else
-                rc = ((_ol1e & _PAGE_RW) ? GNTUPDVA_prev_rw
-                                         : GNTUPDVA_prev_ro );
+                rc = ((l1_pgentry_val(_ol1e) & _PAGE_RW)
+                       ? GNTUPDVA_prev_rw
+                       : GNTUPDVA_prev_ro );
                 /* use return code to avoid nasty grant table
                  * slow path in put_page_from_l1e -- caller
                  * must handle ref count instead. */
@@ -2024,7 +2028,7 @@ int update_grant_va_mapping(unsigned lon
 
 
 int do_update_va_mapping(unsigned long va,
-                         unsigned long val, 
+                         l1_pgentry_t  val, 
                          unsigned long flags)
 {
     struct exec_domain      *ed  = current;
@@ -2050,8 +2054,7 @@ int do_update_va_mapping(unsigned long v
      * the case of updating L2 entries.
      */
 
-    if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
-                                mk_l1_pgentry(val))) )
+    if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],val)) )
         rc = -EINVAL;
 
     if ( unlikely(shadow_mode_enabled(d)) )
@@ -2075,7 +2078,7 @@ int do_update_va_mapping(unsigned long v
 }
 
 int do_update_va_mapping_otherdomain(unsigned long va,
-                                     unsigned long val, 
+                                     l1_pgentry_t  val, 
                                      unsigned long flags,
                                      domid_t domid)
 {
@@ -2297,9 +2300,9 @@ int ptwr_debug = 0x0;
 /* Flush the given writable p.t. page and write-protect it again. */
 void ptwr_flush(const int which)
 {
-    unsigned long  sstat, spte, pte, *ptep, l1va;
-    l1_pgentry_t  *sl1e = NULL, *pl1e, ol1e, nl1e;
-    l2_pgentry_t  *pl2e;
+    unsigned long  sstat, l1va;
+    l1_pgentry_t   *sl1e = NULL, *pl1e, ol1e, nl1e, pte, spte, *ptep;
+    l2_pgentry_t   *pl2e;
     int            i, cpu = smp_processor_id();
     struct exec_domain *ed = current;
     struct domain *d = ed->domain;
@@ -2308,13 +2311,13 @@ void ptwr_flush(const int which)
 #endif
 
     l1va = ptwr_info[cpu].ptinfo[which].l1va;
-    ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
+    ptep = &linear_pg_table[l1_linear_offset(l1va)];
 
     /*
      * STEP 1. Write-protect the p.t. page so no more updates can occur.
      */
 
-    if ( unlikely(__get_user(pte, ptep)) )
+    if ( unlikely(__copy_from_user(&pte, ptep, sizeof(pte))) )
     {
         MEM_LOG("ptwr: Could not read pte at %p\n", ptep);
         /*
@@ -2325,17 +2328,17 @@ void ptwr_flush(const int which)
     }
     PTWR_PRINTK("[%c] disconnected_l1va at %p is %p\n",
                 PTWR_PRINT_WHICH, ptep, pte);
-    pte &= ~_PAGE_RW;
+    l1_pgentry_val(pte) &= ~_PAGE_RW;
 
     if ( unlikely(shadow_mode_enabled(d)) )
     {
         /* Write-protect the p.t. page in the shadow page table. */
         l1pte_propagate_from_guest(d, &pte, &spte);
-        __put_user(spte, (unsigned long *)
-                   &shadow_linear_pg_table[l1_linear_offset(l1va)]);
+        __copy_to_user(&shadow_linear_pg_table[l1_linear_offset(l1va)],
+                      &spte, sizeof(spte));
 
         /* Is the p.t. page itself shadowed? Map it into Xen space if so. */
-        sstat = get_shadow_status(d, pte >> PAGE_SHIFT);
+        sstat = get_shadow_status(d, l1_pgentry_to_pfn(pte));
         if ( sstat & PSH_shadowed )
             sl1e = map_domain_mem((sstat & PSH_pfn_mask) << PAGE_SHIFT);
     }
@@ -2388,9 +2391,7 @@ void ptwr_flush(const int which)
             if ( likely(l1_pgentry_val(nl1e) & _PAGE_PRESENT) )
             {
                 if ( unlikely(sl1e != NULL) )
-                    l1pte_propagate_from_guest(
-                        d, &l1_pgentry_val(nl1e), 
-                        &l1_pgentry_val(sl1e[i]));
+                    l1pte_propagate_from_guest(d, &nl1e, &sl1e[i]);
                 put_page_type(&frame_table[l1_pgentry_to_pfn(nl1e)]);
             }
             continue;
@@ -2412,8 +2413,7 @@ void ptwr_flush(const int which)
         }
         
         if ( unlikely(sl1e != NULL) )
-            l1pte_propagate_from_guest(
-                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
+            l1pte_propagate_from_guest(d, &nl1e, &sl1e[i]);
 
         if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
             put_page_from_l1e(ol1e, d);
Index: xen/common/grant_table.c
===================================================================
--- xen.orig/common/grant_table.c       2005-03-10 11:55:10.000000000 +0100
+++ xen/common/grant_table.c    2005-03-12 14:39:26.000000000 +0100
@@ -281,12 +281,11 @@ __gnttab_map_grant_ref(
     {
         /* Write update into the pagetable
          */
+       l1_pgentry_t pte = mk_l1_pgentry((frame << PAGE_SHIFT)
+               | _PAGE_PRESENT  | _PAGE_ACCESSED | _PAGE_DIRTY
+               | ((flags & GNTMAP_readonly) ? 0 : _PAGE_RW));
         if ( 0 > (rc = update_grant_va_mapping( host_virt_addr,
-                                (frame << PAGE_SHIFT) | _PAGE_PRESENT  |
-                                                        _PAGE_ACCESSED |
-                                                        _PAGE_DIRTY    |
-                       ((flags & GNTMAP_readonly) ? 0 : _PAGE_RW),
-                       ld, led )) )
+                       pte, ld, led )) )
         {
             /* Abort. */
             act->pin -= (flags & GNTMAP_readonly) ?
Index: xen/include/asm/mm.h
===================================================================
--- xen.orig/include/asm/mm.h   2005-03-10 11:55:10.000000000 +0100
+++ xen/include/asm/mm.h        2005-03-12 14:39:26.000000000 +0100
@@ -340,7 +340,7 @@ void propagate_page_fault(unsigned long 
  * Caller must own d's BIGLOCK, is responsible for flushing the TLB,
  * and have already get_page'd */
 int update_grant_va_mapping(unsigned long va,
-                            unsigned long val,
+                            l1_pgentry_t  val,
                             struct domain *d,
                             struct exec_domain *ed);
 #define GNTUPDVA_prev_ro 1
Index: xen/arch/x86/vmx_platform.c
===================================================================
--- xen.orig/arch/x86/vmx_platform.c    2005-03-10 11:55:12.000000000 +0100
+++ xen/arch/x86/vmx_platform.c 2005-03-12 14:39:26.000000000 +0100
@@ -367,7 +367,7 @@ static int vmx_decode(const unsigned cha
 
 static int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
-    unsigned long gpte;
+    l1_pgentry_t gpte;
     unsigned long mfn;
     unsigned long ma;
     unsigned char * inst_start;
@@ -378,7 +378,7 @@ static int inst_copy_from_guest(unsigned
 
     if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
         gpte = gva_to_gpte(guest_eip);
-        mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
+        mfn = phys_to_machine_mapping(l1_pgentry_to_pfn(gpte));
         ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
         inst_start = (unsigned char *)map_domain_mem(ma);
                 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.sourceforge.net/lists/listinfo/xen-devel
