[Xen-devel] [PATCH] x86: cacheability page attributes

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] x86: cacheability page attributes
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Tue, 21 Aug 2007 16:32:21 +0100
Track the cacheability of a page (as requested by the owning guest) to avoid
virtual aliases with different cacheability. This is done by splitting
PGT_writable_page into a set of 8 attributes, each being writeable plus
one of the possible caching types (3 PTE bits). This is complicated by
the fact that transitions between non-read-only page types must now be
supported (e.g. writeable/WB -> writeable/UC), requiring some special
casing in the page table handling code.
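
(Illustration only, not part of the patch: a minimal standalone sketch of the
flag-to-type mapping that the patch's get_writable_type() implements. The
PGT_* constants are reduced here to the bare type nibble for readability, and
the _PAGE_* values are the standard x86 PTE bits; the WC case assumes
CONFIG_PAT as in the patch.)

#include <assert.h>
#include <stdio.h>

#define _PAGE_RW   0x002u  /* PTE bit 1: writable */
#define _PAGE_PWT  0x008u  /* PTE bit 3: write-through */
#define _PAGE_PCD  0x010u  /* PTE bit 4: cache disable */
#define _PAGE_PAT  0x080u  /* PTE bit 7: PAT index bit (4kB mappings) */

#define PGT_writable 0x8u  /* type nibble of PGT_writable_page */
#define PGT_pwt      0x1u  /* mirrors _PAGE_PWT */
#define PGT_pcd      0x2u  /* mirrors _PAGE_PCD */
#define PGT_pat      0x4u  /* mirrors _PAGE_PAT */

static unsigned int writable_type(unsigned int flags)
{
    unsigned int type = 0;

    /* Any cache attribute forces a writable type, so virtual aliases
     * with different cacheability are tracked even for r/o mappings. */
    if ( flags & _PAGE_RW )
        type = PGT_writable;
    if ( flags & _PAGE_PWT )
        type |= PGT_pwt | PGT_writable;
    if ( flags & _PAGE_PCD )
        type |= PGT_pcd | PGT_writable;
    if ( flags & _PAGE_PAT )
        type |= PGT_pat | PGT_writable;
    assert(type & PGT_writable);
    return type;
}

int main(void)
{
    printf("WB %#x, UC %#x, WC %#x\n",
           writable_type(_PAGE_RW),                      /* 0x8: WB  */
           writable_type(_PAGE_RW|_PAGE_PCD|_PAGE_PWT),  /* 0xb: UC  */
           writable_type(_PAGE_RW|_PAGE_PAT));           /* 0xc: WC  */
    return 0;
}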

Depends on the previously submitted TLB/cache flushing patch.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2007-08-08/xen/arch/x86/mm.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/mm.c   2007-08-21 14:18:00.000000000 +0200
+++ 2007-08-08/xen/arch/x86/mm.c        2007-08-21 14:18:39.000000000 +0200
@@ -147,6 +147,14 @@ struct page_info *frame_table;
 unsigned long max_page;
 unsigned long total_pages;
 
+#define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
+
+#define l1_disallow_mask(d) (!(d)->iomem_caps || \
+                             !rangeset_is_empty((d)->iomem_caps) || \
+                             !rangeset_is_empty((d)->arch.ioport_caps) ? \
+                             L1_DISALLOW_MASK : \
+                             L1_DISALLOW_MASK|PAGE_CACHE_ATTRS)
+
 #ifdef CONFIG_COMPAT
 l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
 #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
@@ -278,9 +286,10 @@ void share_xen_page_with_guest(
 
     spin_lock(&d->page_alloc_lock);
 
-    /* The incremented type count pins as writable or read-only. */
     page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
-    page->u.inuse.type_info |= PGT_validated | 1;
+    if ( readonly || d != dom_io )
+        /* The incremented type count pins as writable or read-only. */
+        page->u.inuse.type_info |= PGT_validated | 1;
 
     page_set_owner(page, d);
     wmb(); /* install valid domain ptr before updating refcnt. */
@@ -539,6 +548,74 @@ static int get_page_and_type_from_pagenr
     return 1;
 }
 
+static unsigned long get_writable_type(unsigned int flags)
+{
+    unsigned long type = PGT_none;
+
+    if ( flags & _PAGE_RW )
+        type = PGT_writable_page;
+    if ( flags & _PAGE_PWT )
+        type |= PGT_pwt_mask | PGT_writable_page;
+    if ( flags & _PAGE_PCD )
+        type |= PGT_pcd_mask | PGT_writable_page;
+#ifdef CONFIG_PAT
+    if ( flags & _PAGE_PAT )
+        type |= PGT_pat_mask | PGT_writable_page;
+#endif
+    BUG_ON(!(type & PGT_writable_page));
+
+    return type;
+}
+
+static int alloc_writable_page(struct page_info *page, unsigned long type)
+{
+    unsigned long mfn = page_to_mfn(page);
+    unsigned int flags = 0;
+    int ret;
+
+    if ( page_get_owner(page) == dom_io )
+        return 1;
+#ifdef __i386__
+    if ( mfn >= ((DIRECTMAP_VIRT_END - DIRECTMAP_VIRT_START) >> PAGE_SHIFT) )
+        return 1;
+#endif
+
+    if ( type & PGT_pwt_mask )
+        flags |= _PAGE_PWT;
+    if ( type & PGT_pcd_mask )
+        flags |= _PAGE_PCD;
+#ifdef CONFIG_PAT
+    if ( type & PGT_pat_mask )
+        flags |= _PAGE_PAT;
+#endif
+    ret = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                           PAGE_HYPERVISOR | flags);
+    if ( ret == 0 )
+        return 1;
+
+    MEM_LOG("Error %d changing cacheability of mfn %lx", ret, mfn);
+    return 0;
+}
+
+static void free_writable_page(struct page_info *page)
+{
+    unsigned long mfn = page_to_mfn(page);
+
+    if ( page_get_owner(page) == dom_io )
+        return;
+#ifdef __i386__
+    if ( mfn >= ((DIRECTMAP_VIRT_END - DIRECTMAP_VIRT_START) >> PAGE_SHIFT) )
+        return;
+#endif
+
+    if ( map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                          PAGE_HYPERVISOR) )
+    {
+        printk("Reverting cacheability for %lx failed\n", mfn);
+        BUG();
+    }
+}
+
 /*
  * We allow root tables to map each other (a.k.a. linear page tables). It
  * needs some special care with reference counts and access permissions:
@@ -599,15 +676,16 @@ get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long mfn = l1e_get_pfn(l1e);
+    unsigned int flags = l1e_get_flags(l1e);
     struct page_info *page = mfn_to_page(mfn);
     int okay;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+    if ( !(flags & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
+    if ( unlikely(flags & l1_disallow_mask(d)) )
     {
-        MEM_LOG("Bad L1 flags %x", l1e_get_flags(l1e) & L1_DISALLOW_MASK);
+        MEM_LOG("Bad L1 flags %x", flags & l1_disallow_mask(d));
         return 0;
     }
 
@@ -637,9 +715,9 @@ get_page_from_l1e(
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    okay = (((l1e_get_flags(l1e) & _PAGE_RW) && 
+    okay = (((flags & (_PAGE_RW|PAGE_CACHE_ATTRS)) &&
              !(unlikely(paging_mode_external(d) && (d != current->domain))))
-            ? get_page_and_type(page, d, PGT_writable_page)
+            ? get_page_and_type(page, d, get_writable_type(flags))
             : get_page(page, d));
     if ( !okay )
     {
@@ -832,7 +910,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
 
     /* Remember we didn't take a type-count of foreign writable mappings
      * to paging-external domains */
-    if ( (l1e_get_flags(l1e) & _PAGE_RW) && 
+    if ( (l1e_get_flags(l1e) & (_PAGE_RW|PAGE_CACHE_ATTRS)) &&
          !(unlikely((e != d) && paging_mode_external(e))) )
     {
         put_page_and_type(page);
@@ -1333,6 +1411,60 @@ static inline int update_intpte(intpte_t
                   _t ## e_get_intpte(_o), _t ## e_get_intpte(_n),   \
                   (_m), (_v))
 
+/*
+ * Present->present transitions referencing the same page with old and new
+ * attributes resulting in (different) PGT_writable_page types and with the
+ * type use count being 1 must be special cased, as the transition would
+ * otherwise fail.
+ */
+static int transition_writable_page(l1_pgentry_t *pl1e, l1_pgentry_t ol1e,
+                                    l1_pgentry_t nl1e, unsigned long gl1mfn,
+                                    int do_cmpxchg)
+{
+    struct page_info *page = l1e_get_page(nl1e);
+    unsigned long type = get_writable_type(l1e_get_flags(ol1e));
+    unsigned long nx = type | 2;
+    unsigned long x = type | PGT_validated | 1;
+
+    if ( cmpxchg(&page->u.inuse.type_info, x, nx) == x )
+    {
+        /*
+         * The adjustment is now safe because the refcnt is 2 and validated
+         * bit is clear => non-free ops will spin or fail, and a racing free
+         * is illegal (will crash domain below).
+         */
+        type = get_writable_type(l1e_get_flags(nl1e));
+        if ( alloc_writable_page(page, type) )
+        {
+            x = nx;
+            nx = type | PGT_validated | 1;
+            if ( cmpxchg(&page->u.inuse.type_info, x, nx) == x )
+            {
+                if ( do_cmpxchg )
+                {
+                    intpte_t t = l1e_get_intpte(ol1e);
+
+                    if ( paging_cmpxchg_guest_entry(current,
+                                                    &l1e_get_intpte(*pl1e),
+                                                    &t,
+                                                    l1e_get_intpte(nl1e),
+                                                    _mfn(gl1mfn)) )
+                        return t == l1e_get_intpte(ol1e);
+                }
+                else if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current) )
+                    return 1;
+            }
+            domain_crash(current->domain);
+            return 0;
+        }
+        page->u.inuse.type_info |= PGT_validated;
+        /* Drop the extra reference. */
+        put_page_type(page);
+    }
+
+    return -1;
+}
+
 /* Update the L1 entry at pl1e to new value nl1e. */
 static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, 
                         unsigned long gl1mfn)
@@ -1356,19 +1488,31 @@ static int mod_l1_entry(l1_pgentry_t *pl
         ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
         nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
 
-        if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) )
+        if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
         {
             MEM_LOG("Bad L1 flags %x",
-                    l1e_get_flags(nl1e) & L1_DISALLOW_MASK);
+                    l1e_get_flags(nl1e) & l1_disallow_mask(d));
             return 0;
         }
 
         adjust_guest_l1e(nl1e, d);
 
-        /* Fast path for identical mapping, r/w and presence. */
-        if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
+        /* Fast path for identical mapping, r/w, cacheability, and presence. */
+        if ( !l1e_has_changed(ol1e, nl1e,
+                              _PAGE_RW | _PAGE_PRESENT | PAGE_CACHE_ATTRS) )
             return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current);
 
+        if ( !l1e_has_changed(ol1e, nl1e, _PAGE_PRESENT) &&
+             (l1e_get_flags(ol1e) & (_PAGE_RW | PAGE_CACHE_ATTRS)) &&
+             (l1e_get_flags(nl1e) & (_PAGE_RW | PAGE_CACHE_ATTRS)) &&
+             mfn_valid(l1e_get_pfn(nl1e)) )
+        {
+            int ret = transition_writable_page(pl1e, ol1e, nl1e, gl1mfn, 0);
+
+            if ( ret >= 0 )
+                return ret;
+        }
+
         if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
             return 0;
         
@@ -1563,10 +1707,13 @@ static int mod_l4_entry(struct domain *d
 
 #endif
 
-int alloc_page_type(struct page_info *page, unsigned long type)
+static int alloc_page_type(struct page_info *page, unsigned long type)
 {
     struct domain *owner = page_get_owner(page);
 
+    if ( type & PGT_writable_page )
+        return alloc_writable_page(page, type);
+
     /* A page table is dirtied when its type count becomes non-zero. */
     if ( likely(owner != NULL) )
         paging_mark_dirty(owner, page_to_mfn(page));
@@ -1600,6 +1747,12 @@ void free_page_type(struct page_info *pa
     struct domain *owner = page_get_owner(page);
     unsigned long gmfn;
 
+    if ( type & PGT_writable_page )
+    {
+        free_writable_page(page);
+        return;
+    }
+
     if ( likely(owner != NULL) )
     {
         /*
@@ -1669,11 +1822,13 @@ void put_page_type(struct page_info *pag
 
         if ( unlikely((nx & PGT_count_mask) == 0) )
         {
-            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
+            if ( (unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) ||
+                  unlikely((nx & PGT_type_mask) > PGT_writable_page)) &&
                  likely(nx & PGT_validated) )
             {
                 /*
-                 * Page-table pages must be unvalidated when count is zero. The
+                 * Page-table pages and writable pages with non-default
+                 * cacheability must be unvalidated when count is zero. The
                  * 'free' is safe because the refcnt is non-zero and validated
                  * bit is clear => other ops will spin or fail.
                  */
@@ -1742,7 +1897,7 @@ int get_page_type(struct page_info *page
                 if ( unlikely(!cpus_empty(mask)) &&
                      /* Shadow mode: track only writable pages. */
                      (!shadow_mode_enabled(page_get_owner(page)) ||
-                      ((nx & PGT_type_mask) == PGT_writable_page)) )
+                      (nx & PGT_writable_page)) )
                 {
                     perfc_incr(need_flush_tlb_flush);
                     flush_tlb_mask(mask);
@@ -3304,8 +3459,30 @@ static int ptwr_emulated_update(
     ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
     ASSERT(page_get_owner(page) == d);
 
+    pl1e = (l1_pgentry_t *)((unsigned long)map_domain_page(mfn)
+                            + (addr & ~PAGE_MASK));
+    ol1e = do_cmpxchg ? l1e_from_intpte(old) : *pl1e;
+
     /* Check the new PTE. */
     nl1e = l1e_from_intpte(val);
+    adjust_guest_l1e(nl1e, d);
+
+    if ( (l1e_get_flags(nl1e) & _PAGE_PRESENT) &&
+         !l1e_has_changed(ol1e, nl1e, _PAGE_PRESENT) &&
+         l1e_has_changed(ol1e, nl1e, _PAGE_RW | PAGE_CACHE_ATTRS) &&
+         (l1e_get_flags(ol1e) & (_PAGE_RW | PAGE_CACHE_ATTRS)) &&
+         (l1e_get_flags(nl1e) & (_PAGE_RW | PAGE_CACHE_ATTRS)) &&
+         mfn_valid(l1e_get_pfn(nl1e)) )
+    {
+        int ret = transition_writable_page(pl1e, ol1e, nl1e, mfn, do_cmpxchg);
+
+        if ( ret >= 0 )
+        {
+            unmap_domain_page(pl1e);
+            return ret ? X86EMUL_OKAY : X86EMUL_RETRY;
+        }
+    }
+
     if ( unlikely(!get_page_from_l1e(nl1e, d)) )
     {
         if ( (CONFIG_PAGING_LEVELS >= 3) && is_pv_32bit_domain(d) &&
@@ -3324,21 +3501,17 @@ static int ptwr_emulated_update(
         }
         else
         {
+            unmap_domain_page(pl1e);
             MEM_LOG("ptwr_emulate: could not get_page_from_l1e()");
             return X86EMUL_UNHANDLEABLE;
         }
     }
 
-    adjust_guest_l1e(nl1e, d);
-
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_page(mfn);
-    pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
         int okay;
         intpte_t t = old;
-        ol1e = l1e_from_intpte(old);
 
         okay = paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e),
                                           &t, val, _mfn(mfn));
@@ -3351,12 +3524,8 @@ static int ptwr_emulated_update(
             return X86EMUL_CMPXCHG_FAILED;
         }
     }
-    else
-    {
-        ol1e = *pl1e;
-        if ( !UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, mfn, v) )
-            BUG();
-    }
+    else if ( !UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, mfn, v) )
+        BUG();
 
     unmap_domain_page(pl1e);
 
@@ -3470,11 +3639,27 @@ void free_xen_pagetable(void *v)
         free_domheap_page(virt_to_page(v));
 }
 
+static inline unsigned int l1f_to_l2f(unsigned int flags)
+{
+    if ( flags & _PAGE_PAT )
+        flags = (flags & ~_PAGE_PAT) | _PAGE_PSE_PAT;
+    return flags | _PAGE_PSE;
+}
+
+static inline unsigned int l2f_to_l1f(unsigned int flags)
+{
+    ASSERT(flags & _PAGE_PSE);
+    flags &= ~_PAGE_PSE;
+    if ( flags & _PAGE_PSE_PAT )
+        flags = (flags & ~_PAGE_PSE_PAT) | _PAGE_PAT;
+    return flags;
+}
+
 int map_pages_to_xen(
     unsigned long virt,
     unsigned long mfn,
     unsigned long nr_mfns,
-    unsigned long flags)
+    unsigned int flags)
 {
     l2_pgentry_t *pl2e, ol2e;
     l1_pgentry_t *pl1e, ol1e;
@@ -3493,11 +3678,15 @@ int map_pages_to_xen(
         {
             /* Super-page mapping. */
             ol2e = *pl2e;
-            l2e_write_atomic(pl2e, l2e_from_pfn(mfn, flags|_PAGE_PSE));
+            l2e_write_atomic(pl2e, l2e_from_pfn(mfn, l1f_to_l2f(flags)));
 
             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
-                flush_one_local((void *)virt, FLUSH_TLB_GLOBAL|2);
+                flush_one_global((void *)virt,
+                                 ((l2e_get_flags(ol2e) ^ l1f_to_l2f(flags)) &
+                                  l1f_to_l2f(PAGE_CACHE_ATTRS)) ?
+                                 FLUSH_CACHE|FLUSH_TLB_GLOBAL|2 :
+                                 FLUSH_TLB_GLOBAL|2);
                 if ( !(l2e_get_flags(ol2e) & _PAGE_PSE) )
                     free_xen_pagetable(mfn_to_virt(l2e_get_pfn(ol2e)));
             }
@@ -3512,31 +3701,73 @@ int map_pages_to_xen(
             if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
             {
                 pl1e = alloc_xen_pagetable();
+                if ( pl1e == NULL )
+                    return -ENOMEM;
                 clear_page(pl1e);
                 l2e_write(pl2e, l2e_from_pfn(virt_to_mfn(pl1e),
                                              __PAGE_HYPERVISOR));
             }
             else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
             {
+                if ( (l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
+                     l1_table_offset(virt) == mfn &&
+                     ((l2f_to_l1f(l2e_get_flags(*pl2e)) ^ flags) &
+                      ~(_PAGE_ACCESSED|_PAGE_DIRTY)) == 0 )
+                {
+                    virt    += 1UL << L1_PAGETABLE_SHIFT;
+                    mfn     += 1UL;
+                    nr_mfns -= 1UL;
+                    continue;
+                }
+
                 pl1e = alloc_xen_pagetable();
+                if ( pl1e == NULL )
+                    return -ENOMEM;
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     l1e_write(&pl1e[i],
                               l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
-                                           l2e_get_flags(*pl2e) & ~_PAGE_PSE));
+                                           l2f_to_l1f(l2e_get_flags(*pl2e))));
                 l2e_write_atomic(pl2e, l2e_from_pfn(virt_to_mfn(pl1e),
                                                     __PAGE_HYPERVISOR));
-                flush_one_local((void *)virt, FLUSH_TLB_GLOBAL|2);
+                flush_one_global((void *)virt, FLUSH_TLB_GLOBAL|2);
             }
 
             pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
             ol1e  = *pl1e;
             l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags));
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
-                local_flush_tlb_one(virt);
+                flush_one_global((void *)virt,
+                                 ((l1e_get_flags(ol1e) ^ flags)
+                                  & PAGE_CACHE_ATTRS) ?
+                                 FLUSH_CACHE|FLUSH_TLB_GLOBAL|1 :
+                                 FLUSH_TLB_GLOBAL|1);
 
             virt    += 1UL << L1_PAGETABLE_SHIFT;
             mfn     += 1UL;
             nr_mfns -= 1UL;
+
+            if ( !map_small_pages &&
+                 flags == PAGE_HYPERVISOR &&
+                 ( nr_mfns == 0 ||
+                   ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0)) )
+            {
+                unsigned long base_mfn;
+
+                pl1e  = l2e_to_l1e(*pl2e);
+                base_mfn = l1e_get_pfn(*pl1e);
+                for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++, pl1e++ )
+                    if ( l1e_get_pfn(*pl1e) != base_mfn + i ||
+                         l1e_get_flags(*pl1e) != flags )
+                        break;
+                if ( i == L1_PAGETABLE_ENTRIES )
+                {
+                    ol2e = *pl2e;
+                    l2e_write(pl2e, l2e_from_pfn(base_mfn, l1f_to_l2f(flags)));
+                    flush_one_global((void *)(virt - (1UL << L2_PAGETABLE_SHIFT)),
+                                     FLUSH_TLB_GLOBAL|2);
+                    free_xen_pagetable(mfn_to_virt(l2e_get_pfn(ol2e)));
+                }
+            }
         }
     }
 
Index: 2007-08-08/xen/arch/x86/mm/shadow/common.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/mm/shadow/common.c     2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/arch/x86/mm/shadow/common.c  2007-08-08 12:03:19.000000000 +0200
@@ -1354,7 +1354,7 @@ static void sh_hash_audit_bucket(struct 
             /* Bad shadow flags on guest page? */
             BUG_ON( !(gpg->shadow_flags & (1<<sp->type)) );
             /* Bad type count on guest page? */
-            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
+            if ( (gpg->u.inuse.type_info & PGT_writable_page)
                  && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
             {
                 SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
Index: 2007-08-08/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/mm/shadow/multi.c      2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/arch/x86/mm/shadow/multi.c   2007-08-08 12:03:19.000000000 +0200
@@ -4261,8 +4261,7 @@ audit_gfn_to_mfn(struct vcpu *v, gfn_t g
     if ( !shadow_mode_translate(v->domain) )
         return _mfn(gfn_x(gfn));
     
-    if ( (mfn_to_page(gmfn)->u.inuse.type_info & PGT_type_mask)
-         != PGT_writable_page ) 
+    if ( !(mfn_to_page(gmfn)->u.inuse.type_info & PGT_writable_page) )
         return _mfn(gfn_x(gfn)); /* This is a paging-disabled shadow */
     else 
         return gfn_to_mfn(v->domain, gfn_x(gfn));
Index: 2007-08-08/xen/include/asm-x86/mm.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/mm.h    2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/mm.h 2007-08-08 12:03:19.000000000 +0200
@@ -64,24 +64,35 @@ struct page_info
 };
 
  /* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none            (0U<<29) /* no special uses of this page */
-#define PGT_l1_page_table   (1U<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table   (2U<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table   (3U<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table   (4U<<29) /* using this page as an L4 page table? */
-#define PGT_gdt_page        (5U<<29) /* using this page in a GDT? */
-#define PGT_ldt_page        (6U<<29) /* using this page in an LDT? */
-#define PGT_writable_page   (7U<<29) /* has writable mappings of this page? */
-#define PGT_type_mask       (7U<<29) /* Bits 29-31. */
+#define PGT_none            (0U<<28) /* no special uses of this page */
+#define PGT_l1_page_table   (1U<<28) /* using this page as an L1 page table? */
+#define PGT_l2_page_table   (2U<<28) /* using this page as an L2 page table? */
+#define PGT_l3_page_table   (3U<<28) /* using this page as an L3 page table? */
+#define PGT_l4_page_table   (4U<<28) /* using this page as an L4 page table? */
+#define PGT_gdt_page        (5U<<28) /* using this page in a GDT? */
+#define PGT_ldt_page        (6U<<28) /* using this page in an LDT? */
+#define PGT_writable_page   (0x8U<<28) /* has writable mappings of this page? */
+#define PGT_pwt_mask        (0x1U<<28) /* (l1e & _PAGE_PWT) mirror */
+#define PGT_pcd_mask        (0x2U<<28) /* (l1e & _PAGE_PCD) mirror */
+#define PGT_wb_page         (0x8U<<28) /* WB cached writable page? */
+#define PGT_wt_page         (0x9U<<28) /* WT cached writable page? */
+#define PGT_ucm_page        (0xAU<<28) /* UC- cached writable page? */
+#define PGT_uc_page         (0xBU<<28) /* UC cached writable page? */
+#ifdef CONFIG_PAT
+#define PGT_pat_mask        (0x4U<<28) /* (l1e & _PAGE_PAT) mirror */
+#define PGT_wc_page         (0xCU<<28) /* WC cached writable page? */
+#define PGT_wp_page         (0xDU<<28) /* WP cached writable page? */
+#endif
+#define PGT_type_mask       (0xFU<<28) /* Bits 28-31. */
 
  /* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned         28
+#define _PGT_pinned         22
 #define PGT_pinned          (1U<<_PGT_pinned)
  /* Has this page been validated for use as its current type? */
-#define _PGT_validated      27
+#define _PGT_validated      21
 #define PGT_validated       (1U<<_PGT_validated)
  /* PAE only: is this an L2 page directory containing Xen-private mappings? */
-#define _PGT_pae_xen_l2     26
+#define _PGT_pae_xen_l2     20
 #define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)
 
  /* 16-bit count of uses of this frame as its current type. */
@@ -144,7 +155,6 @@ extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
 
-int alloc_page_type(struct page_info *page, unsigned long type);
 void free_page_type(struct page_info *page, unsigned long type);
 int _shadow_mode_refcounts(struct domain *d);
 
Index: 2007-08-08/xen/include/asm-x86/page.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/page.h  2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/page.h       2007-08-08 12:03:19.000000000 +0200
@@ -355,13 +355,13 @@ void free_xen_pagetable(void *v);
 l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
 
 /* Map machine page range in Xen virtual address space. */
-#define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
+#define MAP_SMALL_PAGES (1U<<31) /* don't use superpages for the mapping */
 int
 map_pages_to_xen(
     unsigned long virt,
     unsigned long mfn,
     unsigned long nr_mfns,
-    unsigned long flags);
+    unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
 
 #endif /* !__ASSEMBLY__ */
Index: 2007-08-08/xen/include/asm-x86/x86_32/page-3level.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/x86_32/page-3level.h    2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/x86_32/page-3level.h 2007-08-08 12:03:19.000000000 +0200
@@ -85,6 +85,6 @@ typedef l3_pgentry_t root_pgentry_t;
 #define get_pte_flags(x) (((int)((x) >> 32) & ~0xFFF) | ((int)(x) & 0xFFF))
 #define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 32) | ((x) & 0xFFF))
 
-#define L3_DISALLOW_MASK 0xFFFFF1E6U /* must-be-zero */
+#define L3_DISALLOW_MASK 0xFFFFF1FEU /* must-be-zero */
 
 #endif /* __X86_32_PAGE_3LEVEL_H__ */
Index: 2007-08-08/xen/include/asm-x86/x86_32/page.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/x86_32/page.h   2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/x86_32/page.h        2007-08-08 12:03:19.000000000 +0200
@@ -33,13 +33,13 @@ extern unsigned int PAGE_HYPERVISOR_NOCA
     (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_GNTTAB)
 
 /*
- * Disallow unused flag bits plus PAT, PSE and GLOBAL.
+ * Disallow unused flag bits plus PAT/PSE and GLOBAL.
  * Permit the NX bit if the hardware supports it.
  */
 #define BASE_DISALLOW_MASK (0xFFFFF180U & ~_PAGE_NX)
 
 #define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
-#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK)
+#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_PCD | _PAGE_PWT)
 
 #endif /* __X86_32_PAGE_H__ */
 
Index: 2007-08-08/xen/include/asm-x86/x86_64/page.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/x86_64/page.h   2007-08-21 14:15:47.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/x86_64/page.h        2007-08-08 12:03:19.000000000 +0200
@@ -105,18 +105,18 @@ typedef l4_pgentry_t root_pgentry_t;
 #define _PAGE_NX     (cpu_has_nx ? _PAGE_NX_BIT : 0U)
 
 /*
- * Disallow unused flag bits plus PAT, PSE and GLOBAL.
+ * Disallow unused flag bits plus PAT/PSE and GLOBAL.
  * Permit the NX bit if the hardware supports it.
  * Note that range [62:52] is available for software use on x86/64.
  */
 #define BASE_DISALLOW_MASK (0xFF800180U & ~_PAGE_NX)
 
 #define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
-#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK)
-#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
-#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)
+#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_PCD | _PAGE_PWT)
+#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_PCD | _PAGE_PWT)
+#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_PCD | _PAGE_PWT)
 
-#define COMPAT_L3_DISALLOW_MASK 0xFFFFF1E6U
+#define COMPAT_L3_DISALLOW_MASK 0xFFFFF1FEU
 
 #define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
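
(Illustration only, not part of the patch: the two-step type_info transition
performed by transition_writable_page() above, modelled standalone with C11
atomics. Assumed simplifications: a bare 32-bit type word with the type
nibble in bits 28-31, PGT_validated at bit 21, and the use count in the low
16 bits, matching the patch's asm-x86/mm.h changes.)

#include <stdatomic.h>
#include <stdio.h>

#define PGT_wb_page    (0x8u << 28)  /* writable, WB cacheability */
#define PGT_uc_page    (0xBu << 28)  /* writable, UC cacheability */
#define PGT_validated  (1u << 21)

int main(void)
{
    _Atomic unsigned int type_info = PGT_wb_page | PGT_validated | 1;

    /* Step 1: move from "WB, validated, count 1" to "WB, count 2, not
     * validated", so racing type operations spin or fail meanwhile. */
    unsigned int x = PGT_wb_page | PGT_validated | 1;
    unsigned int nx = PGT_wb_page | 2;
    if ( !atomic_compare_exchange_strong(&type_info, &x, nx) )
        return 1;  /* lost a race; the caller falls back to the slow path */

    /* ... here the hypervisor remaps the page with the new attributes
     * (alloc_writable_page() -> map_pages_to_xen()) ... */

    /* Step 2: install the new type, re-validate, drop the extra count. */
    x = nx;
    nx = PGT_uc_page | PGT_validated | 1;
    if ( !atomic_compare_exchange_strong(&type_info, &x, nx) )
        return 1;  /* a racing free is illegal; the domain is crashed */

    printf("type_info now %#x (UC, validated, count 1)\n", type_info);
    return 0;
}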


