[Xen-changelog] [xen-unstable] x86/mm/p2m: little fixes and tidying up

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm/p2m: little fixes and tidying up
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 16 Jun 2011 11:12:30 +0100
Delivery-date: Thu, 16 Jun 2011 03:27:33 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 9344034d624b2e3cd6b0025ab2051cb89bd7e04a
# Parent  c7d0b66e41cde0cca1d9162b92b41fc6bd30cf20
x86/mm/p2m: little fixes and tidying up

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
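The recurring change below is the removal of the POPULATE_ON_DEMAND_MFN and PAGING_MFN placeholder constants: call sites now pass _mfn(0) and _mfn(INVALID_MFN) directly. The duplicate SUPERPAGE_PAGES definitions in p2m-pod.c and p2m-pt.c also go away, with the macro now coming from the per-arch page.h headers beside SUPERPAGE_ORDER. A minimal standalone sketch of the arithmetic the remaining superpage_aligned() helper relies on (plain C, not Xen source; PAGETABLE_ORDER hard-coded to 9, the value both x86 builds use):

#include <assert.h>

/* Stand-ins for the per-arch constants that page.h now provides;
 * PAGETABLE_ORDER is 9 on both x86_32 (PAE) and x86_64. */
#define PAGETABLE_ORDER   9
#define SUPERPAGE_ORDER   PAGETABLE_ORDER
#define SUPERPAGE_PAGES   (1 << SUPERPAGE_ORDER)

/* The alignment helper that stays behind in p2m-pod.c. */
#define superpage_aligned(_x)  (((_x) & (SUPERPAGE_PAGES - 1)) == 0)

int main(void)
{
    assert(SUPERPAGE_PAGES == 512);      /* 512 4k pages per 2MB superpage */
    assert(superpage_aligned(0x200));    /* gfn 0x200 starts a superpage   */
    assert(!superpage_aligned(0x201));
    return 0;
}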


diff -r c7d0b66e41cd -r 9344034d624b xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Jun 02 13:16:52 2011 +0100
@@ -42,7 +42,6 @@
 #undef page_to_mfn
 #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
 
-#define SUPERPAGE_PAGES (1UL << 9)
 #define superpage_aligned(_x)  (((_x)&(SUPERPAGE_PAGES-1))==0)
 
 /*
@@ -688,8 +687,7 @@
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    set_p2m_entry(p2m, gfn,
-                  _mfn(POPULATE_ON_DEMAND_MFN), 9,
+    set_p2m_entry(p2m, gfn, _mfn(0), 9,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -801,8 +799,7 @@
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        set_p2m_entry(p2m, gfns[i],
-                      _mfn(POPULATE_ON_DEMAND_MFN), 0,
+        set_p2m_entry(p2m, gfns[i], _mfn(0), 0,
                       p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
@@ -966,7 +963,7 @@
          * set_p2m_entry() should automatically shatter the 1GB page into 
          * 512 2MB pages. The rest of 511 calls are unnecessary.
          */
-        set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
+        set_p2m_entry(p2m, gfn_aligned, _mfn(0), 9,
                       p2m_populate_on_demand, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
@@ -1054,7 +1051,7 @@
     /* Remap this 2-meg region in singleton chunks */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
+        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), 0,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
@@ -1114,7 +1111,7 @@
     }
 
     /* Now, actually do the two-way mapping */
-    if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
+    if ( !set_p2m_entry(p2m, gfn, _mfn(0), order,
                         p2m_populate_on_demand, p2m->default_access) )
         rc = -EINVAL;
     else
diff -r c7d0b66e41cd -r 9344034d624b xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Jun 02 13:16:52 2011 +0100
@@ -51,9 +51,6 @@
 #define P2M_BASE_FLAGS \
         (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED)
 
-#define SUPERPAGE_PAGES (1UL << 9)
-#define superpage_aligned(_x)  (((_x)&(SUPERPAGE_PAGES-1))==0)
-
 static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
 {
     unsigned long flags;
@@ -67,32 +64,31 @@
 #else
     flags = (t & 0x7UL) << 9;
 #endif
-#ifndef HAVE_GRANT_MAP_P2M
-    BUG_ON(p2m_is_grant(t));
+
+#ifndef __x86_64__
+    /* 32-bit builds don't support a lot of the p2m types */
+    BUG_ON(t > p2m_populate_on_demand);
 #endif
+
     switch(t)
     {
     case p2m_invalid:
+    case p2m_mmio_dm:
+    case p2m_populate_on_demand:
     default:
         return flags;
+    case p2m_ram_ro:
+    case p2m_grant_map_ro:
+    case p2m_ram_logdirty:
+    case p2m_ram_shared:
+        return flags | P2M_BASE_FLAGS;
     case p2m_ram_rw:
     case p2m_grant_map_rw:
         return flags | P2M_BASE_FLAGS | _PAGE_RW;
-    case p2m_ram_logdirty:
-        return flags | P2M_BASE_FLAGS;
-    case p2m_ram_ro:
-    case p2m_grant_map_ro:
-        return flags | P2M_BASE_FLAGS;
-    case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
-    case p2m_mmio_dm:
-        return flags;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
         return flags | P2M_BASE_FLAGS | _PAGE_PCD;
-    case p2m_populate_on_demand:
-        return flags;
     }
 }
 
diff -r c7d0b66e41cd -r 9344034d624b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -243,31 +243,30 @@
                         p2m_invalid, p2m->default_access) )
         goto error;
 
-    if (p2m_is_nestedp2m(p2m))
-        goto nesteddone;
+    if ( !p2m_is_nestedp2m(p2m) )
+    {
+        /* Copy all existing mappings from the page list and m2p */
+        spin_lock(&p2m->domain->page_alloc_lock);
+        page_list_for_each(page, &p2m->domain->page_list)
+        {
+            mfn = page_to_mfn(page);
+            gfn = get_gpfn_from_mfn(mfn_x(mfn));
+            /* Pages should not be shared that early */
+            ASSERT(gfn != SHARED_M2P_ENTRY);
+            page_count++;
+            if (
+#ifdef __x86_64__
+                (gfn != 0x5555555555555555L)
+#else
+                (gfn != 0x55555555L)
+#endif
+                && gfn != INVALID_M2P_ENTRY
+                && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
+                goto error_unlock;
+        }
+        spin_unlock(&p2m->domain->page_alloc_lock);
+    }
 
-    /* Copy all existing mappings from the page list and m2p */
-    spin_lock(&p2m->domain->page_alloc_lock);
-    page_list_for_each(page, &p2m->domain->page_list)
-    {
-        mfn = page_to_mfn(page);
-        gfn = get_gpfn_from_mfn(mfn_x(mfn));
-        /* Pages should not be shared that early */
-        ASSERT(gfn != SHARED_M2P_ENTRY);
-        page_count++;
-        if (
-#ifdef __x86_64__
-            (gfn != 0x5555555555555555L)
-#else
-            (gfn != 0x55555555L)
-#endif
-             && gfn != INVALID_M2P_ENTRY
-            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
-            goto error_unlock;
-    }
-    spin_unlock(&p2m->domain->page_alloc_lock);
-
- nesteddone:
     P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
     p2m_unlock(p2m);
     return 0;
@@ -693,7 +692,8 @@
 
     /* Remove mapping from p2m table */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
+                  p2m_ram_paged, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -743,7 +743,8 @@
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
-        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
+        set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
+                      p2m_ram_paging_in_start, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
     }
diff -r c7d0b66e41cd -r 9344034d624b xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -47,10 +47,6 @@
  */
 #define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
 
-#ifdef __x86_64__
-#define HAVE_GRANT_MAP_P2M
-#endif
-
 /*
  * The upper levels of the p2m pagetable always contain full rights; all 
  * variation in the access control bits is made in the level-1 PTEs.
@@ -78,20 +74,16 @@
     p2m_mmio_direct = 5,        /* Read/write mapping of genuine MMIO area */
     p2m_populate_on_demand = 6, /* Place-holder for empty memory */
 
-    /* Note that these can only be used if HAVE_GRANT_MAP_P2M is
-       defined.  They get defined anyway so as to avoid lots of
-       #ifdef's everywhere else. */
-    p2m_grant_map_rw = 7,       /* Read/write grant mapping */
-    p2m_grant_map_ro = 8,       /* Read-only grant mapping */
-
-    /* Likewise, although these are defined in all builds, they can only
+    /* Although these are defined in all builds, they can only
      * be used in 64-bit builds */
+    p2m_grant_map_rw = 7,         /* Read/write grant mapping */
+    p2m_grant_map_ro = 8,         /* Read-only grant mapping */
     p2m_ram_paging_out = 9,       /* Memory that is being paged out */
     p2m_ram_paged = 10,           /* Memory that has been paged out */
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
     p2m_ram_shared = 13,          /* Shared or sharable memory */
-    p2m_ram_broken  =14,          /* Broken page, access cause domain crash */
+    p2m_ram_broken = 14,          /* Broken page, access cause domain crash */
 } p2m_type_t;
 
 /*
@@ -170,6 +162,9 @@
  * reinit the type correctly after fault */
 #define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw))
 #define P2M_SHARED_TYPES   (p2m_to_mask(p2m_ram_shared))
+
+/* Broken type: the frame backing this pfn has failed in hardware
+ * and must not be touched. */
 #define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken))
 
 /* Useful predicates */
@@ -190,12 +185,7 @@
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
 #define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
 
-/* Populate-on-demand */
-#define POPULATE_ON_DEMAND_MFN  (1<<9)
-#define POD_PAGE_ORDER 9
-
-#define PAGING_MFN  INVALID_MFN
-
+/* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
     spinlock_t         lock;
@@ -298,10 +288,6 @@
 
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
-/* Flushes specified p2m table */
-void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
-/* Flushes all nested p2m tables */
-void p2m_flush_nestedp2m(struct domain *d);
 
 /*
  * The P2M lock.  This protects all updates to the p2m table.
@@ -376,23 +362,6 @@
         spin_unlock(&(_domain)->arch.nested_p2m_lock);                 \
     } while (0)
 
-/* Extract the type from the PTE flags that store it */
-static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
-{
-    /* Type is stored in the "available" bits */
-#ifdef __x86_64__
-    /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
-     * to make sure that an entirely empty PTE doesn't have RAM type */
-    if ( flags == 0 ) 
-        return p2m_invalid;
-    /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
-     * 59-62 for iommu flags so we can't use them to store p2m type info. */
-    return (flags >> 12) & 0x7f;
-#else
-    return (flags >> 9) & 0x7;
-#endif
-}
-
 /* Read the current domain's p2m table.  Do not populate PoD pages. */
 static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
                                             unsigned long gfn, p2m_type_t *t,
@@ -508,6 +477,52 @@
 void p2m_teardown(struct p2m_domain *p2m);
 void p2m_final_teardown(struct domain *d);
 
+/* Add a page to a domain's p2m table */
+int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+                            unsigned long mfn, unsigned int page_order, 
+                            p2m_type_t t);
+
+/* Remove a page from a domain's p2m table */
+void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
+                            unsigned long mfn, unsigned int page_order);
+
+/* Set a p2m range as populate-on-demand */
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                          unsigned int order);
+
+/* Untyped version for RAM only, for compatibility */
+static inline int guest_physmap_add_page(struct domain *d,
+                                         unsigned long gfn,
+                                         unsigned long mfn,
+                                         unsigned int page_order)
+{
+    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+/* Remove a page from a domain's p2m table */
+static inline void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gfn,
+                               unsigned long mfn, unsigned int page_order)
+{
+    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
+}
+
+/* Change types across all p2m entries in a domain */
+void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
+
+/* Compare-exchange the type of a single p2m entry */
+p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
+                           p2m_type_t ot, p2m_type_t nt);
+
+/* Set mmio addresses in the p2m table (for pass-through) */
+int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
+
+
+/* 
+ * Populate-on-demand
+ */
+
 /* Dump PoD information about the domain */
 void p2m_pod_dump_data(struct p2m_domain *p2m);
 
@@ -540,52 +555,9 @@
 void
 p2m_pod_offline_or_broken_replace(struct page_info *p);
 
-/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
-                            unsigned long mfn, unsigned int page_order, 
-                            p2m_type_t t);
-
-/* Remove a page from a domain's p2m table */
-void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
-                            unsigned long mfn, unsigned int page_order);
-
-/* Set a p2m range as populate-on-demand */
-int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
-                                          unsigned int order);
-
-/* Untyped version for RAM only, for compatibility 
- *
- * Return 0 for success
+/*
+ * Paging to disk and page-sharing
  */
-static inline int guest_physmap_add_page(struct domain *d,
-                                         unsigned long gfn,
-                                         unsigned long mfn,
-                                         unsigned int page_order)
-{
-    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
-}
-
-/* Remove a page from a domain's p2m table */
-static inline void guest_physmap_remove_page(struct domain *d,
-                               unsigned long gfn,
-                               unsigned long mfn, unsigned int page_order)
-{
-    guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
-}
-
-/* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
-
-/* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
-                           p2m_type_t ot, p2m_type_t nt);
-
-/* Set mmio addresses in the p2m table (for pass-through) */
-int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
-int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
-
-void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
-    l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level);
 
 #ifdef __x86_64__
 /* Modify p2m table for shared gfn */
@@ -680,6 +652,40 @@
 #define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
 #endif
 
+
+/*
+ * Functions specific to the p2m-pt implementation
+ */
+
+/* Extract the type from the PTE flags that store it */
+static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
+{
+    /* Type is stored in the "available" bits */
+#ifdef __x86_64__
+    /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
+     * to make sure that an entirely empty PTE doesn't have RAM type */
+    if ( flags == 0 ) 
+        return p2m_invalid;
+    /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
+     * 59-62 for iommu flags so we can't use them to store p2m type info. */
+    return (flags >> 12) & 0x7f;
+#else
+    return (flags >> 9) & 0x7;
+#endif
+}
+
+/*
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization
+ */
+
+/* Flushes specified p2m table */
+void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
+/* Flushes all nested p2m tables */
+void p2m_flush_nestedp2m(struct domain *d);
+
+void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
+    l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level);
+
 #endif /* _XEN_P2M_H */
 
 /*
diff -r c7d0b66e41cd -r 9344034d624b xen/include/asm-x86/x86_32/page.h
--- a/xen/include/asm-x86/x86_32/page.h Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/include/asm-x86/x86_32/page.h Thu Jun 02 13:16:52 2011 +0100
@@ -15,6 +15,7 @@
 #define L3_PAGETABLE_ENTRIES    4
 #define ROOT_PAGETABLE_ENTRIES  L3_PAGETABLE_ENTRIES
 #define SUPERPAGE_ORDER         PAGETABLE_ORDER
+#define SUPERPAGE_PAGES         (1<<SUPERPAGE_ORDER)
 
 /*
  * Architecturally, physical addresses may be up to 52 bits. However, the
diff -r c7d0b66e41cd -r 9344034d624b xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Thu Jun 02 13:16:51 2011 +0100
+++ b/xen/include/asm-x86/x86_64/page.h Thu Jun 02 13:16:52 2011 +0100
@@ -17,6 +17,7 @@
 #define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
 #define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES
 #define SUPERPAGE_ORDER         PAGETABLE_ORDER
+#define SUPERPAGE_PAGES         (1<<SUPERPAGE_ORDER)
 
 #define __PAGE_OFFSET           DIRECTMAP_VIRT_START
 #define __XEN_VIRT_START        XEN_VIRT_START

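One hunk worth a note for readers of the header reshuffle above: p2m_flags_to_type() moves into the new "Functions specific to the p2m-pt implementation" section of p2m.h with its logic untouched. A standalone sketch of the 64-bit decode it performs (plain C, not Xen source; the enum values below are an assumption, since this patch only shows types 5 and upward):

#include <assert.h>

/* Assumed first entries of p2m_type_t (not visible in this patch). */
enum { p2m_ram_rw = 0, p2m_invalid = 1 };

/* 64-bit decode as in p2m_flags_to_type(): the type sits in PTE bits
 * 12 and up because the AMD IOMMU owns bits 9-11 and 59-62. */
static unsigned long flags_to_type_64(unsigned long flags)
{
    if ( flags == 0 )
        return p2m_invalid;   /* an empty PTE must not decode as RAM (type 0) */
    return (flags >> 12) & 0x7f;
}

int main(void)
{
    unsigned long flags = 6UL << 12;            /* p2m_populate_on_demand */
    assert(flags_to_type_64(flags) == 6);       /* encoding round-trips   */
    assert(flags_to_type_64(0) == p2m_invalid); /* empty PTE is invalid   */
    return 0;
}
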
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
