# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1305302438 -3600
# Node ID 5f762329e746d99b1b4e68de6df71ec10d676cb9
# Parent c5aebdd80c6d8e10113342c023b9e42d20daf2f0
x86/mm/p2m: little fixes and tidying up

Define SUPERPAGE_PAGES once in the page.h headers (derived from
SUPERPAGE_ORDER) instead of duplicating it in p2m-pod.c and p2m-pt.c;
replace the POPULATE_ON_DEMAND_MFN and PAGING_MFN magic values with
_mfn(0) and _mfn(INVALID_MFN); drop HAVE_GRANT_MAP_P2M and consolidate
the p2m_type_to_flags() switch; restructure p2m_alloc_table() so it no
longer needs the nesteddone label; and regroup the declarations in
p2m.h by functional area.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
diff -r c5aebdd80c6d -r 5f762329e746 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Fri May 13 17:00:38 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Fri May 13 17:00:38 2011 +0100
@@ -60,7 +60,6 @@ extern void audit_p2m(struct p2m_domain
# define audit_p2m(_p2m, _m2p) do { (void)(_p2m),(_m2p); } while (0)
#endif /* P2M_AUDIT */
-#define SUPERPAGE_PAGES (1UL << 9)
#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)
/*
@@ -706,8 +705,7 @@ p2m_pod_zero_check_superpage(struct p2m_
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfn,
- _mfn(POPULATE_ON_DEMAND_MFN), 9,
+ set_p2m_entry(p2m, gfn, _mfn(0), 9,
p2m_populate_on_demand, p2m->default_access);
/* Make none of the MFNs are used elsewhere... for example, mapped
@@ -819,8 +817,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfns[i],
- _mfn(POPULATE_ON_DEMAND_MFN), 0,
+ set_p2m_entry(p2m, gfns[i], _mfn(0), 0,
p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
@@ -984,7 +981,7 @@ p2m_pod_demand_populate(struct p2m_domai
* set_p2m_entry() should automatically shatter the 1GB page into
* 512 2MB pages. The rest of 511 calls are unnecessary.
*/
- set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
+ set_p2m_entry(p2m, gfn_aligned, _mfn(0), 9,
p2m_populate_on_demand, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -1072,7 +1069,7 @@ remap_and_retry:
/* Remap this 2-meg region in singleton chunks */
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
- set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
+ set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), 0,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
@@ -1132,7 +1129,7 @@ guest_physmap_mark_populate_on_demand(st
}
/* Now, actually do the two-way mapping */
- if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
+ if ( !set_p2m_entry(p2m, gfn, _mfn(0), order,
p2m_populate_on_demand, p2m->default_access) )
rc = -EINVAL;
else
diff -r c5aebdd80c6d -r 5f762329e746 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Fri May 13 17:00:38 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c Fri May 13 17:00:38 2011 +0100
@@ -68,9 +68,6 @@
#define P2M_BASE_FLAGS \
(_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define SUPERPAGE_PAGES (1UL << 9)
-#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)
-
static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
{
unsigned long flags;
@@ -84,32 +81,31 @@ static unsigned long p2m_type_to_flags(p
#else
flags = (t & 0x7UL) << 9;
#endif
-#ifndef HAVE_GRANT_MAP_P2M
- BUG_ON(p2m_is_grant(t));
+
+#ifndef __x86_64__
+ /* 32-bit builds don't support a lot of the p2m types */
+ BUG_ON(t > p2m_populate_on_demand);
#endif
+
switch(t)
{
case p2m_invalid:
+ case p2m_mmio_dm:
+ case p2m_populate_on_demand:
default:
return flags;
+ case p2m_ram_ro:
+ case p2m_grant_map_ro:
+ case p2m_ram_logdirty:
+ case p2m_ram_shared:
+ return flags | P2M_BASE_FLAGS;
case p2m_ram_rw:
case p2m_grant_map_rw:
return flags | P2M_BASE_FLAGS | _PAGE_RW;
- case p2m_ram_logdirty:
- return flags | P2M_BASE_FLAGS;
- case p2m_ram_ro:
- case p2m_grant_map_ro:
- return flags | P2M_BASE_FLAGS;
- case p2m_ram_shared:
- return flags | P2M_BASE_FLAGS;
- case p2m_mmio_dm:
- return flags;
case p2m_mmio_direct:
if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
flags |= _PAGE_RW;
return flags | P2M_BASE_FLAGS | _PAGE_PCD;
- case p2m_populate_on_demand:
- return flags;
}
}
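
Note on the p2m_type_to_flags() consolidation above: the p2m type lives
entirely in the PTE's software-available bits. p2m_type_to_flags() writes
it and p2m_flags_to_type() (moved within p2m.h further down) reads it
back, and since invalid, mmio_dm and populate-on-demand entries never get
_PAGE_PRESENT, the MFN such an entry carries is only a placeholder, which
is why plain _mfn(0) is good enough in the p2m-pod.c hunks above. A
throwaway sketch of the 64-bit encoding, using simplified stand-in
definitions rather than the real Xen headers:

    #include <assert.h>

    /* Illustration only, not hypervisor code.  On 64-bit, bits 9-11 and
     * 59-62 of a p2m PTE belong to the AMD IOMMU, so the type is kept in
     * bits 12 and up. */
    #define _PAGE_PRESENT 0x001UL

    typedef enum {
        p2m_invalid            = 0,
        p2m_ram_rw             = 1,
        p2m_populate_on_demand = 6,
        p2m_ram_broken         = 14,
    } p2m_type_t;

    static unsigned long type_to_flags(p2m_type_t t)
    {
        unsigned long flags = ((unsigned long)t & 0x7fUL) << 12;
        switch ( t )
        {
        case p2m_ram_rw:
            return flags | _PAGE_PRESENT;  /* RAM types also get base flags */
        default:
            return flags;                  /* invalid, mmio_dm, PoD: not present */
        }
    }

    static p2m_type_t flags_to_type(unsigned long flags)
    {
        if ( flags == 0 )
            return p2m_invalid;   /* an all-zero PTE must never look like RAM */
        return (p2m_type_t)((flags >> 12) & 0x7f);
    }

    int main(void)
    {
        /* Every type round-trips through the flag bits... */
        for ( unsigned int t = p2m_invalid; t <= p2m_ram_broken; t++ )
            assert(flags_to_type(type_to_flags((p2m_type_t)t)) == (p2m_type_t)t);
        /* ...and a PoD entry is never present, so the placeholder MFN it
         * stores (now _mfn(0)) is never used as a real frame number. */
        assert(!(type_to_flags(p2m_populate_on_demand) & _PAGE_PRESENT));
        assert(type_to_flags(p2m_ram_rw) & _PAGE_PRESENT);
        return 0;
    }

The 32-bit encoding keeps only three type bits ((t & 0x7) << 9), so the
higher-numbered types cannot all be represented there; that is what the
new BUG_ON(t > p2m_populate_on_demand) guards against.
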
diff -r c5aebdd80c6d -r 5f762329e746 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Fri May 13 17:00:38 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Fri May 13 17:00:38 2011 +0100
@@ -251,31 +251,30 @@ int p2m_alloc_table(struct p2m_domain *p
p2m_invalid, p2m->default_access) )
goto error;
- if (p2m_is_nestedp2m(p2m))
- goto nesteddone;
+ if ( !p2m_is_nestedp2m(p2m) )
+ {
+ /* Copy all existing mappings from the page list and m2p */
+ spin_lock(&p2m->domain->page_alloc_lock);
+ page_list_for_each(page, &p2m->domain->page_list)
+ {
+ mfn = page_to_mfn(page);
+ gfn = get_gpfn_from_mfn(mfn_x(mfn));
+ /* Pages should not be shared that early */
+ ASSERT(gfn != SHARED_M2P_ENTRY);
+ page_count++;
+ if (
+#ifdef __x86_64__
+ (gfn != 0x5555555555555555L)
+#else
+ (gfn != 0x55555555L)
+#endif
+ && gfn != INVALID_M2P_ENTRY
+                && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
+ goto error_unlock;
+ }
+ spin_unlock(&p2m->domain->page_alloc_lock);
+ }
- /* Copy all existing mappings from the page list and m2p */
- spin_lock(&p2m->domain->page_alloc_lock);
- page_list_for_each(page, &p2m->domain->page_list)
- {
- mfn = page_to_mfn(page);
- gfn = get_gpfn_from_mfn(mfn_x(mfn));
- /* Pages should not be shared that early */
- ASSERT(gfn != SHARED_M2P_ENTRY);
- page_count++;
- if (
-#ifdef __x86_64__
- (gfn != 0x5555555555555555L)
-#else
- (gfn != 0x55555555L)
-#endif
- && gfn != INVALID_M2P_ENTRY
-            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
- goto error_unlock;
- }
- spin_unlock(&p2m->domain->page_alloc_lock);
-
- nesteddone:
P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
p2m_unlock(p2m);
return 0;
@@ -701,7 +700,8 @@ int p2m_mem_paging_evict(struct p2m_doma
/* Remove mapping from p2m table */
p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
+ set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0,
+ p2m_ram_paged, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -751,7 +751,8 @@ void p2m_mem_paging_populate(struct p2m_
if ( p2mt == p2m_ram_paged )
{
p2m_lock(p2m);
-        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
+ set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0,
+ p2m_ram_paging_in_start, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
}
diff -r c5aebdd80c6d -r 5f762329e746 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri May 13 17:00:38 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Fri May 13 17:00:38 2011 +0100
@@ -47,10 +47,6 @@
*/
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
-#ifdef __x86_64__
-#define HAVE_GRANT_MAP_P2M
-#endif
-
/*
* The upper levels of the p2m pagetable always contain full rights; all
* variation in the access control bits is made in the level-1 PTEs.
@@ -78,20 +74,16 @@ typedef enum {
p2m_mmio_direct = 5, /* Read/write mapping of genuine MMIO area */
p2m_populate_on_demand = 6, /* Place-holder for empty memory */
- /* Note that these can only be used if HAVE_GRANT_MAP_P2M is
- defined. They get defined anyway so as to avoid lots of
- #ifdef's everywhere else. */
- p2m_grant_map_rw = 7, /* Read/write grant mapping */
- p2m_grant_map_ro = 8, /* Read-only grant mapping */
-
- /* Likewise, although these are defined in all builds, they can only
+ /* Although these are defined in all builds, they can only
* be used in 64-bit builds */
+ p2m_grant_map_rw = 7, /* Read/write grant mapping */
+ p2m_grant_map_ro = 8, /* Read-only grant mapping */
p2m_ram_paging_out = 9, /* Memory that is being paged out */
p2m_ram_paged = 10, /* Memory that has been paged out */
p2m_ram_paging_in = 11, /* Memory that is being paged in */
p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
p2m_ram_shared = 13, /* Shared or sharable memory */
- p2m_ram_broken =14, /* Broken page, access cause domain crash */
+ p2m_ram_broken = 14, /* Broken page, access cause domain crash */
} p2m_type_t;
/*
@@ -170,6 +162,9 @@ typedef enum {
* reinit the type correctly after fault */
#define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw))
#define P2M_SHARED_TYPES (p2m_to_mask(p2m_ram_shared))
+
+/* Broken type: the frame backing this pfn has failed in hardware
+ * and must not be touched. */
#define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken))
/* Useful predicates */
@@ -190,12 +185,7 @@ typedef enum {
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
-/* Populate-on-demand */
-#define POPULATE_ON_DEMAND_MFN (1<<9)
-#define POD_PAGE_ORDER 9
-
-#define PAGING_MFN INVALID_MFN
-
+/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */
spinlock_t lock;
@@ -298,10 +288,6 @@ struct p2m_domain *p2m_get_p2m(struct vc
#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
-/* Flushes specified p2m table */
-void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
-/* Flushes all nested p2m tables */
-void p2m_flush_nestedp2m(struct domain *d);
/*
* The P2M lock. This protects all updates to the p2m table.
@@ -376,23 +362,6 @@ void p2m_flush_nestedp2m(struct domain *
spin_unlock(&(_domain)->arch.nested_p2m_lock); \
} while (0)
-/* Extract the type from the PTE flags that store it */
-static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
-{
- /* Type is stored in the "available" bits */
-#ifdef __x86_64__
- /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
- * to make sure that an entirely empty PTE doesn't have RAM type */
- if ( flags == 0 )
- return p2m_invalid;
- /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
- * 59-62 for iommu flags so we can't use them to store p2m type info. */
- return (flags >> 12) & 0x7f;
-#else
- return (flags >> 9) & 0x7;
-#endif
-}
-
/* Read the current domain's p2m table. Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t,
@@ -508,6 +477,52 @@ int p2m_alloc_table(struct p2m_domain *p
void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);
+/* Add a page to a domain's p2m table */
+int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order,
+ p2m_type_t t);
+
+/* Remove a page from a domain's p2m table */
+void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order);
+
+/* Set a p2m range as populate-on-demand */
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+ unsigned int order);
+
+/* Untyped version for RAM only, for compatibility */
+static inline int guest_physmap_add_page(struct domain *d,
+ unsigned long gfn,
+ unsigned long mfn,
+ unsigned int page_order)
+{
+    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+/* Remove a page from a domain's p2m table */
+static inline void guest_physmap_remove_page(struct domain *d,
+ unsigned long gfn,
+ unsigned long mfn, unsigned int page_order)
+{
+ guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
+}
+
+/* Change types across all p2m entries in a domain */
+void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
+
+/* Compare-exchange the type of a single p2m entry */
+p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
+ p2m_type_t ot, p2m_type_t nt);
+
+/* Set mmio addresses in the p2m table (for pass-through) */
+int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
+
+
+/*
+ * Populate-on-demand
+ */
+
/* Dump PoD information about the domain */
void p2m_pod_dump_data(struct p2m_domain *p2m);
@@ -540,52 +555,9 @@ p2m_pod_offline_or_broken_hit(struct pag
void
p2m_pod_offline_or_broken_replace(struct page_info *p);
-/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
- unsigned long mfn, unsigned int page_order,
- p2m_type_t t);
-
-/* Remove a page from a domain's p2m table */
-void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
- unsigned long mfn, unsigned int page_order);
-
-/* Set a p2m range as populate-on-demand */
-int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
- unsigned int order);
-
-/* Untyped version for RAM only, for compatibility
- *
- * Return 0 for success
+/*
+ * Paging to disk and page-sharing
*/
-static inline int guest_physmap_add_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn,
- unsigned int page_order)
-{
-    return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
-}
-
-/* Remove a page from a domain's p2m table */
-static inline void guest_physmap_remove_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn, unsigned int page_order)
-{
- guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
-}
-
-/* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
-
-/* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
- p2m_type_t ot, p2m_type_t nt);
-
-/* Set mmio addresses in the p2m table (for pass-through) */
-int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
-int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
-
-void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level);
#ifdef __x86_64__
/* Modify p2m table for shared gfn */
@@ -655,6 +627,40 @@ static inline int p2m_gfn_check_limit(
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
+
+/*
+ * Functions specific to the p2m-pt implementation
+ */
+
+/* Extract the type from the PTE flags that store it */
+static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
+{
+ /* Type is stored in the "available" bits */
+#ifdef __x86_64__
+ /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
+ * to make sure that an entirely empty PTE doesn't have RAM type */
+ if ( flags == 0 )
+ return p2m_invalid;
+ /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
+ * 59-62 for iommu flags so we can't use them to store p2m type info. */
+ return (flags >> 12) & 0x7f;
+#else
+ return (flags >> 9) & 0x7;
+#endif
+}
+
+/*
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization
+ */
+
+/* Flushes specified p2m table */
+void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
+/* Flushes all nested p2m tables */
+void p2m_flush_nestedp2m(struct domain *d);
+
+void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
+ l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level);
+
#endif /* _XEN_P2M_H */
/*
diff -r c5aebdd80c6d -r 5f762329e746 xen/include/asm-x86/x86_32/page.h
--- a/xen/include/asm-x86/x86_32/page.h Fri May 13 17:00:38 2011 +0100
+++ b/xen/include/asm-x86/x86_32/page.h Fri May 13 17:00:38 2011 +0100
@@ -15,6 +15,7 @@
#define L3_PAGETABLE_ENTRIES 4
#define ROOT_PAGETABLE_ENTRIES L3_PAGETABLE_ENTRIES
#define SUPERPAGE_ORDER PAGETABLE_ORDER
+#define SUPERPAGE_PAGES (1<<SUPERPAGE_ORDER)
/*
* Architecturally, physical addresses may be up to 52 bits. However, the
diff -r c5aebdd80c6d -r 5f762329e746 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Fri May 13 17:00:38 2011 +0100
+++ b/xen/include/asm-x86/x86_64/page.h Fri May 13 17:00:38 2011 +0100
@@ -17,6 +17,7 @@
#define L4_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
#define ROOT_PAGETABLE_ENTRIES L4_PAGETABLE_ENTRIES
#define SUPERPAGE_ORDER PAGETABLE_ORDER
+#define SUPERPAGE_PAGES (1<<SUPERPAGE_ORDER)
#define __PAGE_OFFSET DIRECTMAP_VIRT_START
#define __XEN_VIRT_START XEN_VIRT_START
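
The two page.h hunks above replace the private copies of SUPERPAGE_PAGES
that p2m-pod.c and p2m-pt.c used to carry. With SUPERPAGE_ORDER defined
as PAGETABLE_ORDER (9 on x86), the derived value still equals the old
(1UL << 9) literal; a stand-alone sanity sketch under that assumption,
not part of the patch itself:

    /* Stand-alone sketch: assuming PAGETABLE_ORDER is 9, the derived
     * SUPERPAGE_PAGES matches the literal it replaces and the
     * superpage_aligned() test in p2m-pod.c keeps working. */
    #define PAGETABLE_ORDER  9
    #define SUPERPAGE_ORDER  PAGETABLE_ORDER
    #define SUPERPAGE_PAGES  (1 << SUPERPAGE_ORDER)
    #define superpage_aligned(_x) (((_x) & (SUPERPAGE_PAGES - 1)) == 0)

    int main(void)
    {
        _Static_assert(SUPERPAGE_PAGES == (1UL << 9), "must match old literal");
        return (superpage_aligned(2 * SUPERPAGE_PAGES)
                && !superpage_aligned(SUPERPAGE_PAGES + 1)) ? 0 : 1;
    }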