* Introduces access types for each page, giving independent read, write, and
  execute permissions per page. The access permissions further restrict
  whatever the page type already allows: for example, a p2m_type_ro page
  with an access of p2m_access_rw ends up read-only overall, since
  p2m_type_ro removes write access and p2m_access_rw removes execute
  access. (A standalone sketch of this rule follows the list below.)
* Implements the access-flag storage for EPT, moving bits out of the P2M
  type field, which shrinks from 10 bits to 6, and using the freed four
  bits for the access value. (Also sketched below.)
* Access flags are stored under a loose consistency contract: pages may be
  reset to the default access permissions at any time. Right now that
  happens on page type changes, a point at which one would want to
  re-evaluate whether the permissions still make sense for the page anyway.
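
For reference, the combination works out to a plain intersection of the two
permission sets; the authoritative logic is ept_p2m_type_to_flags() in the
diff below. A minimal standalone sketch of the rule (hypothetical helper
names, plain userspace C, not part of the patch):

    #include <stdio.h>

    /* Hypothetical r/w/x bitmask encoding, for illustration only. */
    enum { P_R = 1, P_W = 2, P_X = 4 };

    /* Baseline permissions implied by a (simplified) page type. */
    static unsigned type_perms_ro(void)  { return P_R | P_X; }  /* p2m_type_ro */

    /* Restriction mask implied by an access value. */
    static unsigned access_mask_rw(void) { return P_R | P_W; }  /* p2m_access_rw */

    int main(void)
    {
        /* p2m_type_ro restricted by p2m_access_rw: only read survives. */
        unsigned eff = type_perms_ro() & access_mask_rw();
        printf("effective = %c%c%c\n",
               (eff & P_R) ? 'r' : '-',
               (eff & P_W) ? 'w' : '-',
               (eff & P_X) ? 'x' : '-');  /* prints "effective = r--" */
        return 0;
    }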
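
The EPT-side storage change can likewise be pictured with a standalone
model of the software-available PTE bits (field widths taken from the
vmx.h hunk below; the struct name and test values are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical model of the software-available EPT PTE bits after
     * this patch: the former 10-bit type field is split 6/4. */
    struct epte_sw_bits {
        uint64_t sa_p2mt : 6;  /* bits 57:52 - p2m_type_t   */
        uint64_t access  : 4;  /* bits 61:58 - p2m_access_t */
    };

    int main(void)
    {
        /* 6 bits still fit all 15 p2m types (0..14); 4 bits fit the
         * 9 access values (0..8), hence the "assumed to be only 4
         * bits" note on the enum. */
        struct epte_sw_bits e = { .sa_p2mt = 14, .access = 8 };
        printf("type=%u access=%u\n",
               (unsigned)e.sa_p2mt, (unsigned)e.access);
        return 0;
    }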
Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>
diff -r 4e108cf56d07 -r a3cec4b94150 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/p2m.h Tue Jan 04 10:30:15 2011 -0800
@@ -88,6 +88,31 @@
p2m_ram_broken =14, /* Broken page, access cause domain crash */
} p2m_type_t;
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type. Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+ p2m_access_n = 0, /* No access permissions allowed */
+ p2m_access_r = 1,
+ p2m_access_w = 2,
+ p2m_access_rw = 3,
+ p2m_access_x = 4,
+ p2m_access_rx = 5,
+ p2m_access_wx = 6,
+ p2m_access_rwx = 7,
+ p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+
+ /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
typedef enum {
p2m_query = 0, /* Do not populate a PoD entries */
p2m_alloc = 1, /* Automatically populate PoD entries */
@@ -182,18 +207,30 @@
int (*set_entry )(struct p2m_domain *p2m,
unsigned long gfn,
mfn_t mfn, unsigned int page_order,
- p2m_type_t p2mt);
+ p2m_type_t p2mt,
+ p2m_access_t p2ma);
mfn_t (*get_entry )(struct p2m_domain *p2m,
unsigned long gfn,
p2m_type_t *p2mt,
+ p2m_access_t *p2ma,
p2m_query_t q);
mfn_t (*get_entry_current)(struct p2m_domain *p2m,
unsigned long gfn,
p2m_type_t *p2mt,
+ p2m_access_t *p2ma,
p2m_query_t q);
void (*change_entry_type_global)(struct p2m_domain *p2m,
p2m_type_t ot,
p2m_type_t nt);
+
+ /* Default P2M access type for each page in the domain: new pages,
+ * swapped in pages, cleared pages, and pages that are ambiguously
+ * retyped get this access type. See definition of p2m_access_t. */
+ p2m_access_t default_access;
+
+ /* If true, and an access fault comes in and there is no mem_event listener,
+ * pause domain. Otherwise, remove access restrictions. */
+ bool_t access_required;
/* Highest guest frame that's ever been mapped in the p2m */
unsigned long max_mapped_pfn;
@@ -284,9 +321,10 @@
/* Read the current domain's p2m table. Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
unsigned long gfn, p2m_type_t *t,
+ p2m_access_t *a,
p2m_query_t q)
{
- return p2m->get_entry_current(p2m, gfn, t, q);
+ return p2m->get_entry_current(p2m, gfn, t, a, q);
}
/* Read P2M table, mapping pages as we go.
@@ -295,7 +333,8 @@
gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_query_t q)
{
- return p2m->get_entry(p2m, gfn, t, q);
+ p2m_access_t a = 0;
+ return p2m->get_entry(p2m, gfn, t, &a, q);
}
@@ -305,6 +344,7 @@
p2m_query_t q)
{
mfn_t mfn;
+ p2m_access_t a;
if ( !p2m || !paging_mode_translate(p2m->domain) )
{
@@ -314,7 +354,7 @@
mfn = _mfn(gfn);
}
else if ( likely(current->domain == p2m->domain) )
- mfn = gfn_to_mfn_type_current(p2m, gfn, t, q);
+ mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
else
mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);
diff -r 4e108cf56d07 -r a3cec4b94150 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Jan 04 10:30:15 2011 -0800
@@ -42,7 +42,8 @@
rsvd2_snp : 1, /* bit 11 - Used for VT-d snoop control
in shared EPT/VT-d usage */
mfn : 40, /* bits 51:12 - Machine physical frame number */
- sa_p2mt : 10, /* bits 61:52 - Software available 2 */
+ sa_p2mt : 6, /* bits 57:52 - Software available 2 */
+ access : 4, /* bits 61:58 - p2m_access_t */
rsvd3_tm : 1, /* bit 62 - Used for VT-d transient-mapping
hint in shared EPT/VT-d usage */
avail3 : 1; /* bit 63 - Software available 3 */
diff -r 4e108cf56d07 -r a3cec4b94150 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/hap/p2m-ept.c Tue Jan 04 10:30:15 2011 -0800
@@ -62,8 +62,9 @@
return r;
}
-static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
+static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
{
+ /* First apply type permissions */
switch(type)
{
case p2m_invalid:
@@ -75,30 +76,61 @@
case p2m_ram_paging_in_start:
default:
entry->r = entry->w = entry->x = 0;
- return;
+ break;
case p2m_ram_rw:
entry->r = entry->w = entry->x = 1;
- return;
+ break;
case p2m_mmio_direct:
entry->r = entry->x = 1;
entry->w = !rangeset_contains_singleton(mmio_ro_ranges,
entry->mfn);
- return;
+ break;
case p2m_ram_logdirty:
case p2m_ram_ro:
case p2m_ram_shared:
entry->r = entry->x = 1;
entry->w = 0;
- return;
+ break;
case p2m_grant_map_rw:
entry->r = entry->w = 1;
entry->x = 0;
- return;
+ break;
case p2m_grant_map_ro:
entry->r = 1;
entry->w = entry->x = 0;
- return;
+ break;
}
+
+
+ /* Then restrict with access permissions */
+ switch (access)
+ {
+ case p2m_access_n:
+ entry->r = entry->w = entry->x = 0;
+ break;
+ case p2m_access_r:
+ entry->w = entry->x = 0;
+ break;
+ case p2m_access_w:
+ entry->r = entry->x = 0;
+ break;
+ case p2m_access_x:
+ entry->r = entry->w = 0;
+ break;
+ case p2m_access_rx:
+ case p2m_access_rx2rw:
+ entry->w = 0;
+ break;
+ case p2m_access_wx:
+ entry->r = 0;
+ break;
+ case p2m_access_rw:
+ entry->x = 0;
+ break;
+ case p2m_access_rwx:
+ break;
+ }
+
}
#define GUEST_TABLE_MAP_FAILED 0
@@ -117,6 +149,8 @@
ept_entry->epte = 0;
ept_entry->mfn = page_to_mfn(pg);
+ ept_entry->access = p2m->default_access;
+
ept_entry->r = ept_entry->w = ept_entry->x = 1;
return 1;
@@ -170,11 +204,12 @@
epte->emt = ept_entry->emt;
epte->ipat = ept_entry->ipat;
epte->sp = (level > 1) ? 1 : 0;
+ epte->access = ept_entry->access;
epte->sa_p2mt = ept_entry->sa_p2mt;
epte->mfn = ept_entry->mfn + i * trunk;
epte->rsvd2_snp = ( iommu_enabled && iommu_snoop ) ? 1 : 0;
- ept_p2m_type_to_flags(epte, epte->sa_p2mt);
+ ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
if ( (level - 1) == target )
continue;
@@ -260,7 +295,7 @@
*/
static int
ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int order, p2m_type_t p2mt)
+ unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
{
ept_entry_t *table, *ept_entry = NULL;
unsigned long gfn_remainder = gfn;
@@ -334,9 +369,11 @@
/* Construct the new entry, and then write it once */
new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
direct_mmio);
+
new_entry.ipat = ipat;
new_entry.sp = order ? 1 : 0;
new_entry.sa_p2mt = p2mt;
+ new_entry.access = p2ma;
new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
if ( new_entry.mfn == mfn_x(mfn) )
@@ -344,7 +381,7 @@
else
new_entry.mfn = mfn_x(mfn);
- ept_p2m_type_to_flags(&new_entry, p2mt);
+ ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
}
atomic_write_ept_entry(ept_entry, new_entry);
@@ -384,6 +421,7 @@
new_entry.ipat = ipat;
new_entry.sp = i ? 1 : 0;
new_entry.sa_p2mt = p2mt;
+ new_entry.access = p2ma;
new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
if ( new_entry.mfn == mfn_x(mfn) )
@@ -391,7 +429,7 @@
else /* the caller should take care of the previous page */
new_entry.mfn = mfn_x(mfn);
- ept_p2m_type_to_flags(&new_entry, p2mt);
+ ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
atomic_write_ept_entry(ept_entry, new_entry);
}
@@ -447,7 +485,7 @@
/* Read ept p2m entries */
static mfn_t ept_get_entry(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
p2m_query_t q)
{
struct domain *d = p2m->domain;
@@ -460,6 +498,7 @@
mfn_t mfn = _mfn(INVALID_MFN);
*t = p2m_mmio_dm;
+ *a = p2m_access_n;
/* This pfn is higher than the highest the p2m map currently holds */
if ( gfn > p2m->max_mapped_pfn )
@@ -519,6 +558,8 @@
if ( ept_entry->sa_p2mt != p2m_invalid )
{
*t = ept_entry->sa_p2mt;
+ *a = ept_entry->access;
+
mfn = _mfn(ept_entry->mfn);
if ( i )
{
@@ -626,10 +667,10 @@
}
static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
- return ept_get_entry(p2m, gfn, t, q);
+ return ept_get_entry(p2m, gfn, t, a, q);
}
/*
@@ -689,7 +730,7 @@
order = level * EPT_TABLE_ORDER;
if ( need_modify_ept_entry(p2m, gfn, mfn,
e.ipat, e.emt, e.sa_p2mt) )
- ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+ ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
gfn += trunk;
break;
}
@@ -699,7 +740,7 @@
else /* gfn assigned with 4k */
{
if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt,
e.sa_p2mt) )
- ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+ ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
}
}
p2m_unlock(p2m);
@@ -730,7 +771,7 @@
continue;
e.sa_p2mt = nt;
- ept_p2m_type_to_flags(&e, nt);
+ ept_p2m_type_to_flags(&e, nt, e.access);
atomic_write_ept_entry(&epte[i], e);
}
}
diff -r 4e108cf56d07 -r a3cec4b94150 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/arch/x86/mm/p2m.c Tue Jan 04 10:30:15 2011 -0800
@@ -285,7 +285,7 @@
*/
static
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt);
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
static int
p2m_pod_cache_add(struct p2m_domain *p2m,
@@ -693,7 +693,7 @@
{
/* All PoD: Mark the whole region invalid and tell caller
* we're done. */
- set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
+ set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
BUG_ON(p2m->pod.entry_count < 0);
ret = 1;
@@ -716,7 +716,7 @@
mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
if ( t == p2m_populate_on_demand )
{
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+ set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
p2m->pod.entry_count--; /* Lock: p2m */
BUG_ON(p2m->pod.entry_count < 0);
pod--;
@@ -729,7 +729,7 @@
page = mfn_to_page(mfn);
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+ set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, 0);
@@ -844,7 +844,7 @@
/* Try to remove the page, restoring old mapping if it fails. */
set_p2m_entry(p2m, gfn,
_mfn(POPULATE_ON_DEMAND_MFN), 9,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
/* Make none of the MFNs are used elsewhere... for example, mapped
* via the grant table interface, or by qemu. Allow one refcount for
@@ -899,7 +899,7 @@
out_reset:
if ( reset )
- set_p2m_entry(p2m, gfn, mfn0, 9, type0);
+ set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
out:
return ret;
@@ -957,7 +957,7 @@
/* Try to remove the page, restoring old mapping if it fails. */
set_p2m_entry(p2m, gfns[i],
_mfn(POPULATE_ON_DEMAND_MFN), 0,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
* for being allocated to a domain.) */
@@ -966,7 +966,7 @@
unmap_domain_page(map[i]);
map[i] = NULL;
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+ set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
continue;
}
@@ -988,7 +988,7 @@
* check timing. */
if ( j < PAGE_SIZE/sizeof(*map[i]) )
{
- set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+ set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
}
else
{
@@ -1121,7 +1121,7 @@
* 512 2MB pages. The rest of 511 calls are unnecessary.
*/
set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
return 0;
@@ -1158,7 +1158,7 @@
gfn_aligned = (gfn >> order) << order;
- set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
+ set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
for( i = 0; i < (1UL << order); i++ )
set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
@@ -1198,7 +1198,7 @@
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
- p2m_populate_on_demand);
+ p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
struct {
@@ -1250,7 +1250,7 @@
// Returns 0 on error (out of memory)
static int
p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt)
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
// XXX -- this might be able to be faster iff current->domain == d
mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -1401,7 +1401,7 @@
}
static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
mfn_t mfn;
@@ -1416,6 +1416,8 @@
* XXX Once we start explicitly registering MMIO regions in the p2m
* XXX we will return p2m_invalid for unmapped gfns */
*t = p2m_mmio_dm;
+ /* Not implemented except with EPT */
+ *a = p2m_access_rwx;
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -1542,7 +1544,7 @@
/* Read the current domain's p2m table (through the linear mapping). */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
+ unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
p2m_query_t q)
{
mfn_t mfn = _mfn(INVALID_MFN);
@@ -1553,6 +1555,9 @@
* XXX Once we start explicitly registering MMIO regions in the p2m
* XXX we will return p2m_invalid for unmapped gfns */
+ /* Not currently implemented except for EPT */
+ *a = p2m_access_rwx;
+
if ( gfn <= p2m->max_mapped_pfn )
{
l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
@@ -1726,6 +1731,8 @@
INIT_PAGE_LIST_HEAD(&p2m->pod.single);
p2m->domain = d;
+ p2m->default_access = p2m_access_rwx; /* Dom flags override */
+
p2m->set_entry = p2m_set_entry;
p2m->get_entry = p2m_gfn_to_mfn;
p2m->get_entry_current = p2m_gfn_to_mfn_current;
@@ -1745,7 +1752,7 @@
if ( p2m == NULL )
return -ENOMEM;
p2m_initialise(d, p2m);
-
+
return 0;
}
@@ -1759,7 +1766,7 @@
static
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t p2mt)
+ unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
struct domain *d = p2m->domain;
unsigned long todo = 1ul << page_order;
@@ -1776,7 +1783,7 @@
else
order = 0;
- if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
+ if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
rc = 0;
gfn += 1ul << order;
if ( mfn_x(mfn) != INVALID_MFN )
@@ -1837,7 +1844,7 @@
/* Initialise physmap tables for slot zero. Other code assumes this. */
if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
- p2m_invalid) )
+ p2m_invalid, p2m->default_access) )
goto error;
/* Copy all existing mappings from the page list and m2p */
@@ -1856,7 +1863,7 @@
(gfn != 0x55555555L)
#endif
&& gfn != INVALID_M2P_ENTRY
- && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
+ && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
goto error_unlock;
}
spin_unlock(&p2m->domain->page_alloc_lock);
@@ -1883,6 +1890,7 @@
#ifdef __x86_64__
unsigned long gfn;
p2m_type_t t;
+ p2m_access_t a;
mfn_t mfn;
#endif
@@ -1891,7 +1899,7 @@
#ifdef __x86_64__
for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
{
- mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
+ mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
BUG_ON(mem_sharing_unshare_page(p2m, gfn,
MEM_SHARING_DESTROY_GFN));
}
@@ -2188,6 +2196,7 @@
unsigned long i;
mfn_t mfn_return;
p2m_type_t t;
+ p2m_access_t a;
if ( !paging_mode_translate(p2m->domain) )
{
@@ -2201,12 +2210,12 @@
for ( i = 0; i < (1UL << page_order); i++ )
{
- mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
+ mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
if ( !p2m_is_grant(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
- set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
+ set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
}
void
@@ -2286,7 +2295,7 @@
/* Now, actually do the two-way mapping */
if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
- p2m_populate_on_demand) )
+ p2m_populate_on_demand, p2m->default_access) )
rc = -EINVAL;
else
{
@@ -2399,7 +2408,7 @@
/* Now, actually do the two-way mapping */
if ( mfn_valid(_mfn(mfn)) )
{
- if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
+ if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
rc = -EINVAL;
if ( !p2m_is_grant(t) )
{
@@ -2412,7 +2421,7 @@
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn, mfn);
if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
- p2m_invalid) )
+ p2m_invalid, p2m->default_access) )
rc = -EINVAL;
else
{
@@ -2565,7 +2574,7 @@
}
/* Modify the p2m type of a single gfn from ot to nt, returning the
- * entry's previous type */
+ * entry's previous type. Resets the access permissions. */
p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t ot, p2m_type_t nt)
{
@@ -2578,7 +2587,7 @@
mfn = gfn_to_mfn_query(p2m, gfn, &pt);
if ( pt == ot )
- set_p2m_entry(p2m, gfn, mfn, 0, nt);
+ set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
p2m_unlock(p2m);
@@ -2609,7 +2618,7 @@
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
p2m_lock(p2m);
- rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
+ rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
if ( 0 == rc )
@@ -2639,7 +2648,7 @@
return 0;
}
p2m_lock(p2m);
- rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
+ rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -2665,7 +2674,7 @@
set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
- rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
+ rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
@@ -2708,7 +2717,7 @@
/* Fix p2m entry */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
+ set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -2745,7 +2754,7 @@
/* Remove mapping from p2m table */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -2775,7 +2784,7 @@
if ( p2mt == p2m_ram_paged )
{
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+ set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
}
@@ -2811,7 +2820,7 @@
/* Fix p2m mapping */
p2m_lock(p2m);
- set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+ set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);
@@ -2831,7 +2840,7 @@
/* Fix p2m entry */
mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
p2m_lock(p2m);
- set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+ set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
audit_p2m(p2m, 1);
p2m_unlock(p2m);