* Introduces an access type for each page, giving independent read, write, and
execute permissions per page. The access permissions can only restrict what the
page type already allows: for example, a p2m_ram_ro page with an access of
p2m_access_rw ends up read-only overall, as the type removes write access and
the access type removes execute access. (A standalone sketch of this
combination rule follows the sign-off below.)
* Implements the access flag storage for EPT, shrinking the software-available
P2M type field from 10 bits to 6 and using four of the freed bits for the
access type.
* Access flags are stored under a loose consistency contract: a page may be
reset to the default access permissions at any time. Currently that happens on
page type changes, a point at which the permissions for the page would need to
be re-evaluated anyway.
Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>
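For illustration, the combination rule described in the first bullet amounts to
a bitwise intersection. The following is a standalone sketch, not part of the
patch; struct perms and effective_perms() are hypothetical names, and the
access encoding mirrors the p2m_access_* values the patch adds (bit 0 = read,
bit 1 = write, bit 2 = execute for p2m_access_n through p2m_access_rwx; the
special p2m_access_rx2rw value is handled separately in the patch and omitted
here):

    #include <stdbool.h>

    /* Simplified permission triple; in the patch these are the r/w/x bits
     * of an EPT entry (ept_entry_t). */
    struct perms { bool r, w, x; };

    /* Start from what the p2m type allows, then clear anything the
     * access value does not grant. */
    static struct perms effective_perms(struct perms type_allows,
                                        unsigned int access)
    {
        struct perms p = type_allows;
        if ( !(access & 1) ) p.r = false;  /* access lacks read    */
        if ( !(access & 2) ) p.w = false;  /* access lacks write   */
        if ( !(access & 4) ) p.x = false;  /* access lacks execute */
        return p;
    }

Applied to the example above: p2m_ram_ro allows r+x, and p2m_access_rw
(value 3) then clears x, leaving the page readable only.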
diff -r 285a8f8d217e -r cae1ccf5857b xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c	Tue Jan 04 15:40:00 2011 +0000
+++ b/xen/arch/x86/mm/hap/p2m-ept.c	Wed Jan 05 18:44:56 2011 -0800
@@ -62,8 +62,9 @@ static int ept_pod_check_and_populate(st
     return r;
 }
 
-static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type)
+static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_access_t access)
 {
+    /* First apply type permissions */
     switch(type)
     {
         case p2m_invalid:
@@ -75,30 +76,61 @@ static void ept_p2m_type_to_flags(ept_en
         case p2m_ram_paging_in_start:
         default:
             entry->r = entry->w = entry->x = 0;
-            return;
+            break;
         case p2m_ram_rw:
             entry->r = entry->w = entry->x = 1;
-            return;
+            break;
         case p2m_mmio_direct:
             entry->r = entry->x = 1;
             entry->w = !rangeset_contains_singleton(mmio_ro_ranges, entry->mfn);
-            return;
+            break;
         case p2m_ram_logdirty:
         case p2m_ram_ro:
         case p2m_ram_shared:
             entry->r = entry->x = 1;
             entry->w = 0;
-            return;
+            break;
         case p2m_grant_map_rw:
             entry->r = entry->w = 1;
             entry->x = 0;
-            return;
+            break;
        case p2m_grant_map_ro:
             entry->r = 1;
             entry->w = entry->x = 0;
-            return;
+            break;
     }
+
+
+    /* Then restrict with access permissions */
+    switch (access)
+    {
+        case p2m_access_n:
+            entry->r = entry->w = entry->x = 0;
+            break;
+        case p2m_access_r:
+            entry->w = entry->x = 0;
+            break;
+        case p2m_access_w:
+            entry->r = entry->x = 0;
+            break;
+        case p2m_access_x:
+            entry->r = entry->w = 0;
+            break;
+        case p2m_access_rx:
+        case p2m_access_rx2rw:
+            entry->w = 0;
+            break;
+        case p2m_access_wx:
+            entry->r = 0;
+            break;
+        case p2m_access_rw:
+            entry->x = 0;
+            break;
+        case p2m_access_rwx:
+            break;
+    }
+
 }
 
 #define GUEST_TABLE_MAP_FAILED 0
@@ -117,6 +149,8 @@ static int ept_set_middle_entry(struct p
 
     ept_entry->epte = 0;
     ept_entry->mfn = page_to_mfn(pg);
+    ept_entry->access = p2m->default_access;
+
     ept_entry->r = ept_entry->w = ept_entry->x = 1;
 
     return 1;
@@ -170,11 +204,12 @@ static int ept_split_super_page(struct p
         epte->emt = ept_entry->emt;
         epte->ipat = ept_entry->ipat;
         epte->sp = (level > 1) ? 1 : 0;
+        epte->access = ept_entry->access;
         epte->sa_p2mt = ept_entry->sa_p2mt;
         epte->mfn = ept_entry->mfn + i * trunk;
         epte->rsvd2_snp = ( iommu_enabled && iommu_snoop ) ? 1 : 0;
 
-        ept_p2m_type_to_flags(epte, epte->sa_p2mt);
+        ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
 
         if ( (level - 1) == target )
             continue;
@@ -260,7 +295,7 @@ static int ept_next_level(struct p2m_dom
  */
 static int
 ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int order, p2m_type_t p2mt)
+              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     ept_entry_t *table, *ept_entry = NULL;
     unsigned long gfn_remainder = gfn;
@@ -334,9 +369,11 @@ ept_set_entry(struct p2m_domain *p2m, un
 
         /* Construct the new entry, and then write it once */
         new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
                                            direct_mmio);
+        new_entry.ipat = ipat;
         new_entry.sp = order ? 1 : 0;
         new_entry.sa_p2mt = p2mt;
+        new_entry.access = p2ma;
         new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
 
         if ( new_entry.mfn == mfn_x(mfn) )
@@ -344,7 +381,7 @@ ept_set_entry(struct p2m_domain *p2m, un
         else
             new_entry.mfn = mfn_x(mfn);
 
-        ept_p2m_type_to_flags(&new_entry, p2mt);
+        ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
     }
 
     atomic_write_ept_entry(ept_entry, new_entry);
@@ -384,6 +421,7 @@ ept_set_entry(struct p2m_domain *p2m, un
         new_entry.ipat = ipat;
         new_entry.sp = i ? 1 : 0;
         new_entry.sa_p2mt = p2mt;
+        new_entry.access = p2ma;
         new_entry.rsvd2_snp = (iommu_enabled && iommu_snoop);
 
         if ( new_entry.mfn == mfn_x(mfn) )
@@ -391,7 +429,7 @@ ept_set_entry(struct p2m_domain *p2m, un
         else /* the caller should take care of the previous page */
             new_entry.mfn = mfn_x(mfn);
 
-        ept_p2m_type_to_flags(&new_entry, p2mt);
+        ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
 
         atomic_write_ept_entry(ept_entry, new_entry);
     }
@@ -447,7 +485,7 @@ out:
 
 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct p2m_domain *p2m,
-                           unsigned long gfn, p2m_type_t *t,
+                           unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
                            p2m_query_t q)
 {
     struct domain *d = p2m->domain;
@@ -460,6 +498,7 @@ static mfn_t ept_get_entry(struct p2m_do
     mfn_t mfn = _mfn(INVALID_MFN);
 
     *t = p2m_mmio_dm;
+    *a = p2m_access_n;
 
     /* This pfn is higher than the highest the p2m map currently holds */
     if ( gfn > p2m->max_mapped_pfn )
@@ -519,6 +558,8 @@ static mfn_t ept_get_entry(struct p2m_do
     if ( ept_entry->sa_p2mt != p2m_invalid )
     {
         *t = ept_entry->sa_p2mt;
+        *a = ept_entry->access;
+
         mfn = _mfn(ept_entry->mfn);
         if ( i )
         {
@@ -626,10 +667,10 @@ out:
 }
 
 static mfn_t ept_get_entry_current(struct p2m_domain *p2m,
-                                   unsigned long gfn, p2m_type_t *t,
+                                   unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
                                    p2m_query_t q)
 {
-    return ept_get_entry(p2m, gfn, t, q);
+    return ept_get_entry(p2m, gfn, t, a, q);
 }
 
 /*
@@ -689,7 +730,7 @@ void ept_change_entry_emt_with_range(str
                 order = level * EPT_TABLE_ORDER;
                 if ( need_modify_ept_entry(p2m, gfn, mfn,
                                            e.ipat, e.emt, e.sa_p2mt) )
-                    ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+                    ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
                 gfn += trunk;
                 break;
             }
@@ -699,7 +740,7 @@ void ept_change_entry_emt_with_range(str
         else /* gfn assigned with 4k */
         {
             if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
-                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt);
+                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
         }
     }
     p2m_unlock(p2m);
@@ -730,7 +771,7 @@ static void ept_change_entry_type_page(m
             continue;
 
         e.sa_p2mt = nt;
-        ept_p2m_type_to_flags(&e, nt);
+        ept_p2m_type_to_flags(&e, nt, e.access);
         atomic_write_ept_entry(&epte[i], e);
     }
 }
diff -r 285a8f8d217e -r cae1ccf5857b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Tue Jan 04 15:40:00 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c	Wed Jan 05 18:44:56 2011 -0800
@@ -285,7 +285,7 @@ p2m_next_level(struct p2m_domain *p2m, m
  */
 static int
 set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int page_order, p2m_type_t p2mt);
+              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
 
 static int
 p2m_pod_cache_add(struct p2m_domain *p2m,
@@ -693,7 +693,7 @@ p2m_pod_decrease_reservation(struct doma
     {
         /* All PoD: Mark the whole region invalid and tell caller
          * we're done. */
-        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
+        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
         p2m->pod.entry_count-=(1<<order); /* Lock: p2m */
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
@@ -716,7 +716,7 @@ p2m_pod_decrease_reservation(struct doma
         mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
         if ( t == p2m_populate_on_demand )
         {
-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
             p2m->pod.entry_count--; /* Lock: p2m */
             BUG_ON(p2m->pod.entry_count < 0);
             pod--;
@@ -729,7 +729,7 @@ p2m_pod_decrease_reservation(struct doma
 
             page = mfn_to_page(mfn);
 
-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
             set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
 
             p2m_pod_cache_add(p2m, page, 0);
@@ -844,7 +844,7 @@ p2m_pod_zero_check_superpage(struct p2m_
 
     /* Try to remove the page, restoring old mapping if it fails. */
     set_p2m_entry(p2m, gfn,
                   _mfn(POPULATE_ON_DEMAND_MFN), 9,
-                  p2m_populate_on_demand);
+                  p2m_populate_on_demand, p2m->default_access);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
      * via the grant table interface, or by qemu.  Allow one refcount for
@@ -899,7 +899,7 @@ p2m_pod_zero_check_superpage(struct p2m_
 
 out_reset:
     if ( reset )
-        set_p2m_entry(p2m, gfn, mfn0, 9, type0);
+        set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
 
 out:
     return ret;
@@ -957,7 +957,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
 
         /* Try to remove the page, restoring old mapping if it fails. */
         set_p2m_entry(p2m, gfns[i], _mfn(POPULATE_ON_DEMAND_MFN), 0,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
          * for being allocated to a domain.) */
@@ -966,7 +966,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
             unmap_domain_page(map[i]);
             map[i] = NULL;
 
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
 
             continue;
         }
@@ -988,7 +988,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
          * check timing.  */
         if ( j < PAGE_SIZE/sizeof(*map[i]) )
         {
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i]);
+            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
         }
         else
         {
@@ -1121,7 +1121,7 @@ p2m_pod_demand_populate(struct p2m_domai
          * 512 2MB pages. The rest of 511 calls are unnecessary.
          */
         set_p2m_entry(p2m, gfn_aligned, _mfn(POPULATE_ON_DEMAND_MFN), 9,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
         return 0;
@@ -1158,7 +1158,7 @@ p2m_pod_demand_populate(struct p2m_domai
 
     gfn_aligned = (gfn >> order) << order;
 
-    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw);
+    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
 
     for( i = 0; i < (1UL << order); i++ )
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
@@ -1198,7 +1198,7 @@ remap_and_retry:
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
         set_p2m_entry(p2m, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
-                      p2m_populate_on_demand);
+                      p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
         struct {
@@ -1250,7 +1250,7 @@ static int p2m_pod_check_and_populate(st
 // Returns 0 on error (out of memory)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int page_order, p2m_type_t p2mt)
+              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -1401,7 +1401,7 @@ out:
 }
 
 static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
               p2m_query_t q)
 {
     mfn_t mfn;
@@ -1416,6 +1416,8 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
      * XXX Once we start explicitly registering MMIO regions in the p2m
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
+    /* Not implemented except with EPT */
+    *a = p2m_access_rwx;
 
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
@@ -1542,7 +1544,7 @@ pod_retry_l1:
 
 /* Read the current domain's p2m table (through the linear mapping). */
 static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
-                                    unsigned long gfn, p2m_type_t *t,
+                                    unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
                                     p2m_query_t q)
 {
     mfn_t mfn = _mfn(INVALID_MFN);
@@ -1553,6 +1555,9 @@ static mfn_t p2m_gfn_to_mfn_current(stru
      * XXX Once we start explicitly registering MMIO regions in the p2m
      * XXX we will return p2m_invalid for unmapped gfns */
 
+    /* Not currently implemented except for EPT */
+    *a = p2m_access_rwx;
+
     if ( gfn <= p2m->max_mapped_pfn )
     {
         l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
@@ -1726,6 +1731,8 @@ static void p2m_initialise(struct domain
     INIT_PAGE_LIST_HEAD(&p2m->pod.single);
 
     p2m->domain = d;
+    p2m->default_access = p2m_access_rwx;
+
     p2m->set_entry = p2m_set_entry;
     p2m->get_entry = p2m_gfn_to_mfn;
     p2m->get_entry_current = p2m_gfn_to_mfn_current;
@@ -1745,7 +1752,7 @@ int p2m_init(struct domain *d)
     if ( p2m == NULL )
         return -ENOMEM;
     p2m_initialise(d, p2m);
-    
+
     return 0;
 }
 
@@ -1759,7 +1766,7 @@ void p2m_change_entry_type_global(struct
 
 static
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-                  unsigned int page_order, p2m_type_t p2mt)
+                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct domain *d = p2m->domain;
     unsigned long todo = 1ul << page_order;
@@ -1776,7 +1783,7 @@ int set_p2m_entry(struct p2m
         else
             order = 0;
 
-        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt) )
+        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
            rc = 0;
         gfn += 1ul << order;
         if ( mfn_x(mfn) != INVALID_MFN )
@@ -1837,7 +1844,7 @@ int p2m_alloc_table(struct p2m_domain *p
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
-                        p2m_invalid) )
+                        p2m_invalid, p2m->default_access) )
         goto error;
 
     /* Copy all existing mappings from the page list and m2p */
@@ -1856,7 +1863,7 @@ int p2m_alloc_table(struct p2m_domain *p
             (gfn != 0x55555555L)
 #endif
              && gfn != INVALID_M2P_ENTRY
-            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw) )
+            && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
             goto error_unlock;
     }
     spin_unlock(&p2m->domain->page_alloc_lock);
@@ -1883,6 +1890,7 @@ void p2m_teardown(struct p2m_domain *p2m
 #ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
+    p2m_access_t a;
     mfn_t mfn;
 #endif
 
@@ -1891,7 +1899,7 @@ void p2m_teardown(struct p2m_domain *p2m
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = p2m->get_entry(p2m, gfn, &t, p2m_query);
+        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
             BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
     }
@@ -2188,6 +2196,7 @@ p2m_remove_page(struct p2m_domain *p2m,
     unsigned long i;
     mfn_t mfn_return;
     p2m_type_t t;
+    p2m_access_t a;
 
     if ( !paging_mode_translate(p2m->domain) )
     {
@@ -2201,12 +2210,12 @@ p2m_remove_page(struct p2m_domain *p2m,
 
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        mfn_return = p2m->get_entry(p2m, gfn + i, &t, p2m_query);
+        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
         if ( !p2m_is_grant(t) )
             set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
         ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
     }
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
 }
 
 void
@@ -2286,7 +2295,7 @@ guest_physmap_mark_populate_on_demand(st
 
     /* Now, actually do the two-way mapping */
     if ( !set_p2m_entry(p2m, gfn, _mfn(POPULATE_ON_DEMAND_MFN), order,
-                        p2m_populate_on_demand) )
+                        p2m_populate_on_demand, p2m->default_access) )
         rc = -EINVAL;
     else
     {
@@ -2399,7 +2408,7 @@ guest_physmap_add_entry(struct p2m_domai
     /* Now, actually do the two-way mapping */
     if ( mfn_valid(_mfn(mfn)) )
     {
-        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t) )
+        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
             rc = -EINVAL;
         if ( !p2m_is_grant(t) )
         {
@@ -2412,7 +2421,7 @@ guest_physmap_add_entry(struct p2m_domai
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
         if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
-                            p2m_invalid) )
+                            p2m_invalid, p2m->default_access) )
             rc = -EINVAL;
         else
        {
@@ -2565,7 +2574,7 @@ void p2m_change_type_global(struct p2m_d
 }
 
 /* Modify the p2m type of a single gfn from ot to nt, returning the
- * entry's previous type */
+ * entry's previous type.  Resets the access permissions. */
 p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt)
 {
@@ -2578,7 +2587,7 @@ p2m_type_t p2m_change_type(struct p2m_do
 
     mfn = gfn_to_mfn_query(p2m, gfn, &pt);
     if ( pt == ot )
-        set_p2m_entry(p2m, gfn, mfn, 0, nt);
+        set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
 
     p2m_unlock(p2m);
 
@@ -2609,7 +2618,7 @@ set_mmio_p2m_entry(struct p2m_domain *p2
 
     P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
     p2m_lock(p2m);
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct);
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
     if ( 0 == rc )
@@ -2639,7 +2648,7 @@ clear_mmio_p2m_entry(struct p2m_domain *
         return 0;
     }
     p2m_lock(p2m);
-    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0);
+    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 0, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -2668,7 +2677,7 @@ set_shared_p2m_entry(struct p2m_domain *
     P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
     if ( need_lock )
         p2m_lock(p2m);
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared);
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
     if ( need_lock )
         p2m_unlock(p2m);
     if ( 0 == rc )
@@ -2713,7 +2722,7 @@ int p2m_mem_paging_nominate(struct p2m_d
 
     /* Fix p2m entry */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -2750,7 +2759,7 @@ int p2m_mem_paging_evict(struct p2m_doma
 
     /* Remove mapping from p2m table */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+    set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -2780,7 +2789,7 @@ void p2m_mem_paging_populate(struct p2m_
     if ( p2mt == p2m_ram_paged )
     {
         p2m_lock(p2m);
-        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+        set_p2m_entry(p2m, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
     }
@@ -2816,7 +2825,7 @@ int p2m_mem_paging_prep(struct p2m_domai
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
 
@@ -2836,7 +2845,7 @@ void p2m_mem_paging_resume(struct p2m_do
     /* Fix p2m entry */
     mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
     p2m_lock(p2m);
-    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw);
+    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
diff -r 285a8f8d217e -r cae1ccf5857b xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Jan 04 15:40:00 2011 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Jan 05 18:44:56 2011 -0800
@@ -42,7 +42,8 @@ typedef union {
         rsvd2_snp   :   1,  /* bit 11 - Used for VT-d snoop control
                                in shared EPT/VT-d usage */
         mfn         :   40, /* bits 51:12 - Machine physical frame number */
-        sa_p2mt     :   10, /* bits 61:52 - Software available 2 */
+        sa_p2mt     :   6,  /* bits 57:52 - Software available 2 */
+        access      :   4,  /* bits 61:58 - p2m_access_t */
         rsvd3_tm    :   1,  /* bit 62 - Used for VT-d transient-mapping
                                hint in shared EPT/VT-d usage */
         avail3      :   1;  /* bit 63 - Software available 3 */
diff -r 285a8f8d217e -r cae1ccf5857b xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h	Tue Jan 04 15:40:00 2011 +0000
+++ b/xen/include/asm-x86/p2m.h	Wed Jan 05 18:44:56 2011 -0800
@@ -88,6 +88,31 @@ typedef enum {
     p2m_ram_broken  =14,          /* Broken page, access cause domain crash */
 } p2m_type_t;
 
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type.  Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n     = 0, /* No access permissions allowed */
+    p2m_access_r     = 1,
+    p2m_access_w     = 2,
+    p2m_access_rw    = 3,
+    p2m_access_x     = 4,
+    p2m_access_rx    = 5,
+    p2m_access_wx    = 6,
+    p2m_access_rwx   = 7,
+    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+
+    /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
 typedef enum {
     p2m_query = 0,              /* Do not populate a PoD entries      */
     p2m_alloc = 1,              /* Automatically populate PoD entries */
@@ -182,18 +207,30 @@ struct p2m_domain {
     int                (*set_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        mfn_t mfn, unsigned int page_order,
-                                       p2m_type_t p2mt);
+                                       p2m_type_t p2mt,
+                                       p2m_access_t p2ma);
     mfn_t              (*get_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
+                                       p2m_access_t *p2ma,
                                        p2m_query_t q);
     mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
+                                       p2m_access_t *p2ma,
                                        p2m_query_t q);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
+
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type.  See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event
+     * listener, pause domain.  Otherwise, remove access restrictions. */
+    bool_t       access_required;
 
     /* Highest guest frame that's ever been mapped in the p2m */
     unsigned long max_mapped_pfn;
@@ -284,9 +321,10 @@ static inline p2m_type_t p2m_flags_to_ty
 /* Read the current domain's p2m table.  Do not populate PoD pages. */
 static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
                                             unsigned long gfn, p2m_type_t *t,
+                                            p2m_access_t *a,
                                             p2m_query_t q)
 {
-    return p2m->get_entry_current(p2m, gfn, t, q);
+    return p2m->get_entry_current(p2m, gfn, t, a, q);
 }
 
 /* Read P2M table, mapping pages as we go.
@@ -295,7 +333,8 @@ static inline mfn_t
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_query_t q)
 {
-    return p2m->get_entry(p2m, gfn, t, q);
+    p2m_access_t a = 0;
+    return p2m->get_entry(p2m, gfn, t, &a, q);
 }
 
@@ -305,6 +344,7 @@ static inline mfn_t _gfn_to_mfn_type(str
                                      p2m_query_t q)
 {
     mfn_t mfn;
+    p2m_access_t a;
 
     if ( !p2m || !paging_mode_translate(p2m->domain) )
     {
@@ -314,7 +354,7 @@ static inline mfn_t _gfn_to_mfn_type(str
         mfn = _mfn(gfn);
     }
     else if ( likely(current->domain == p2m->domain) )
-        mfn = gfn_to_mfn_type_current(p2m, gfn, t, q);
+        mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
     else
         mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);
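As a cross-check on the vmx.h hunk above, the repacked software-available bits
still fit a single 64-bit EPT entry. The following is a standalone
illustration, not the Xen definition: ept_entry_demo_t is a hypothetical name,
and the attribute bits in 11:3 (emt, ipat, sp, etc.) are collapsed into one
"ignored" field for brevity:

    #include <stdint.h>

    typedef union {
        struct {
            uint64_t r        :  1,  /* bit 0      - read permission */
                     w        :  1,  /* bit 1      - write permission */
                     x        :  1,  /* bit 2      - execute permission */
                     ignored  :  9,  /* bits 11:3  - emt/ipat/sp/etc. */
                     mfn      : 40,  /* bits 51:12 - machine frame number */
                     sa_p2mt  :  6,  /* bits 57:52 - p2m type (was 10 bits) */
                     access   :  4,  /* bits 61:58 - p2m_access_t (new) */
                     rsvd3_tm :  1,  /* bit 62 */
                     avail3   :  1;  /* bit 63 */
        };
        uint64_t epte;
    } ept_entry_demo_t;

    /* Fails to compile if the fields no longer pack into 64 bits. */
    typedef char epte_size_check[(sizeof(ept_entry_demo_t) == 8) ? 1 : -1];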