# HG changeset patch
# User Tim Deegan
# Date 1282319534 -3600
# Node ID 10627fb7c8cfa1f9a055a2a30011de5a087931a8
# Parent 1544aa105c624f8a49e16900b97e3f10aa30d0cd
x86 shadow: remove the assumption that multipage shadows are contiguous.

x86 shadow: remove the assumption that multipage shadows are contiguous
and move from page to page using the linked list instead.

Signed-off-by: Tim Deegan

diff -r 1544aa105c62 -r 10627fb7c8cf xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Fri Aug 20 16:52:13 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Aug 20 16:52:14 2010 +0100
@@ -1214,8 +1214,8 @@ int shadow_cmpxchg_guest_entry(struct vc
  * l1 tables (covering 2MB of virtual address space each). Similarly, a
  * 32-bit guest l2 table (4GB va) needs to be shadowed by four
  * PAE/64-bit l2 tables (1GB va each). These multi-page shadows are
- * contiguous and aligned; functions for handling offsets into them are
- * defined in shadow.c (shadow_l1_index() etc.)
+ * not contiguous in memory; functions for handling offsets into them are
+ * defined in shadow/multi.c (shadow_l1_index() etc.)
  *
  * This table shows the allocation behaviour of the different modes:
  *
diff -r 1544aa105c62 -r 10627fb7c8cf xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Aug 20 16:52:13 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Aug 20 16:52:14 2010 +0100
@@ -421,13 +421,27 @@ sh_guest_get_eff_l1e(struct vcpu *v, uns
  * way to see this is: a 32-bit guest L2 page maps 4GB of virtual address
  * space, while a PAE- or 64-bit shadow L2 page maps 1GB of virtual address
  * space.)
- *
- * For PAE guests, for every 32-bytes of guest L3 page table, we use 64-bytes
- * of shadow (to store both the shadow, and the info that would normally be
- * stored in page_info fields). This arrangement allows the shadow and the
- * "page_info" fields to always be stored in the same page (in fact, in
- * the same cache line), avoiding an extra call to map_domain_page().
- */
+ */
+
+/* From one page of a multi-page shadow, find the next one */
+static inline mfn_t sh_next_page(mfn_t smfn)
+{
+    mfn_t next;
+    struct page_info *pg = mfn_to_page(smfn);
+
+    ASSERT(pg->u.sh.type == SH_type_l1_32_shadow
+           || pg->u.sh.type == SH_type_fl1_32_shadow
+           || pg->u.sh.type == SH_type_l2_32_shadow);
+    ASSERT(pg->u.sh.type == SH_type_l2_32_shadow || pg->u.sh.head);
+    ASSERT(pg->list.next != PAGE_LIST_NULL);
+
+    next = _mfn(pdx_to_pfn(pg->list.next));
+
+    /* XXX not for long */ ASSERT(mfn_x(next) == mfn_x(smfn) + 1);
+    ASSERT(mfn_to_page(next)->u.sh.type == pg->u.sh.type);
+    ASSERT(!mfn_to_page(next)->u.sh.head);
+    return next;
+}
 
 static inline u32
 guest_index(void *ptr)
@@ -440,8 +454,8 @@ shadow_l1_index(mfn_t *smfn, u32 guest_i
 {
 #if (GUEST_PAGING_LEVELS == 2)
     ASSERT(mfn_to_page(*smfn)->u.sh.head);
-    *smfn = _mfn(mfn_x(*smfn) +
-                 (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
+    if ( guest_index >= SHADOW_L1_PAGETABLE_ENTRIES )
+        *smfn = sh_next_page(*smfn);
     return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
 #else
     return guest_index;
@@ -452,13 +466,12 @@ shadow_l2_index(mfn_t *smfn, u32 guest_i
 shadow_l2_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2)
+    int i;
     ASSERT(mfn_to_page(*smfn)->u.sh.head);
     // Because we use 2 shadow l2 entries for each guest entry, the number of
     // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
-    //
-    *smfn = _mfn(mfn_x(*smfn) +
-                 (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));
-
+    for ( i = 0; i < guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2); i++ )
+        *smfn = sh_next_page(*smfn);
     // We multiply by two to get the index of the first of the two entries
     // used to shadow the specified guest entry.
     return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
@@ -1014,11 +1027,11 @@ static int shadow_set_l2e(struct vcpu *v
     /* In 2-on-3 we work with pairs of l2es pointing at two-page
      * shadows. Reference counting and up-pointers track from the first
      * page of the shadow to the first l2e, so make sure that we're
-     * working with those:
-     * Align the pointer down so it's pointing at the first of the pair */
+     * working with those:
+     * Start with a pair of identical entries */
+    shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
+    /* Align the pointer down so it's pointing at the first of the pair */
     sl2e = (shadow_l2e_t *)((unsigned long)sl2e & ~(sizeof(shadow_l2e_t)));
-    /* Align the mfn of the shadow entry too */
-    new_sl2e.l2 &= ~(1<arch.shadow_table[0]);
             int i;
             for ( i = 0; i < 4; i++ )
             {
 #if GUEST_PAGING_LEVELS == 2
                 /* 2-on-3: make a PAE l3 that points at the four-page l2 */
-                smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[0]) + i);
+                if ( i != 0 )
+                    smfn = sh_next_page(smfn);
 #else
                 /* 3-on-3: make a PAE l3 that points at the four l2 pages */
                 smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
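
For reference only, not part of the changeset: the index-to-page walk the patch introduces, as a minimal sketch. It assumes the Xen-internal mfn_t, u32, sh_next_page() and SHADOW_L1_PAGETABLE_ENTRIES seen in the diff above; the helper name sh_l1_page_for_index() is hypothetical.

/* Minimal sketch, assuming the Xen-internal types and helpers above;
 * the name sh_l1_page_for_index() is hypothetical. */
static inline mfn_t sh_l1_page_for_index(mfn_t head, u32 guest_index)
{
    u32 i;

    /* One sh_next_page() hop per full page's worth of guest l1 entries
     * that precede guest_index: no mfn arithmetic, so the pages of the
     * multi-page shadow need not sit at consecutive mfns. */
    for ( i = 0; i < guest_index / SHADOW_L1_PAGETABLE_ENTRIES; i++ )
        head = sh_next_page(head);

    return head;
}

Advancing one linked-list hop at a time is what replaces the old "mfn + n" arithmetic, which is why the shadow allocator no longer has to hand back contiguous, aligned pages.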