By combining the overlay fields that are 8 bytes wide (on x86-64) into
a union separate from the one used for the 4-byte-wide fields, no
unnecessary padding gets inserted, while at the same time avoiding the
use of __attribute__((__packed__)) on any of the sub-structures (which
bears the risk of misaligning structure members without it being
immediately noticed).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
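
For illustration, a minimal sketch (with hypothetical field names, not
the actual struct page_info) of why grouping the overlay fields by size
into separate unions removes the padding without resorting to
__attribute__((__packed__)); this assumes an LP64 target such as x86-64:

#include <stdint.h>
#include <stdio.h>

struct mixed {              /* one union mixing field sizes */
    union {
        struct {
            uint32_t a;     /* 4 bytes, then 4 bytes of padding ... */
            uint64_t b;     /* ... so this lands on an 8-byte boundary */
        } s;
    } u;
    uint32_t c;             /* 4 bytes, then 4 bytes of tail padding */
};

struct split {              /* one union per field size */
    union {
        uint64_t b;         /* all 8-byte overlay fields live here */
    } u;
    union {
        uint32_t a;         /* all 4-byte overlay fields live here */
    } v;
    uint32_t c;             /* fills the slot after v: no padding */
};

int main(void)
{
    /* On x86-64 this prints 24 and 16: same information, every member
     * naturally aligned, and no packing attribute anywhere. */
    printf("mixed: %zu, split: %zu\n",
           sizeof(struct mixed), sizeof(struct split));
    return 0;
}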
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/common.c 2009-01-30 10:19:27.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/common.c 2009-01-30 10:19:35.000000000 +0100
@@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
/* Convert smfn to gfn */
unsigned long gfn;
ASSERT(mfn_valid(smfn));
- gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->u.sh.back));
+ gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->v.sh.back));
__trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
sizeof(gfn), (unsigned char*)&gfn);
}
@@ -1542,7 +1542,7 @@ mfn_t shadow_alloc(struct domain *d,
while ( i != order )
{
i--;
- sp->u.sh.order = i;
+ sp->v.free.order = i;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[i]);
sp += 1 << i;
}
@@ -1569,7 +1569,7 @@ mfn_t shadow_alloc(struct domain *d,
sp[i].u.sh.type = shadow_type;
sp[i].u.sh.pinned = 0;
sp[i].u.sh.count = 0;
- sp[i].u.sh.back = backpointer;
+ sp[i].v.sh.back = backpointer;
set_next_shadow(&sp[i], NULL);
perfc_incr(shadow_alloc_count);
}
@@ -1629,20 +1629,20 @@ void shadow_free(struct domain *d, mfn_t
if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
/* Merge with predecessor block? */
if ( ((sp-mask)->u.sh.type != PGT_none) ||
- ((sp-mask)->u.sh.order != order) )
+ ((sp-mask)->v.free.order != order) )
break;
sp -= mask;
page_list_del(sp, &d->arch.paging.shadow.freelists[order]);
} else {
/* Merge with successor block? */
if ( ((sp+mask)->u.sh.type != PGT_none) ||
- ((sp+mask)->u.sh.order != order) )
+ ((sp+mask)->v.free.order != order) )
break;
page_list_del(sp + mask, &d->arch.paging.shadow.freelists[order]);
}
}
- sp->u.sh.order = order;
+ sp->v.free.order = order;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
}
@@ -1825,7 +1825,7 @@ static unsigned int sh_set_allocation(st
sp[j].u.sh.count = 0;
sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
}
- sp->u.sh.order = order;
+ sp->v.free.order = order;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
}
else if ( d->arch.paging.shadow.total_pages > pages )
@@ -1904,17 +1904,17 @@ static void sh_hash_audit_bucket(struct
BUG_ON( sp->u.sh.type == 0 );
BUG_ON( sp->u.sh.type > SH_type_max_shadow );
/* Wrong bucket? */
- BUG_ON( sh_hash(sp->u.sh.back, sp->u.sh.type) != bucket );
+ BUG_ON( sh_hash(sp->v.sh.back, sp->u.sh.type) != bucket );
/* Duplicate entry? */
for ( x = next_shadow(sp); x; x = next_shadow(x) )
- BUG_ON( x->u.sh.back == sp->u.sh.back &&
+ BUG_ON( x->v.sh.back == sp->v.sh.back &&
x->u.sh.type == sp->u.sh.type );
/* Follow the backpointer to the guest pagetable */
if ( sp->u.sh.type != SH_type_fl1_32_shadow
&& sp->u.sh.type != SH_type_fl1_pae_shadow
&& sp->u.sh.type != SH_type_fl1_64_shadow )
{
- struct page_info *gpg = mfn_to_page(_mfn(sp->u.sh.back));
+ struct page_info *gpg = mfn_to_page(_mfn(sp->v.sh.back));
/* Bad shadow flags on guest page? */
BUG_ON( !(gpg->shadow_flags & (1<<sp->u.sh.type)) );
/* Bad type count on guest page? */
@@ -1930,7 +1930,7 @@ static void sh_hash_audit_bucket(struct
{
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by
%#"PRI_mfn")"
" and not OOS but has typecount %#lx\n",
- sp->u.sh.back,
+ sp->v.sh.back,
mfn_x(shadow_page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
@@ -1944,7 +1944,7 @@ static void sh_hash_audit_bucket(struct
{
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
" but has typecount %#lx\n",
- sp->u.sh.back, mfn_x(shadow_page_to_mfn(sp)),
+ sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
}
@@ -2030,7 +2030,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
prev = NULL;
while(sp)
{
- if ( sp->u.sh.back == n && sp->u.sh.type == t )
+ if ( sp->v.sh.back == n && sp->u.sh.type == t )
{
/* Pull-to-front if 'sp' isn't already the head item */
if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
@@ -2197,7 +2197,7 @@ void sh_destroy_shadow(struct vcpu *v, m
t == SH_type_fl1_64_shadow ||
t == SH_type_monitor_table ||
(is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
- (page_get_owner(mfn_to_page(_mfn(sp->u.sh.back)))
+ (page_get_owner(mfn_to_page(_mfn(sp->v.sh.back)))
== v->domain));
/* The down-shifts here are so that the switch statement is on nice
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/multi.c 2009-01-30 10:14:47.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/multi.c 2009-01-30 10:19:35.000000000 +0100
@@ -974,7 +974,7 @@ static int shadow_set_l2e(struct vcpu *v
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
- mfn_t gl1mfn = _mfn(sp->u.sh.back);
+ mfn_t gl1mfn = _mfn(sp->v.sh.back);
/* If the shadow is a fl1 then the backpointer contains
the GFN instead of the GMFN, and it's definitely not
@@ -1926,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
ASSERT(t == SH_type_l4_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
@@ -1955,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
ASSERT(t == SH_type_l3_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
@@ -1990,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
#endif
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
@@ -2020,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
/* Record that the guest page isn't shadowed any more (in this type) */
if ( t == SH_type_fl1_shadow )
{
- gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_fl1_shadow_status(v, gfn, smfn);
}
else
{
- mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
@@ -2349,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
if ( mfn_valid(gl1mfn)
&& mfn_is_out_of_sync(gl1mfn) )
{
@@ -2968,7 +2968,7 @@ static int sh_page_fault(struct vcpu *v,
sizeof(sl2e)) != 0)
|| !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
|| !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
- shadow_l2e_get_mfn(sl2e))->u.sh.back))
+ shadow_l2e_get_mfn(sl2e))->v.sh.back))
|| unlikely(mfn_is_out_of_sync(gl1mfn)) )
{
/* Hit the slow path as if there had been no
@@ -3530,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Check to see if the SL1 is out of sync. */
{
- mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
struct page_info *pg = mfn_to_page(gl1mfn);
if ( mfn_valid(gl1mfn)
&& page_is_out_of_sync(pg) )
@@ -3560,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
pg = mfn_to_page(gl1mfn);
if ( likely(sh_mfn_is_a_page_table(gl1mfn)
@@ -4887,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
@@ -4977,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->u.sh.back);
+ gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
@@ -5026,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->u.sh.back);
+ gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
@@ -5073,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->u.sh.back);
+ gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/private.h 2009-01-30 10:19:16.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/private.h 2009-01-30 10:19:35.000000000 +0100
@@ -631,7 +631,7 @@ static inline int sh_get_ref(struct vcpu
if ( unlikely(nx >= 1U<<26) )
{
SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRpgmfn " smfn=%lx\n",
- sp->u.sh.back, mfn_x(smfn));
+ sp->v.sh.back, mfn_x(smfn));
return 0;
}
--- 2009-01-30.orig/xen/include/asm-x86/mm.h 2009-01-30 10:19:27.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/mm.h 2009-01-30 10:21:49.000000000 +0100
@@ -15,7 +15,7 @@
* 1. 'struct page_info' contains a 'struct page_list_entry list'.
* 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
/*
* This definition is solely for the use in struct page_info (and
@@ -59,8 +59,6 @@ struct page_info
/* Page is in use: ((count_info & PGC_count_mask) != 0). */
struct {
- /* Owner of this page (NULL if page is anonymous). */
- u32 _domain; /* pickled format */
/* Type reference count and various PGT_xxx flags and fields. */
unsigned long type_info;
} inuse;
@@ -70,18 +68,10 @@ struct page_info
unsigned long type:5; /* What kind of shadow is this? */
unsigned long pinned:1; /* Is the shadow pinned? */
unsigned long count:26; /* Reference count */
- union {
- /* When in use, GMFN of guest page we're a shadow of. */
- __mfn_t back;
- /* When free, order of the freelist we're on. */
- unsigned int order;
- };
} sh;
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
- /* Order-size of the free chunk this page is the head of. */
- u32 order;
/* Mask of possibly-tainted TLBs. */
cpumask_t cpumask;
} free;
@@ -89,6 +79,28 @@ struct page_info
} u;
union {
+
+ /* Page is in use, but not as a shadow. */
+ struct {
+ /* Owner of this page (NULL if page is anonymous). */
+ u32 _domain; /* pickled format */
+ } inuse;
+
+ /* Page is in use as a shadow. */
+ struct {
+ /* GMFN of guest page we're a shadow of. */
+ __mfn_t back;
+ } sh;
+
+ /* Page is on a free list (including shadow code free lists). */
+ struct {
+ /* Order-size of the free chunk this page is the head of. */
+ unsigned int order;
+ } free;
+
+ } v;
+
+ union {
/*
* Timestamp from 'TLB clock', used to avoid extra safety flushes.
* Only valid for: a) free pages, and b) pages with zero type count
@@ -225,10 +237,10 @@ struct page_info
#define SHADOW_OOS_FIXUPS 2
#define page_get_owner(_p) \
- ((struct domain *)((_p)->u.inuse._domain ? \
- mfn_to_virt((_p)->u.inuse._domain) : NULL))
+ ((struct domain *)((_p)->v.inuse._domain ? \
+ mfn_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d) \
- ((_p)->u.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
+ ((_p)->v.inuse._domain = (_d) ? virt_to_mfn(_d) : 0)
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
#define vaddr_get_owner(va) (page_get_owner(virt_to_page((va))))