ChangeSet 1.1713.2.13, 2005/06/21 09:55:11-06:00, djm@xxxxxxxxxxxxxxx
More hyperprivop stuff
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>
arch/ia64/asm-offsets.c | 3 ++
arch/ia64/domain.c | 2 +
arch/ia64/hyperprivop.S | 64 ++++++++++++++++++++++++++++++++++++++++++++++
arch/ia64/regionreg.c | 39 +++++++++++-----------------
include/asm-ia64/domain.h | 5 +--
5 files changed, 87 insertions(+), 26 deletions(-)
diff -Nru a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c 2005-06-23 07:04:00 -04:00
+++ b/xen/arch/ia64/asm-offsets.c 2005-06-23 07:04:00 -04:00
@@ -54,6 +54,7 @@
DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
+ DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
@@ -79,6 +80,8 @@
DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
+ DEFINE(IA64_VCPU_STARTING_RID_OFFSET, offsetof (struct vcpu, arch.starting_rid));
+ DEFINE(IA64_VCPU_ENDING_RID_OFFSET, offsetof (struct vcpu, arch.ending_rid));
DEFINE(IA64_VCPU_DOMAIN_ITM_OFFSET, offsetof (struct vcpu, arch.domain_itm));
BLANK();
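
For context, a sketch of how these DEFINE entries are typically consumed (the exact macro in this tree is assumed, not shown in the patch): the compiler emits each constant as a marker in its assembly output, a build step scrapes the markers into a generated offsets header, and hand-written assembly such as hyperprivop.S can then address fields like arch.starting_rid symbolically.

	/* Conventional asm-offsets idiom; hypothetical spelling for this tree. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* Once the header is generated, assembly can write
	 *     adds r21=IA64_VCPU_STARTING_RID_OFFSET,r20 ;;
	 * instead of hard-coding the struct vcpu layout. */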
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c 2005-06-23 07:04:00 -04:00
+++ b/xen/arch/ia64/domain.c 2005-06-23 07:04:00 -04:00
@@ -258,6 +258,8 @@
#define DOMAIN_RID_BITS_DEFAULT 18
if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
BUG();
+ v->arch.starting_rid = d->arch.starting_rid;
+ v->arch.ending_rid = d->arch.ending_rid;
// the following will eventually need to be negotiated dynamically
d->xen_vastart = 0xf000000000000000;
d->xen_vaend = 0xf300000000000000;
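
The two assignments above cache the domain-wide RID bounds in each vcpu. A minimal sketch of the intent (cache_rid_bounds is a hypothetical name; the patch simply does this inline): with the bounds in struct vcpu, the hyperprivop fast path can load them from assembly with fixed offsets off the CURRENT vcpu pointer instead of chasing v->domain.

	/* Hypothetical helper mirroring the two inline assignments above. */
	static inline void cache_rid_bounds(struct vcpu *v, struct domain *d)
	{
		v->arch.starting_rid = d->arch.starting_rid;
		v->arch.ending_rid = d->arch.ending_rid;
	}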
diff -Nru a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
--- a/xen/arch/ia64/hyperprivop.S 2005-06-23 07:04:00 -04:00
+++ b/xen/arch/ia64/hyperprivop.S 2005-06-23 07:04:00 -04:00
@@ -101,6 +101,10 @@
cmp.eq p7,p6=XEN_HYPER_SET_ITM,r17
(p7) br.sptk.many hyper_set_itm;;
+ // HYPERPRIVOP_SET_RR?
+ cmp.eq p7,p6=XEN_HYPER_SET_RR,r17
+(p7) br.sptk.many hyper_set_rr;;
+
// if not one of the above, give up for now and do it the slow way
br.sptk.many dispatch_break_fault ;;
@@ -849,3 +853,63 @@
rfi
;;
END(hyper_set_itm)
+
+ENTRY(hyper_set_rr)
+#if 1 // fast path disabled for now: always punt to the slow path
+ br.sptk.many dispatch_break_fault ;;
+#endif
+ extr.u r25=r8,61,3;;
+ cmp.leu p7,p0=7,r25 // punt on setting rr7
+(p7) br.spnt.many dispatch_break_fault ;;
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SET_RR);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ extr.u r26=r9,8,24 // r26 = r9.rid
+ mov r20=IA64_KR(CURRENT);;
+ adds r21=IA64_VCPU_STARTING_RID_OFFSET,r20;;
+ ld4 r22=[r21];;
+ adds r21=IA64_VCPU_ENDING_RID_OFFSET,r20;;
+ ld4 r23=[r21];;
+ adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20;; // r24 = &metaphysical_saved_rr0
+ add r22=r26,r22;;
+ cmp.geu p6,p0=r22,r23 // if r9.rid + starting_rid >= ending_rid
+(p6) br.cond.sptk.many 1f; // this is an error, but just ignore/return
+ // r22 = r9.rid + starting_rid (adjusted rid)
+ adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
+ shl r25=r25,3;;
+ add r20=r20,r25;;
+ st8 [r20]=r9;; // store away exactly what was passed
+ // but adjust value actually placed in rr[r8]
+ // r22 contains adjusted rid, "mangle" it (see regionreg.c)
+ // and set ps to PAGE_SHIFT and ve to 1
+ extr.u r27=r22,0,8
+ extr.u r28=r22,8,8
+ extr.u r29=r22,16,8;;
+ dep.z r23=PAGE_SHIFT,2,6;;
+ dep r23=-1,r23,0,1;; // mangling is swapping bytes 1 & 3
+ dep r23=r27,r23,24,8;;
+ dep r23=r28,r23,16,8;;
+ dep r23=r29,r23,8,8
+ cmp.eq p6,p0=r25,r0;; // if rr0, save for metaphysical
+(p6) st4 [r24]=r23
+ mov rr[r8]=r23;;
+ // done, mosey on back
+1: mov r24=cr.ipsr
+ mov r25=cr.iip;;
+ extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+END(hyper_set_rr)
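
A C rendering of the value the assembly above builds in r23 may make the mangling easier to follow. This is a sketch only (mangle_rr_value is a hypothetical name, and PAGE_SHIFT is assumed from the Xen headers); it mirrors vmMangleRID in regionreg.c, which the NOTE added below warns must stay consistent with this code. The ve bit is set, ps is set to PAGE_SHIFT, and the three RID bytes are deposited in reversed order, i.e. bytes 1 and 3 of the region register value are swapped.

	/* Sketch of the rr value hyper_set_rr computes (hypothetical helper). */
	unsigned long mangle_rr_value(unsigned long rid) /* rid already offset by starting_rid */
	{
		unsigned long rrval = 1;			/* ve = 1 (bit 0) */
		rrval |= (unsigned long)PAGE_SHIFT << 2;	/* ps in bits 2..7 */
		rrval |= ((rid >> 0) & 0xff) << 24;		/* rid byte 0 -> rr byte 3 */
		rrval |= ((rid >> 8) & 0xff) << 16;		/* rid byte 1 -> rr byte 2 */
		rrval |= ((rid >> 16) & 0xff) << 8;		/* rid byte 2 -> rr byte 1 */
		return rrval;
	}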
diff -Nru a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
--- a/xen/arch/ia64/regionreg.c 2005-06-23 07:04:00 -04:00
+++ b/xen/arch/ia64/regionreg.c 2005-06-23 07:04:00 -04:00
@@ -148,11 +148,10 @@
for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;
// setup domain struct
- d->rid_bits = ridbits;
- d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
- d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
+ d->arch.rid_bits = ridbits;
+ d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS; d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
printf("###allocating rid_range, domain %p: starting_rid=%lx,
ending_rid=%lx\n",
-d,d->starting_rid, d->ending_rid);
+d,d->arch.starting_rid, d->arch.ending_rid);
return 1;
}
@@ -161,14 +160,14 @@
int deallocate_rid_range(struct domain *d)
{
int i;
- int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
- int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;
+ int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
+ int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;
return 1; // KLUDGE ALERT
//
// not all domains will have allocated RIDs (physical mode loaders for instance)
//
- if (d->rid_bits == 0) return 1;
+ if (d->arch.rid_bits == 0) return 1;
#ifdef DEBUG
for (i = rid_block_start; i < rid_block_end; ++i) {
@@ -179,9 +178,9 @@
for (i = rid_block_start; i < rid_block_end; ++i)
ridblock_owner[i] = NULL;
- d->rid_bits = 0;
- d->starting_rid = 0;
- d->ending_rid = 0;
+ d->arch.rid_bits = 0;
+ d->arch.starting_rid = 0;
+ d->arch.ending_rid = 0;
return 1;
}
@@ -193,9 +192,8 @@
// a region register; anytime it is "viewable" outside of this module,
// it should be unmangled
-//This appears to work in Xen... turn it on later so no complications yet
-#define CONFIG_MANGLE_RIDS
-#ifdef CONFIG_MANGLE_RIDS
+// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
+// Must ensure these two remain consistent!
static inline unsigned long
vmMangleRID(unsigned long RIDVal)
{
@@ -214,11 +212,6 @@
// since vmMangleRID is symmetric, use it for unmangling also
#define vmUnmangleRID(x) vmMangleRID(x)
-#else
-// no mangling/unmangling
-#define vmMangleRID(x) (x)
-#define vmUnmangleRID(x) (x)
-#endif
static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
@@ -265,12 +258,12 @@
rrv.rrval = val;
newrrv.rrval = 0;
- newrid = v->domain->starting_rid + rrv.rid;
+ newrid = v->arch.starting_rid + rrv.rid;
- if (newrid > v->domain->ending_rid) {
+ if (newrid > v->arch.ending_rid) {
printk("can't set rr%d to %lx, starting_rid=%lx,"
"ending_rid=%lx, val=%lx\n", rreg, newrid,
- v->domain->starting_rid,v->domain->ending_rid,val);
+ v->arch.starting_rid,v->arch.ending_rid,val);
return 0;
}
@@ -358,7 +351,7 @@
ia64_rr rrv;
rrv.rrval = rrval;
- rrv.rid += v->domain->starting_rid;
+ rrv.rid += v->arch.starting_rid;
return rrv.rrval;
}
@@ -368,7 +361,7 @@
ia64_rr rrv;
rrv.rrval = rrval;
- rrv.rid -= v->domain->starting_rid;
+ rrv.rid -= v->arch.starting_rid;
return rrv.rrval;
}
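
Taken together, these hunks implement the virtual-to-machine RID translation: the guest's RID is offset into the machine RID block reserved for its domain, bounds-checked, and mangled before reaching the hardware region register. A condensed sketch under those definitions (translate_rid is a hypothetical name; ia64_rr and vmMangleRID are the definitions in this file):

	/* Hypothetical condensation of the set_one_rr path above. */
	static int translate_rid(struct vcpu *v, unsigned long val, unsigned long *out)
	{
		ia64_rr rrv;
		unsigned long newrid;

		rrv.rrval = val;
		newrid = v->arch.starting_rid + rrv.rid;
		if (newrid > v->arch.ending_rid)	/* outside this domain's RID block */
			return 0;
		rrv.rid = newrid;
		*out = vmMangleRID(rrv.rrval);	/* must stay consistent with hyper_set_rr */
		return 1;
	}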
diff -Nru a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h 2005-06-23 07:04:00 -04:00
+++ b/xen/include/asm-ia64/domain.h 2005-06-23 07:04:00 -04:00
@@ -54,9 +54,6 @@
u64 entry;
#endif
};
-#define starting_rid arch.starting_rid
-#define ending_rid arch.ending_rid
-#define rid_bits arch.rid_bits
#define xen_vastart arch.xen_vastart
#define xen_vaend arch.xen_vaend
#define shared_info_va arch.shared_info_va
@@ -83,6 +80,8 @@
int metaphysical_rr0; // from arch_domain (so is pinned)
int metaphysical_saved_rr0; // from arch_domain (so is pinned)
int breakimm; // from arch_domain (so is pinned)
+ int starting_rid; /* first RID assigned to domain */
+ int ending_rid; /* one beyond highest RID assigned to domain */
struct mm_struct *active_mm;
struct thread_struct _thread; // this must be last
#ifdef CONFIG_VTI