ChangeSet 1.1668.1.4, 2005/06/07 16:00:55-06:00, djm@xxxxxxxxxxxxxxx
More hyperprivop work
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>
asm-offsets.c |    1
hyperprivop.S |  198 ++++++++++++++++++++++++++++++++++++++++++++++++----------
process.c     |    6 +
3 files changed, 170 insertions(+), 35 deletions(-)
diff -Nru a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c 2005-06-10 14:04:01 -04:00
+++ b/xen/arch/ia64/asm-offsets.c 2005-06-10 14:04:01 -04:00
@@ -49,6 +49,7 @@
DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
+ DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
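These DEFINE() entries are the usual asm-offsets mechanism: structure offsets computed by the C compiler are emitted as assembler-visible constants so that hyperprivop.S can address vcpu_info_t fields such as arch.isr via the XSI_* names. A generic sketch of the pattern, assuming a DEFINE macro along these conventional lines (not copied from this tree):

    /* asm-offsets idiom: the compiler evaluates the offset as an
     * immediate; a build script scans the generated assembly for the
     * "->" markers and turns each one into a #define for .S files. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " : : "i" (val))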
diff -Nru a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
--- a/xen/arch/ia64/hyperprivop.S 2005-06-10 14:04:01 -04:00
+++ b/xen/arch/ia64/hyperprivop.S 2005-06-10 14:04:01 -04:00
@@ -14,6 +14,23 @@
#include <asm/system.h>
#include <public/arch-ia64.h>
+#define FAST_HYPERPRIVOP_CNT
+
+// These should come from a common header file (they are duplicated in process.c)
+// NOTE: PSR_CLR here is different! (CPL)
+#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
+// note: IA64_PSR_PK was removed from the following; why is this necessary?
+#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
+ IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
+ IA64_PSR_IT | IA64_PSR_BN)
+
+#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
+ IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
+ IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
+ IA64_PSR_MC | IA64_PSR_IS | \
+ IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
+ IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
+
// Note: not hand-scheduled for now
// Registers at entry
// r16 == cr.isr
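The SET/CLR pair above is the usual PSR-delivery idiom: bits in the SET mask are forced on, bits in the CLR mask are forced off, and everything else in cr.ipsr passes through unchanged. A minimal C sketch of the computation (the helper name is hypothetical; the masks are the ones defined above):

    #include <stdint.h>

    /* (ipsr | set) & ~clr: force delivery bits on/off, preserve the rest. */
    static inline uint64_t deliver_psr(uint64_t ipsr, uint64_t set, uint64_t clr)
    {
            return (ipsr | set) & ~clr;
    }

This is exactly the or/and sequence hyper_ssm_i below performs with DELIVER_PSR_SET and ~DELIVER_PSR_CLR staged in r28/r27.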
@@ -22,7 +39,13 @@
// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
- //cover;;
+#if 1
+ // HYPERPRIVOP_SSM_I?
+ // assumes domain interrupts pending, so just do it
+ cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
+(p7) br.sptk.many hyper_ssm_i;;
+#endif
+#if 1
// if domain interrupts pending, give up for now and do it the slow way
adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r20=[r20] ;;
@@ -33,21 +56,13 @@
cmp.eq p7,p6=XEN_HYPER_RFI,r17
(p7) br.sptk.many hyper_rfi;;
-#if 0
- // HYPERPRIVOP_SSM_I?
- cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
-(p7) br.sptk.many hyper_ssm_i;;
-#endif
-
-#if 1
// hard to test, because only called from rbs_switch
// HYPERPRIVOP_COVER?
cmp.eq p7,p6=XEN_HYPER_COVER,r17
(p7) br.sptk.many hyper_cover;;
#endif
-#if 0 // FIXME: This inexplicably causes the number of ssm_dt's to
- // skyrocket, thus slowing down everything
+#if 1
// HYPERPRIVOP_SSM_DT?
cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
(p7) br.sptk.many hyper_ssm_dt;;
@@ -62,6 +77,141 @@
// if not one of the above, give up for now and do it the slow way
br.sptk.many dispatch_break_fault ;;
+
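In C terms the fast-path dispatch above is a switch on the break immediate in r17, with anything unrecognized (or unsafe to handle fast) falling back to dispatch_break_fault. A sketch, assuming the XEN_HYPER_* numbers from the public headers; note that the real code also bails to the slow path when domain interrupts are pending, except for SSM_I, which is checked first:

    /* Sketch only: each case mirrors an assembly label in this file. */
    void fast_hyperprivop_dispatch(unsigned long imm)
    {
            switch (imm) {
            case XEN_HYPER_SSM_I:  hyper_ssm_i();  return; /* before pending check */
            case XEN_HYPER_RFI:    hyper_rfi();    return;
            case XEN_HYPER_COVER:  hyper_cover();  return;
            case XEN_HYPER_SSM_DT: hyper_ssm_dt(); return;
            default:               dispatch_break_fault();  /* slow path */
            }
    }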
+// give up for now if: ipsr.be==1, ipsr.pp==1
+// from reflect_interruption, don't need to:
+// - printf first extint (debug only)
+// - check for interrupt collection enabled (routine will force on)
+// - set ifa (not valid for extint)
+// - set iha (not valid for extint)
+// - set itir (not valid for extint)
+// DO need to
+// - increment the HYPER_SSM_I fast_hyperprivop counter
+// - set shared_mem iip to instruction after HYPER_SSM_I
+// - set cr.iip to guest iva+0x3000
+// - set shared_mem ipsr to [vcpu_get_ipsr_int_state]
+// be = pp = bn = 0; dt = it = rt = 1; cpl = 3 or 0;
+// i = shared_mem interrupt_delivery_enabled
+// ic = shared_mem interrupt_collection_enabled
+// ri = instruction after HYPER_SSM_I
+// all other bits unchanged from real cr.ipsr
+// - set cr.ipsr (DELIVER_PSR_SET/CLEAR, don't forget cpl!)
+// - set shared_mem isr: isr.ei to instr following HYPER_SSM_I
+// and isr.ri to cr.isr.ri (all other bits zero)
+// - cover and set shared_mem precover_ifs to cr.ifs
+// ^^^ MISSED THIS FOR fast_break??
+// - set shared_mem ifs and incomplete_regframe to 0
+// - set shared_mem interrupt_delivery_enabled to 0
+// - set shared_mem interrupt_collection_enabled to 0
+// - set r31 to SHAREDINFO_ADDR
+// - virtual bank switch 0
+// maybe implement later
+// - verify that there really IS a deliverable interrupt pending
+// - set shared_mem iva
+// needs to be done but not implemented (in reflect_interruption)
+// - set shared_mem iipa
+// don't know for sure
+// - set shared_mem unat
+// r16 == cr.isr
+// r17 == cr.iim
+// r18 == XSI_PSR_IC
+// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r22 == IA64_KR(CURRENT)+IA64_VCPU_BREAKIMM_OFFSET
+// r31 == pr
+ENTRY(hyper_ssm_i)
+ // give up for now if: ipsr.be==1, ipsr.pp==1
+ mov r30=cr.ipsr;;
+ mov r29=cr.iip;;
+ extr.u r21=r30,IA64_PSR_BE_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0
+(p7) br.sptk.many dispatch_break_fault ;;
+ extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0
+(p7) br.sptk.many dispatch_break_fault ;;
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SSM_I);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ // set shared_mem iip to instruction after HYPER_SSM_I
+ extr.u r20=r30,41,2 ;;
+ cmp.eq p6,p7=2,r20 ;;
+(p6) mov r20=0
+(p6) adds r29=16,r29
+(p7) adds r20=1,r20 ;;
+ dep r30=r20,r30,41,2;; // adjust cr.ipsr.ri but don't save yet
+ adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r29 ;;
+ // set shared_mem isr
+ extr.u r16=r16,38,1;; // grab cr.isr.ir bit
+ dep r16=r16,r0,38,1 ;; // insert into cr.isr (rest of bits zero)
+ dep r16=r20,r16,41,2 ;; // deposit cr.isr.ri
+ adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r16 ;;
+ // set cr.ipsr
+ mov r29=r30 ;;
+ movl r28=DELIVER_PSR_SET;;
+ movl r27=~DELIVER_PSR_CLR;;
+ or r29=r29,r28;;
+ and r29=r29,r27;;
+ mov cr.ipsr=r29;;
+ // set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
+ extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
+ cmp.eq p6,p7=3,r29;;
+(p6) dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
+(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
+ ;;
+ // FOR SSM_I ONLY, also turn on psr.i and psr.ic
+ movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT|IA64_PSR_I|IA64_PSR_IC);;
+ movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN);;
+ or r30=r30,r28;;
+ and r30=r30,r27;;
+ adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r30 ;;
+ // set shared_mem interrupt_delivery_enabled to 0
+ // set shared_mem interrupt_collection_enabled to 0
+ st8 [r18]=r0;;
+ // cover and set shared_mem precover_ifs to cr.ifs
+ // set shared_mem ifs and incomplete_regframe to 0
+ cover ;;
+ mov r20=cr.ifs;;
+ adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r21]=r0 ;;
+ adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r0 ;;
+ adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r20 ;;
+ // leave cr.ifs alone for later rfi
+ // set iip to go to domain IVA break instruction vector
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_IVA_OFFSET,r22;;
+ ld8 r23=[r22];;
+ movl r24=0x3000;;
+ add r24=r24,r23;;
+ mov cr.iip=r24;;
+ // OK, now all set to go except for switch to virtual bank0
+ mov r30=r2; mov r29=r3;;
+ adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
+ adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ bsw.1;;
+ st8 [r2]=r16,16; st8 [r3]=r17,16 ;;
+ st8 [r2]=r18,16; st8 [r3]=r19,16 ;;
+ st8 [r2]=r20,16; st8 [r3]=r21,16 ;;
+ st8 [r2]=r22,16; st8 [r3]=r23,16 ;;
+ st8 [r2]=r24,16; st8 [r3]=r25,16 ;;
+ st8 [r2]=r26,16; st8 [r3]=r27,16 ;;
+ st8 [r2]=r28,16; st8 [r3]=r29,16 ;;
+ st8 [r2]=r30,16; st8 [r3]=r31,16 ;;
+ movl r31=XSI_IPSR;;
+ bsw.0 ;;
+ mov r2=r30; mov r3=r29;;
+ adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r20]=r0 ;;
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+
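Read as C, the new hyper_ssm_i fast path boils down to the steps in the comment block above. The sketch below is hypothetical throughout: the register accessors, the helper functions, and the trimmed-down struct stand in for the cr.* moves and the vcpu_info_t arch fields reached via the XSI_* offsets; it assumes the IA64_PSR_* and XEN_HYPER_* constants from the ia64 headers, and only the bit manipulation follows the assembly:

    #include <stdint.h>

    struct shared_arch {           /* slice of vcpu_info_t.arch (names illustrative) */
            uint64_t psr_ic_and_i; /* the 8-byte word at XSI_PSR_IC */
            uint64_t ipsr, isr, iip, ifs, precover_ifs;
            uint32_t incomplete_regframe;
    };

    extern uint64_t read_ipsr(void), read_iip(void), read_isr(void);
    extern void write_ipsr(uint64_t), write_iip(uint64_t);
    extern uint64_t cover_and_read_ifs(void);  /* cover ;; then read cr.ifs */
    extern void dispatch_break_fault(void), virtual_bank_switch_to_0(void);
    extern uint64_t fast_hyperpriv_cnt[];

    void hyper_ssm_i_sketch(struct shared_arch *xsi, uint64_t guest_iva)
    {
            uint64_t ipsr = read_ipsr(), iip = read_iip(), isr = read_isr();
            uint64_t ri = (ipsr >> 41) & 3;        /* ipsr.ri: instruction slot */

            /* give up for now if ipsr.be==1 or ipsr.pp==1 */
            if (ipsr & (IA64_PSR_BE | IA64_PSR_PP)) {
                    dispatch_break_fault();
                    return;
            }
            fast_hyperpriv_cnt[XEN_HYPER_SSM_I]++;

            /* step past the break: slot 2 wraps to the next 16-byte bundle */
            if (ri == 2) { ri = 0; iip += 16; } else ri++;
            ipsr = (ipsr & ~(3UL << 41)) | (ri << 41);

            xsi->iip = iip;
            xsi->isr = (isr & (1UL << 38)) | (ri << 41); /* keep isr.ir, set isr.ei */

            /* real cr.ipsr for the reflection */
            write_ipsr((ipsr | DELIVER_PSR_SET) & ~DELIVER_PSR_CLR);

            /* guest-visible ipsr: cpl folded to 3-or-0, dt/it/rt on,
             * be/pp/bn off; psr.i and psr.ic forced on for SSM_I only */
            if (((ipsr >> IA64_PSR_CPL0_BIT) & 3) == 3)
                    ipsr |= 3UL << IA64_PSR_CPL0_BIT;
            else
                    ipsr &= ~(3UL << IA64_PSR_CPL0_BIT);
            ipsr |= IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | IA64_PSR_I | IA64_PSR_IC;
            ipsr &= ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_BN);
            xsi->ipsr = ipsr;

            /* one st8 clears both virtual psr.ic and psr.i in this layout */
            xsi->psr_ic_and_i = 0;

            /* cover, publish cr.ifs as precover_ifs, clear ifs state */
            xsi->precover_ifs = cover_and_read_ifs();
            xsi->ifs = 0;
            xsi->incomplete_regframe = 0;

            /* vector to the guest's break handler at iva+0x3000, switch
             * the virtual register bank to 0 (bank1 r16-r31 go to the
             * shared area), then rfi back to the guest */
            write_iip(guest_iva + 0x3000);
            virtual_bank_switch_to_0();
    }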
// reflect domain breaks directly to domain
// FIXME: DOES NOT WORK YET
// r16 == cr.isr
@@ -150,7 +300,6 @@
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
-#define FAST_HYPERPRIVOP_CNT
#ifdef FAST_HYPERPRIVOP_CNT
movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
ld8 r21=[r20];;
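The FAST_HYPERPRIVOP_CNT instrumentation, now enabled once at the top of the file instead of inside hyper_rfi, is just an array of 64-bit counters indexed by hyperprivop number; the movl/ld8/adds/st8 sequence is the C increment below (a sketch; only the array name comes from the assembly):

    /* fast_hyperpriv_cnt + 8*n, as in the movl above */
    extern unsigned long fast_hyperpriv_cnt[];

    #ifdef FAST_HYPERPRIVOP_CNT
    #define COUNT_FAST_HYPERPRIVOP(n)  (fast_hyperpriv_cnt[(n)]++)
    #else
    #define COUNT_FAST_HYPERPRIVOP(n)  ((void)0)
    #endif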
@@ -276,7 +425,7 @@
srlz.i;;
st4 [r20]=r0 ;;
// adjust return address to skip over break instruction
- extr.u r26=r24,41,2 ;;
+1: extr.u r26=r24,41,2 ;;
cmp.eq p6,p7=2,r26 ;;
(p6) mov r26=0
(p6) adds r25=16,r25
@@ -286,7 +435,7 @@
;;
mov cr.ipsr=r24
mov cr.iip=r25
-1: mov pr=r31,-1 ;;
+ mov pr=r31,-1 ;;
rfi
;;
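Both relabeled hunks implement the same "skip over the break instruction" bump used in hyper_ssm_i: ipsr.ri occupies bits 41-42, each bundle holds three instruction slots, and slot 2 rolls over to slot 0 of the next 16-byte bundle. As a self-contained C helper (hypothetical name):

    #include <stdint.h>

    /* advance past the current instruction: bump ipsr.ri, wrapping from
     * slot 2 to slot 0 of the next bundle (iip += 16) */
    static inline void skip_break(uint64_t *ipsr, uint64_t *iip)
    {
            uint64_t ri = (*ipsr >> 41) & 3;
            if (ri == 2) {
                    ri = 0;
                    *iip += 16;
            } else {
                    ri++;
            }
            *ipsr = (*ipsr & ~(3UL << 41)) | (ri << 41);
    }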
@@ -312,7 +461,7 @@
adds r21=1,r0 ;;
st4 [r20]=r21 ;;
// adjust return address to skip over break instruction
- extr.u r26=r24,41,2 ;;
+1: extr.u r26=r24,41,2 ;;