# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 3ab5ab4d6d75a7c585fd90814da92977ecbf175e
# Parent adb151aaf3f800e9a34b19aa94a93513a0a59e9f
[IA64] slightly improve stability
vcpu_ptc_e: fix flush order.
vcpu_ptc_ga: fix typo (only the local vcpu's v-tlb was flushed, not the target vcpu's).
itlb_pte/dtlb_pte removed.
vcpu_itr_* and vcpu_itc_no_srlz call vcpu_set_tr_entry coherently.
in_tpa parameter of vcpu_translate removed.
handle_lazy_cover is now static and its unused 'isr' parameter removed.
Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
xen/arch/ia64/asm-offsets.c | 2 --
xen/arch/ia64/xen/hyperprivop.S | 13 ++++++++-----
xen/arch/ia64/xen/process.c | 9 +++++----
xen/arch/ia64/xen/vcpu.c | 32 +++++++++++++-------------------
xen/include/asm-ia64/domain.h | 2 --
xen/include/asm-ia64/vcpu.h | 3 +--
6 files changed, 27 insertions(+), 34 deletions(-)
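
As a rough sketch of why the vcpu_ptc_e reordering below matters: the per-vcpu 1-entry v-TLBs are now purged before the VHPT and machine TLB are flushed, so nothing stale survives the wider flush. The toy_* names, stubs and main() below are invented purely for illustration and are not Xen interfaces:

/* Toy model only: toy_vcpu, toy_ptc_e and the flush stubs are invented
 * for illustration and are not Xen code. */
#include <stdbool.h>
#include <stdio.h>

struct tr_entry { bool valid; unsigned long pte, itir, vadr; };
struct toy_vcpu { struct tr_entry itlb, dtlb; };

static void purge_tr_entry(struct tr_entry *trp) { trp->valid = false; }
static void flush_vhpt(void)        { puts("flush VHPT"); }        /* stand-in for vhpt_flush() */
static void flush_machine_tlb(void) { puts("flush machine TLB"); } /* stand-in for local_flush_tlb_all() */

static void toy_ptc_e(struct toy_vcpu *v)
{
	/* Drop the software 1-entry TLB copies first... */
	purge_tr_entry(&v->dtlb);
	purge_tr_entry(&v->itlb);
	/* ...then flush the wider hardware-visible structures. */
	flush_vhpt();
	flush_machine_tlb();
}

int main(void)
{
	struct toy_vcpu v = { .itlb = { .valid = true }, .dtlb = { .valid = true } };
	toy_ptc_e(&v);
	return 0;
}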
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/arch/ia64/asm-offsets.c Mon May 08 12:47:54 2006 -0600
@@ -50,8 +50,6 @@ void foo(void)
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
- DEFINE(IA64_VCPU_DTLB_PTE_OFFSET, offsetof (struct vcpu, arch.dtlb_pte));
- DEFINE(IA64_VCPU_ITLB_PTE_OFFSET, offsetof (struct vcpu, arch.itlb_pte));
DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/arch/ia64/xen/hyperprivop.S Mon May 08 12:47:54 2006 -0600
@@ -30,7 +30,7 @@
#undef FAST_ITC //XXX CONFIG_XEN_IA64_DOM0_VP
// TODO fast_itc doesn't suport dom0 vp yet.
#else
-//#define FAST_ITC // working but default off for now
+//#define FAST_ITC // to be reviewed
#endif
#define FAST_BREAK
#ifndef CONFIG_XEN_IA64_DOM0_VP
@@ -769,7 +769,7 @@ GLOBAL_ENTRY(fast_tlb_miss_reflect)
GLOBAL_ENTRY(fast_tlb_miss_reflect)
#ifndef FAST_TLB_MISS_REFLECT // see beginning of file
br.spnt.few page_fault ;;
-#endif
+#else
mov r31=pr
mov r30=cr.ipsr
mov r29=cr.iip
@@ -1007,6 +1007,7 @@ 1: extr.u r25=r17,61,3;;
mov r29=cr.iip
mov r30=cr.ipsr
br.sptk.many fast_reflect;;
+#endif
END(fast_tlb_miss_reflect)
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
@@ -2003,7 +2004,7 @@ ENTRY(hyper_itc_d)
ENTRY(hyper_itc_d)
#ifndef FAST_ITC
br.sptk.many dispatch_break_fault ;;
-#endif
+#else
// ensure itir.ps >= xen's pagesize
adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r23=[r23];;
@@ -2040,7 +2041,9 @@ ENTRY(hyper_itc_d)
movl r30=recover_and_dispatch_break_fault ;;
mov r16=r8;;
// fall through
-
+#endif
+
+#if defined(FAST_ITC) || defined (FAST_TLB_MISS_REFLECT)
// fast_insert(PSCB(ifa),r24=ps,r16=pte)
// r16 == pte
@@ -2175,4 +2178,4 @@ no_inc_iip:
rfi
;;
END(fast_insert)
-
+#endif
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/arch/ia64/xen/process.c Mon May 08 12:47:54 2006 -0600
@@ -265,7 +265,8 @@ void deliver_pending_interrupt(struct pt
}
unsigned long lazy_cover_count = 0;
-int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
+static int
+handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
if (!PSCB(v,interrupt_collection_enabled)) {
PSCB(v,ifs) = regs->cr_ifs;
@@ -285,7 +286,7 @@ void ia64_do_page_fault (unsigned long a
unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
IA64FAULT fault;
- if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
+ if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
{
@@ -299,7 +300,7 @@ void ia64_do_page_fault (unsigned long a
}
again:
- fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
+ fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
u64 logps;
pteval = translate_domain_pte(pteval, address, itir, &logps);
@@ -813,7 +814,7 @@ printf("*** Handled privop masquerading
while(vector);
return;
}
- if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
+ if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
PSCB(current,ifa) = ifa;
PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
reflect_interruption(isr,regs,vector);
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Mon May 08 12:47:54 2006 -0600
@@ -1290,8 +1290,7 @@ static inline int vcpu_match_tr_entry(TR
return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
}
-// in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP
-IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
+IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
{
unsigned long region = address >> 61;
unsigned long pta, rid, rr;
@@ -1368,12 +1367,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
pte = trp->pte;
if (/* is_data && */ pte.p
&& vcpu_match_tr_entry_no_p(trp,address,rid)) {
-#ifndef CONFIG_XEN_IA64_DOM0_VP
- if (vcpu->domain==dom0 && !in_tpa)
- *pteval = pte.val;
- else
-#endif
- *pteval = vcpu->arch.dtlb_pte;
+ *pteval = pte.val;
*itir = trp->itir;
dtlb_translate_count++;
return IA64_USE_TLB;
@@ -1422,7 +1416,7 @@ IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 va
UINT64 pteval, itir, mask, iha;
IA64FAULT fault;
- fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
+ fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
{
mask = itir_mask(itir);
@@ -1800,12 +1794,10 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
return;
if (IorD & 0x1) {
- vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
- PSCBX(vcpu,itlb_pte) = mp_pte;
+ vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
}
if (IorD & 0x2) {
- vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
- PSCBX(vcpu,dtlb_pte) = mp_pte;
+ vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
}
}
@@ -1882,13 +1874,15 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64
// architected loop to purge the entire TLB, should use
// base = stride1 = stride2 = 0, count0 = count 1 = 1
-#ifdef VHPT_GLOBAL
- vhpt_flush(); // FIXME: This is overdoing it
-#endif
- local_flush_tlb_all();
// just invalidate the "whole" tlb
vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
+#ifdef VHPT_GLOBAL
+ vhpt_flush(); // FIXME: This is overdoing it
+#endif
+ local_flush_tlb_all();
+
return IA64_NO_FAULT;
}
@@ -1915,8 +1909,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64
/* Purge TC entries.
FIXME: clear only if match. */
- vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
- vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+ vcpu_purge_tr_entry(&PSCBX(v,dtlb));
+ vcpu_purge_tr_entry(&PSCBX(v,itlb));
#ifdef VHPT_GLOBAL
/* Invalidate VHPT entries. */
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/include/asm-ia64/domain.h Mon May 08 12:47:54 2006 -0600
@@ -69,8 +69,6 @@ struct arch_vcpu {
TR_ENTRY dtlb;
unsigned int itr_regions;
unsigned int dtr_regions;
- unsigned long itlb_pte;
- unsigned long dtlb_pte;
unsigned long irr[4];
unsigned long insvc[4];
unsigned long tc_regions;
diff -r adb151aaf3f8 -r 3ab5ab4d6d75 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Wed Apr 26 20:55:42 2006 -0600
+++ b/xen/include/asm-ia64/vcpu.h Mon May 08 12:47:54 2006 -0600
@@ -148,8 +148,7 @@ extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu,
extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address,
- BOOLEAN is_data, BOOLEAN in_tpa,
+extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
UINT64 *pteval, UINT64 *itir, UINT64 *iha);
extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);