ChangeSet 1.1709.2.2, 2005/06/15 10:26:56-06:00, djm@xxxxxxxxxxxxxxx
Enable VHPT for region 7
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>
arch/ia64/ivt.S | 18 +++++++++++++++++-
arch/ia64/regionreg.c | 10 ++++++++++
arch/ia64/vcpu.c | 3 ++-
arch/ia64/vhpt.c | 31 +++++++++++++++++++++++++++++++
include/asm-ia64/vhpt.h | 17 +++++++++++++++--
5 files changed, 75 insertions(+), 4 deletions(-)
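Background for the change below: on IA-64 each region register holds a region ID, a preferred page size, and a "ve" bit that turns the hardware VHPT walker on or off for that region. The regionreg.c hunk sets ve=1 when installing region 7, which is what the title refers to. A minimal C sketch of that layout follows, with illustrative names (ia64_rr_sketch, make_rr7_sketch) rather than the structures the patch actually uses; field widths follow the IA-64 architecture (ve bit 0, ps bits 2-7, rid bits 8-31).

/* Rough sketch of an IA-64 region register; names are illustrative only. */
typedef union {
    unsigned long rrval;
    struct {
        unsigned long ve  : 1;   /* VHPT walker enabled for this region */
        unsigned long rsv : 1;   /* reserved */
        unsigned long ps  : 6;   /* preferred page size, log2 bytes */
        unsigned long rid : 24;  /* region ID */
        unsigned long pad : 32;  /* reserved */
    } f;
} ia64_rr_sketch;

/* Build the kind of value the patch installs for region 7: walker on,
 * PAGE_SHIFT-sized pages, caller-supplied RID. */
static unsigned long make_rr7_sketch(unsigned long rid, unsigned long page_shift)
{
    ia64_rr_sketch rr;
    rr.rrval = 0;
    rr.f.ve  = 1;           /* "VHPT now enabled for region 7!!" */
    rr.f.ps  = page_shift;
    rr.f.rid = rid;
    return rr.rrval;
}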
diff -Nru a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
--- a/xen/arch/ia64/ivt.S 2005-06-19 14:04:32 -04:00
+++ b/xen/arch/ia64/ivt.S 2005-06-19 14:04:32 -04:00
@@ -348,12 +348,23 @@
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_itlb_miss:
+ movl r17=PAGE_KERNEL
+ mov r21=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+#else
mov r16=cr.ifa // get address that caused the TLB miss
movl r17=PAGE_KERNEL
mov r21=cr.ipsr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r31=pr
;;
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
@@ -399,13 +410,18 @@
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_dtlb_miss:
movl r17=PAGE_KERNEL
mov r20=cr.isr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r21=cr.ipsr
- mov r31=pr
;;
+#else
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
diff -Nru a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
--- a/xen/arch/ia64/regionreg.c 2005-06-19 14:04:32 -04:00
+++ b/xen/arch/ia64/regionreg.c 2005-06-19 14:04:32 -04:00
@@ -274,6 +274,7 @@
return 0;
}
+#ifdef CONFIG_VTI
memrrv.rrval = rrv.rrval;
if (rreg == 7) {
newrrv.rid = newrid;
@@ -290,6 +291,15 @@
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
set_rr(rr,newrrv.rrval);
}
+#else
+ memrrv.rrval = rrv.rrval;
+ newrrv.rid = newrid;
+ newrrv.ve = 1; // VHPT now enabled for region 7!!
+ newrrv.ps = PAGE_SHIFT;
+ if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
+ if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ else set_rr(rr,newrrv.rrval);
+#endif
return 1;
}
diff -Nru a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c 2005-06-19 14:04:32 -04:00
+++ b/xen/arch/ia64/vcpu.c 2005-06-19 14:04:32 -04:00
@@ -1589,7 +1589,8 @@
// addresses never get flushed. More work needed if this
// ever happens.
//printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
- vhpt_insert(vaddr,pte,logps<<2);
+ if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
+ else vhpt_insert(vaddr,pte,logps<<2);
}
// even if domain pagesize is larger than PAGE_SIZE, just put
// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
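A quick sanity check on the dispatch above, which only takes the multi-insert path for mappings larger than Xen's own page size. This sketch assumes the 16KB PAGE_SIZE (PAGE_SHIFT == 14) that the comments in vhpt.c below imply; names and values are illustrative only.

#include <stdio.h>

int main(void)
{
    unsigned long page_shift = 14;   /* assumed Xen/ia64 PAGE_SHIFT (16KB pages) */
    unsigned long logps = 24;        /* 16MB domain page, as in the FIXME below */
    unsigned long entries = 1UL << (logps - page_shift);
    printf("%lu VHPT insertions per 16MB page\n", entries);  /* 1024 */
    return 0;
}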
diff -Nru a/xen/arch/ia64/vhpt.c b/xen/arch/ia64/vhpt.c
--- a/xen/arch/ia64/vhpt.c 2005-06-19 14:04:32 -04:00
+++ b/xen/arch/ia64/vhpt.c 2005-06-19 14:04:32 -04:00
@@ -87,6 +87,37 @@
ia64_srlz_i();
}
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
+{
+ unsigned long mask = (1L << logps) - 1;
+ int i;
+
+ if (logps-PAGE_SHIFT > 10) {
+ // if this happens, we may want to revisit this algorithm
+ printf("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
+ while(1);
+ }
+ if (logps-PAGE_SHIFT > 2) {
+ // FIXME: Should add counter here to see how often this
+ // happens (e.g. for 16MB pages!) and determine if it
+ // is a performance problem. On a quick look, it takes
+ // about 39000 instrs for a 16MB page and it seems to occur
+ // only a few times/second, so OK for now.
+ // An alternate solution would be to just insert the one
+ // 16KB in the vhpt (but with the full mapping)?
+ //printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
+ //"va=%p, pa=%p, pa-masked=%p\n",
+ //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
+ //(pte&_PFN_MASK)&~mask);
+ }
+ vaddr &= ~mask;
+ pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
+ for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
+ vhpt_insert(vaddr,pte,logps<<2);
+ vaddr += PAGE_SIZE;
+ }
+}
+
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
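The masking near the end of vhpt_multiple_insert aligns both the virtual address and the physical frame down to the large-page boundary, so every inserted entry carries the same translation (and the same logps page size in its itir) while only the hashed virtual address steps by PAGE_SIZE. A rough C rendering of that arithmetic with made-up example values; _PFN_MASK_SKETCH stands in for the real _PFN_MASK and assumes IA64_MAX_PHYS_BITS == 50, mirroring the r19 constant loaded in ivt.S above.

#include <stdio.h>

#define _PFN_MASK_SKETCH 0x0003fffffffff000UL  /* assumed pfn field of a PTE */

int main(void)
{
    unsigned long logps = 24;                    /* 16MB page */
    unsigned long mask  = (1UL << logps) - 1;
    unsigned long vaddr = 0xf100000001234000UL;  /* example only */
    unsigned long pte   = 0x0000000004567661UL;  /* example only */

    unsigned long va_base = vaddr & ~mask;       /* large-page aligned va */
    unsigned long new_pte = ((pte & _PFN_MASK_SKETCH) & ~mask)
                          | (pte & ~_PFN_MASK_SKETCH);

    printf("va_base=%lx new_pte=%lx\n", va_base, new_pte);
    return 0;
}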
diff -Nru a/xen/include/asm-ia64/vhpt.h b/xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h 2005-06-19 14:04:32 -04:00
+++ b/xen/include/asm-ia64/vhpt.h 2005-06-19 14:04:32 -04:00
@@ -140,12 +140,20 @@
mov r16 = cr.ifa; \
movl r30 = int_counts; \
;; \
+ extr.u r17=r16,59,5 \
+ ;; \
+ cmp.eq p6,p0=0x1e,r17; \
+(p6) br.cond.spnt .Alt_##Name \
+ ;; \
+ cmp.eq p6,p0=0x1d,r17; \
+(p6) br.cond.spnt .Alt_##Name \
+ ;; \
thash r28 = r16; \
adds r30 = CAUSE_VHPT_CC_HANDLED << 3, r30; \
;; \
ttag r19 = r16; \
- ld8 r27 = [r30]; \
- adds r17 = VLE_CCHAIN_OFFSET, r28; \
+ld8 r27 = [r30]; \
+adds r17 = VLE_CCHAIN_OFFSET, r28; \
;; \
ld8 r17 = [r17]; \
;; \
@@ -192,6 +200,11 @@
rfi; \
;; \
\
+.Alt_##Name:; \
+ mov pr = r31, 0x1ffff; \
+ ;; \
+ br.cond.sptk late_alt_##Name \
+ ;; \
.Out_##Name:; \
mov pr = r31, 0x1ffff; \
;; \
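The early-out added to VHPT_CCHAIN_LOOKUP above takes bits 63:59 of the faulting address (extr.u r17=r16,59,5) and, for the 0x1d and 0x1e ranges, restores pr and branches to the late_alt_* entry points added in ivt.S instead of walking the collision chain. Roughly, in C (the constants come straight from the macro; what those two address ranges hold is not spelled out in this patch):

#include <stdio.h>

/* Bits 63:59 give the 3-bit region number plus the top two offset bits;
 * 0x1d covers 0xe8...-0xef... and 0x1e covers 0xf0...-0xf7...,
 * both inside region 7. */
static int skips_vhpt_lookup(unsigned long ifa)
{
    unsigned long top5 = (ifa >> 59) & 0x1f;   /* extr.u r17=r16,59,5 */
    return top5 == 0x1e || top5 == 0x1d;       /* branch to .Alt_##Name */
}

int main(void)
{
    printf("%d\n", skips_vhpt_lookup(0xf000000000000000UL));  /* 1 */
    printf("%d\n", skips_vhpt_lookup(0xe800000000000000UL));  /* 1 */
    printf("%d\n", skips_vhpt_lookup(0xe000000000000000UL));  /* 0 */
    return 0;
}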