# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 0e774127646895866311f9f617b38577891bf9b9
# Parent 40be48f67a3379af0632cfffbd083706b02bad99
Cleanup virtual translation code
Add some additional statistics
Signed-off-by: Matt Chapman <matthewc@xxxxxx>
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>
diff -r 40be48f67a33 -r 0e7741276468 xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c Sat Jul 9 14:25:29 2005
+++ b/xen/arch/ia64/vcpu.c Sat Jul 9 14:36:13 2005
@@ -53,8 +53,15 @@
#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
#endif
+unsigned long dtlb_translate_count = 0;
+unsigned long tr_translate_count = 0;
+unsigned long phys_translate_count = 0;
+
unsigned long vcpu_verbose = 0;
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
+
+extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
+extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);
/**************************************************************************
VCPU general register access routines
@@ -224,6 +231,9 @@
//else printf("but nothing pending\n");
}
#endif
+ if (enabling_interrupts &&
+ vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
+ PSCB(vcpu,pending_interruption) = 1;
return IA64_NO_FAULT;
}
@@ -267,6 +277,9 @@
return IA64_EXTINT_VECTOR;
}
#endif
+ if (enabling_interrupts &&
+ vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
+ PSCB(vcpu,pending_interruption) = 1;
return IA64_NO_FAULT;
}
@@ -531,6 +544,11 @@
/**************************************************************************
VCPU interrupt control register access routines
**************************************************************************/
+
+void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
+{
+ PSCB(vcpu,pending_interruption) = 1;
+}
void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
@@ -1241,28 +1259,101 @@
return (IA64_ILLOP_FAULT);
}
+#define itir_ps(itir) ((itir >> 2) & 0x3f)
+#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
+
+unsigned long vhpt_translate_count = 0;
+
+IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
+{
+ unsigned long pta, pta_mask, iha, pte, ps;
+ TR_ENTRY *trp;
+ ia64_rr rr;
+
+ if (!(address >> 61)) {
+ if (!PSCB(vcpu,metaphysical_mode))
+ panic_domain(0,"vcpu_translate: bad address %p\n", address);
+
+ *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
+ *itir = PAGE_SHIFT << 2;
+ phys_translate_count++;
+ return IA64_NO_FAULT;
+ }
+
+ /* check translation registers */
+ if ((trp = match_tr(vcpu,address))) {
+ tr_translate_count++;
+ *pteval = trp->page_flags;
+ *itir = trp->itir;
+ return IA64_NO_FAULT;
+ }
+
+ /* check 1-entry TLB */
+ if ((trp = match_dtlb(vcpu,address))) {
+ dtlb_translate_count++;
+ *pteval = trp->page_flags;
+ *itir = trp->itir;
+ return IA64_NO_FAULT;
+ }
+
+ /* check guest VHPT */
+ pta = PSCB(vcpu,pta);
+ rr.rrval = PSCB(vcpu,rrs)[address>>61];
+ if (rr.ve && (pta & IA64_PTA_VE))
+ {
+ if (pta & IA64_PTA_VF)
+ {
+ /* long format VHPT - not implemented */
+ return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
+ }
+ else
+ {
+ /* short format VHPT */
+
+ /* avoid recursively walking VHPT */
+ pta_mask = (itir_mask(pta) << 3) >> 3;
+ if (((address ^ pta) & pta_mask) == 0)
+ return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
+
+ vcpu_thash(vcpu, address, &iha);
+ if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
+ return IA64_VHPT_TRANS_VECTOR;
+
+ /*
+ * Optimisation: this VHPT walker aborts on not-present pages
+ * instead of inserting a not-present translation, this allows
+ * vectoring directly to the miss handler.
+ */
+ if (pte & _PAGE_P)
+ {
+ *pteval = pte;
+ *itir = vcpu_get_itir_on_fault(vcpu,address);
+ vhpt_translate_count++;
+ return IA64_NO_FAULT;
+ }
+ return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
+ }
+ }
+ return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
+}
+
IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
- extern TR_ENTRY *match_tr(VCPU *,UINT64);
- unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *,
unsigned long *);
- TR_ENTRY *trp;
- UINT64 mask, pteval, mp_pte, ps;
-
-extern unsigned long privop_trace;
- if (pteval = match_dtlb(vcpu, vadr, &ps, &mp_pte) && (mp_pte != -1UL)) {
- mask = (1L << ps) - 1;
- *padr = ((mp_pte & _PAGE_PPN_MASK) & ~mask) | (vadr & mask);
- verbose("vcpu_tpa: addr=%p @%p, successful,
padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
+ UINT64 pteval, itir, mask;
+ IA64FAULT fault;
+
+ fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
+ if (fault == IA64_NO_FAULT)
+ {
+ mask = itir_mask(itir);
+ *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
return (IA64_NO_FAULT);
}
- if (trp=match_tr(current,vadr)) {
- mask = (1L << trp->ps) - 1;
- *padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
- verbose("vcpu_tpa: addr=%p @%p, successful,
padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
- return (IA64_NO_FAULT);
- }
- verbose("vcpu_tpa addr=%p, @%p, forcing data
miss\n",vadr,PSCB(vcpu,iip));
- return vcpu_force_data_miss(vcpu, vadr);
+ else
+ {
+ PSCB(vcpu,tmp[0]) = vadr; // save ifa in vcpu structure,
then specify IA64_FORCED_IFA
+ return (fault | IA64_FORCED_IFA);
+ }
}
IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
@@ -1614,15 +1705,12 @@
// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
// the physical address contained for correctness
-unsigned long match_dtlb(VCPU *vcpu, unsigned long ifa, unsigned long *ps,
unsigned long *mp_pte)
+TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
TR_ENTRY *trp;
- if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)) {
- if (ps) *ps = trp->ps;
- if (mp_pte) *mp_pte = vcpu->arch.dtlb_pte;
- return (trp->page_flags);
- }
+ if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1))
+ return (&vcpu->arch.dtlb);
return 0UL;
}
@@ -1679,11 +1767,12 @@
// TODO: Only allowed for current vcpu
UINT64 mpaddr, ps;
IA64FAULT fault;
- unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *,
unsigned long *);
+ TR_ENTRY *trp;
unsigned long lookup_domain_mpa(struct domain *,unsigned long);
unsigned long pteval, dom_imva;
- if (pteval = match_dtlb(vcpu, vadr, NULL, NULL)) {
+ if ((trp = match_dtlb(vcpu,vadr))) {
+ pteval = trp->page_flags;
dom_imva = __va(pteval & _PFN_MASK);
ia64_fc(dom_imva);
return IA64_NO_FAULT;
diff -r 40be48f67a33 -r 0e7741276468 xen/arch/ia64/hypercall.c
--- a/xen/arch/ia64/hypercall.c Sat Jul 9 14:25:29 2005
+++ b/xen/arch/ia64/hypercall.c Sat Jul 9 14:36:13 2005
@@ -19,12 +19,16 @@
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval
sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
+unsigned long idle_when_pending = 0;
+unsigned long pal_halt_light_count = 0;
+
int
ia64_hypercall (struct pt_regs *regs)
{
struct vcpu *v = (struct domain *) current;
struct ia64_sal_retval x;
unsigned long *tv, *tc;
+ int pi;
switch (regs->r2) {
case FW_HYPERCALL_PAL_CALL:
@@ -40,19 +44,21 @@
#endif
x = pal_emulator_static(regs->r28);
if (regs->r28 == PAL_HALT_LIGHT) {
-#if 1
#define SPURIOUS_VECTOR 15
- if (vcpu_check_pending_interrupts(v)!=SPURIOUS_VECTOR) {
-// int pi = vcpu_check_pending_interrupts(v);
+ pi = vcpu_check_pending_interrupts(v);
+ if (pi != SPURIOUS_VECTOR) {
+ idle_when_pending++;
+ pi = vcpu_pend_unspecified_interrupt(v);
//printf("idle w/int#%d pending!\n",pi);
//this shouldn't happen, but it apparently does quite a bit! so don't
//allow it to happen... i.e. if a domain has an interrupt pending and
//it tries to halt itself because it thinks it is idle, just return here
//as deliver_pending_interrupt is called on the way out and will deliver it
}
- else
-#endif
- do_sched_op(SCHEDOP_yield);
+ else {
+ pal_halt_light_count++;
+ do_sched_op(SCHEDOP_yield);
+ }
//break;
}
regs->r8 = x.status; regs->r9 = x.v0;
diff -r 40be48f67a33 -r 0e7741276468 xen/arch/ia64/privop.c
--- a/xen/arch/ia64/privop.c Sat Jul 9 14:25:29 2005
+++ b/xen/arch/ia64/privop.c Sat Jul 9 14:36:13 2005
@@ -1033,6 +1033,36 @@
}
#endif
+extern unsigned long dtlb_translate_count;
+extern unsigned long tr_translate_count;
+extern unsigned long phys_translate_count;
+extern unsigned long vhpt_translate_count;
+extern unsigned long lazy_cover_count;
+extern unsigned long idle_when_pending;
+extern unsigned long pal_halt_light_count;
+
+int dump_misc_stats(char *buf)
+{
+ char *s = buf;
+ s += sprintf(s,"Virtual TR translations: %d\n",tr_translate_count);
+ s += sprintf(s,"Virtual VHPT translations: %d\n",vhpt_translate_count);
+ s += sprintf(s,"Virtual DTLB translations: %d\n",dtlb_translate_count);
+ s += sprintf(s,"Physical translations: %d\n",phys_translate_count);
+ s += sprintf(s,"Idle when pending: %d\n",idle_when_pending);
+ s += sprintf(s,"PAL_HALT_LIGHT (no pending): %d\n",pal_halt_light_count);
+ s += sprintf(s,"Lazy covers: %d\n",lazy_cover_count);
+ return s - buf;
+}
+
+void zero_misc_stats(void)
+{
+ dtlb_translate_count = 0;
+ tr_translate_count = 0;
+ phys_translate_count = 0;
+ vhpt_translate_count = 0;
+ lazy_cover_count = 0;
+}
+
int dump_hyperprivop_counts(char *buf)
{
int i;
@@ -1072,6 +1102,7 @@
#ifdef PRIVOP_ADDR_COUNT
n += dump_privop_addrs(buf + n);
#endif
+ n += dump_misc_stats(buf + n);
if (len < TMPBUFLEN) return -1;
if (__copy_to_user(ubuf,buf,n)) return -1;
return n;
@@ -1086,6 +1117,7 @@
#ifdef PRIVOP_ADDR_COUNT
zero_privop_addrs();
#endif
+ zero_misc_stats();
zero_reflect_counts();
if (len < TMPBUFLEN) return -1;
if (__copy_to_user(ubuf,buf,n)) return -1;
diff -r 40be48f67a33 -r 0e7741276468 xen/include/asm-ia64/ia64_int.h
--- a/xen/include/asm-ia64/ia64_int.h Sat Jul 9 14:25:29 2005
+++ b/xen/include/asm-ia64/ia64_int.h Sat Jul 9 14:36:13 2005
@@ -3,11 +3,11 @@
//#include "ia64.h"
-#define IA64_VHPT_TRANS_VECTOR 0x0000 /* UNUSED */
+#define IA64_VHPT_TRANS_VECTOR 0x0000
#define IA64_INST_TLB_VECTOR 0x0400
#define IA64_DATA_TLB_VECTOR 0x0800
-#define IA64_ALT_INST_TLB_VECTOR 0x0c00 /* UNUSED */
-#define IA64_ALT_DATA_TLB_VECTOR 0x1000 /* UNUSED */
+#define IA64_ALT_INST_TLB_VECTOR 0x0c00
+#define IA64_ALT_DATA_TLB_VECTOR 0x1000
#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
#define IA64_INST_KEY_MISS_VECTOR 0x1800
#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
@@ -33,12 +33,11 @@
#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-#define IA64_NO_FAULT 0x0000
-#define IA64_RFI_IN_PROGRESS 0x0001
-#define IA64_RETRY 0x0002
+#define IA64_NO_FAULT 0x0001
+#define IA64_RFI_IN_PROGRESS 0x0002
+#define IA64_RETRY 0x0003
#ifdef CONFIG_VTI
-#define IA64_FAULT 0x0001
-#define IA64_INJ_FAULT 0x0005
+#define IA64_FAULT 0x0002
#endif //CONFIG_VTI
#define IA64_FORCED_IFA 0x0004
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
diff -r 40be48f67a33 -r 0e7741276468 xen/arch/ia64/process.c
--- a/xen/arch/ia64/process.c Sat Jul 9 14:25:29 2005
+++ b/xen/arch/ia64/process.c Sat Jul 9 14:36:13 2005
@@ -75,8 +75,6 @@
}
#endif // CONFIG_VTI
}
-
-extern TR_ENTRY *match_tr(struct vcpu *v, unsigned long ifa);
void tdpfoo(void) { }
@@ -260,140 +258,29 @@
++pending_false_positive;
}
}
+unsigned long lazy_cover_count = 0;
int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
if (!PSCB(v,interrupt_collection_enabled)) {
- if (isr & IA64_ISR_IR) {
-// printf("Handling lazy cover\n");
- PSCB(v,ifs) = regs->cr_ifs;
- PSCB(v,incomplete_regframe) = 1;
- regs->cr_ifs = 0;
- return(1); // retry same instruction with cr.ifs off
- }
+ PSCB(v,ifs) = regs->cr_ifs;
+ PSCB(v,incomplete_regframe) = 1;
+ regs->cr_ifs = 0;
+ lazy_cover_count++;
+ return(1); // retry same instruction with cr.ifs off
}
return(0);
}
-#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
-
-void xen_handle_domain_access(unsigned long address, unsigned long isr, struct
pt_regs *regs, unsigned long itir)
-{
- struct domain *d = (struct domain *) current->domain;
- struct domain *ed = (struct vcpu *) current;
- TR_ENTRY *trp;
- unsigned long psr = regs->cr_ipsr, mask, flags;
+void ia64_do_page_fault (unsigned long address, unsigned long isr, struct
pt_regs *regs, unsigned long itir)
+{
unsigned long iip = regs->cr_iip;
// FIXME should validate address here
- unsigned long pteval, mpaddr, ps;
- unsigned long lookup_domain_mpa(struct domain *,unsigned long);
- unsigned long match_dtlb(struct vcpu *,unsigned long, unsigned long *,
unsigned long *);
+ unsigned long pteval;
+ unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
IA64FAULT fault;
-// NEED TO HANDLE THREE CASES:
-// 1) domain is in metaphysical mode
-// 2) domain address is in TR
-// 3) domain address is not in TR (reflect data miss)
-
- // got here trying to read a privop bundle
- //if (d->metaphysical_mode) {
- if (PSCB(current,metaphysical_mode) && !(address>>61)) { //FIXME
- if (d == dom0) {
- if (address < dom0_start || address >= dom0_start +
dom0_size) {
- printk("xen_handle_domain_access: out-of-bounds"
- "dom0 mpaddr %p! continuing...\n",mpaddr);
- tdpfoo();
- }
- }
- pteval = lookup_domain_mpa(d,address);
- //FIXME: check return value?
- // would be nice to have a counter here
- vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
- return;
- }
-if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
-
- if (trp = match_tr(current,address)) {
- // FIXME address had better be pre-validated on insert
- pteval =
translate_domain_pte(trp->page_flags,address,trp->itir);
-
vcpu_itc_no_srlz(current,6,address,pteval,-1UL,(trp->itir>>2)&0x3f);
- return;
- }
- // if we are fortunate enough to have it in the 1-entry TLB...
- if (pteval = match_dtlb(ed,address,&ps,NULL)) {
- vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
- return;
- }
- if (ia64_done_with_exception(regs)) {
-//if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully
handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
- return;
- }
- else {
- // should never happen. If it does, region 0 addr may
- // indicate a bad xen pointer
- printk("*** xen_handle_domain_access: exception table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- panic_domain(regs,"*** xen_handle_domain_access: exception
table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- }
-}
-
-void ia64_do_page_fault (unsigned long address, unsigned long isr, struct
pt_regs *regs, unsigned long itir)
-{
- struct domain *d = (struct domain *) current->domain;
- TR_ENTRY *trp;
- unsigned long psr = regs->cr_ipsr, mask, flags;
- unsigned long iip = regs->cr_iip;
- // FIXME should validate address here
- unsigned long iha, pteval, mpaddr;
- unsigned long lookup_domain_mpa(struct domain *,unsigned long);
- unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
- unsigned long vector;
- IA64FAULT fault;
-
-
- //The right way is put in VHPT and take another miss!
-
- // weak attempt to avoid doing both I/D tlb insert to avoid
- // problems for privop bundle fetch, doesn't work, deal with later
- if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
- xen_handle_domain_access(address, isr, regs, itir);
-
- return;
- }
-
- // FIXME: no need to pass itir in to this routine as we need to
- // compute the virtual itir anyway (based on domain's RR.ps)
- // AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(current,address);
-
- if (PSCB(current,metaphysical_mode) && (is_data || !(address>>61))) {
//FIXME
- // FIXME should validate mpaddr here
- if (d == dom0) {
- if (address < dom0_start || address >= dom0_start +
dom0_size) {
- printk("ia64_do_page_fault: out-of-bounds dom0
mpaddr %p, iip=%p! continuing...\n",address,iip);
- printk("ia64_do_page_fault: out-of-bounds dom0
mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
- tdpfoo();
- }
- }
- pteval = lookup_domain_mpa(d,address);
- // FIXME, must be inlined or potential for nested fault here!
-
vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,PAGE_SHIFT);
- return;
- }
- if (trp = match_tr(current,address)) {
- // FIXME address had better be pre-validated on insert
- pteval =
translate_domain_pte(trp->page_flags,address,trp->itir);
-
vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(trp->itir>>2)&0x3f);
- return;
- }
-
- if (handle_lazy_cover(current, isr, regs)) return;
-if (!(address>>61)) {
-panic_domain(0,"ia64_do_page_fault: @%p???, iip=%p, b0=%p, itc=%p
(spinning...)\n",address,iip,regs->b0,ia64_get_itc());
-}
+ if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs))
return;
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) ==
IA64_ISR_CODE_LFETCH))
{
@@ -406,37 +293,29 @@
return;
}
- if (vcpu_get_rr_ve(current, address) && (PSCB(current,pta) &
IA64_PTA_VE))
+ fault = vcpu_translate(current,address,is_data,&pteval,&itir);
+ if (fault == IA64_NO_FAULT)
{
- if (PSCB(current,pta) & IA64_PTA_VF)
- {
- /* long format VHPT - not implemented */
- vector = is_data ? IA64_DATA_TLB_VECTOR :
IA64_INST_TLB_VECTOR;
- }
- else
- {
- /* short format VHPT */
- vcpu_thash(current, address, &iha);
- if (__copy_from_user(&pteval, iha, sizeof(pteval)) == 0)
- {
- /*
- * Optimisation: this VHPT walker aborts on not-present pages
- * instead of inserting a not-present translation, this allows
- * vectoring directly to the miss handler.
- */
- if (pteval & _PAGE_P)
- {
- pteval =
translate_domain_pte(pteval,address,itir);
-
vcpu_itc_no_srlz(current,is_data?6:1,address,pteval,-1UL,(itir>>2)&0x3f);
- return;
- }
- else vector = is_data ? IA64_DATA_TLB_VECTOR :
IA64_INST_TLB_VECTOR;
- }
- else vector = IA64_VHPT_TRANS_VECTOR;
- }
- }
- else vector = is_data ? IA64_ALT_DATA_TLB_VECTOR :
IA64_ALT_INST_TLB_VECTOR;
- reflect_interruption(address, isr, itir, regs, vector);
+ pteval = translate_domain_pte(pteval,address,itir);
+
vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
+ return;
+ }
+ else if (IS_VMM_ADDRESS(iip))
+ {
+ if (!ia64_done_with_exception(regs)) {
+ // should never happen. If it does, region 0 addr may
+ // indicate a bad xen pointer
+ printk("*** xen_handle_domain_access: exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
+ panic_domain(regs,"*** xen_handle_domain_access:
exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
+ }
+ return;
+ }
+
+ reflect_interruption(address, isr, 0, regs, fault);
}
void
@@ -865,6 +744,6 @@
while(vector);
return;
}
- if (check_lazy_cover && handle_lazy_cover(v, isr, regs)) return;
+ if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v,
isr, regs)) return;
reflect_interruption(ifa,isr,itir,regs,vector);
}
diff -r 40be48f67a33 -r 0e7741276468 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h Sat Jul 9 14:25:29 2005
+++ b/xen/include/asm-ia64/vcpu.h Sat Jul 9 14:36:13 2005
@@ -135,6 +135,7 @@
extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir);
extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
/* misc */
extern IA64FAULT vcpu_rfi(VCPU *vcpu);
@@ -150,5 +151,4 @@
extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
-
#endif
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|