# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 6e979aa0e6d244824380af142b4e3be515bac2d5
# Parent 874661fc2d42195c52b619d4be76e7231a62a8ff
[IA64] panic -> panic domain
This patch uses panic_domain instead of panic when the
panic only relates to the current domain, not the whole system.
Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
---
xen/arch/ia64/vmx/pal_emul.c | 6 +++---
xen/arch/ia64/vmx/vlsapic.c | 7 ++++---
xen/arch/ia64/vmx/vmmu.c | 18 +++++++++---------
xen/arch/ia64/vmx/vmx_init.c | 15 +++++++++------
xen/arch/ia64/vmx/vmx_phy_mode.c | 9 +++++----
xen/arch/ia64/vmx/vmx_process.c | 2 +-
xen/arch/ia64/vmx/vmx_support.c | 8 ++++----
xen/arch/ia64/vmx/vmx_vcpu.c | 2 +-
xen/arch/ia64/vmx/vmx_virt.c | 23 ++++++++++-------------
xen/include/asm-ia64/vmx_vcpu.h | 2 +-
10 files changed, 47 insertions(+), 45 deletions(-)
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/pal_emul.c Tue May 09 15:23:33 2006 -0600
@@ -62,8 +62,8 @@ pal_cache_flush (VCPU *vcpu) {
// ia64_pal_call_static(gr28 ,gr29, gr30,
// result.v1,1LL);
// }
- while (result.status != 0) {
- panic("PAL_CACHE_FLUSH ERROR, status %ld", result.status);
+ if(result.status != 0) {
+ panic_domain(vcpu_regs(vcpu),"PAL_CACHE_FLUSH ERROR, status
%ld", result.status);
}
return result;
@@ -445,7 +445,7 @@ pal_emul( VCPU *vcpu) {
break;
default:
- panic("pal_emul(): guest call unsupported pal" );
+ panic_domain(vcpu_regs(vcpu),"pal_emul(): guest call
unsupported pal" );
}
set_pal_result (vcpu, result);
}
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vlsapic.c Tue May 09 15:23:33 2006 -0600
@@ -568,7 +568,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
if ( vpsr.i && IRQ_NO_MASKED == mask ) {
isr = vpsr.val & IA64_PSR_RI;
if ( !vpsr.ic )
- panic("Interrupt when IC=0\n");
+ panic_domain(regs,"Interrupt when IC=0\n");
vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ
injected = 1;
}
@@ -595,7 +595,8 @@ void guest_write_eoi(VCPU *vcpu)
uint64_t spsr;
vec = highest_inservice_irq(vcpu);
- if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
+ if ( vec == NULL_VECTOR )
+ panic_domain(vcpu_regs(vcpu),"Wrong vector to EOI\n");
local_irq_save(spsr);
VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
local_irq_restore(spsr);
@@ -634,7 +635,7 @@ static void generate_exirq(VCPU *vcpu)
update_vhpi(vcpu, NULL_VECTOR);
isr = vpsr.val & IA64_PSR_RI;
if ( !vpsr.ic )
- panic("Interrupt when IC=0\n");
+ panic_domain(regs,"Interrupt when IC=0\n");
vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
}
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Tue May 09 15:23:33 2006 -0600
@@ -134,7 +134,7 @@ static void init_domain_vhpt(struct vcpu
void * vbase;
page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
if ( page == NULL ) {
- panic("No enough contiguous memory for init_domain_vhpt\n");
+ panic_domain(vcpu_regs(v),"No enough contiguous memory for
init_domain_vhpt\n");
}
vbase = page_to_virt(page);
memset(vbase, 0, VCPU_VHPT_SIZE);
@@ -157,7 +157,7 @@ void init_domain_tlb(struct vcpu *v)
init_domain_vhpt(v);
page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
if ( page == NULL ) {
- panic("No enough contiguous memory for init_domain_tlb\n");
+ panic_domain(vcpu_regs(v),"No enough contiguous memory for
init_domain_tlb\n");
}
vbase = page_to_virt(page);
memset(vbase, 0, VCPU_VTLB_SIZE);
@@ -200,7 +200,7 @@ void machine_tlb_insert(struct vcpu *d,
mtlb.ppn = get_mfn(d->domain,tlb->ppn);
mtlb_ppn=mtlb.ppn;
if (mtlb_ppn == INVALID_MFN)
- panic("Machine tlb insert with invalid mfn number.\n");
+ panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn
number.\n");
psr = ia64_clear_ic();
if ( cl == ISIDE_TLB ) {
@@ -323,12 +323,12 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
}
if( gpip){
mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
- if( mfn == INVALID_MFN ) panic("fetch_code: invalid memory\n");
+ if( mfn == INVALID_MFN ) panic_domain(vcpu_regs(vcpu),"fetch_code:
invalid memory\n");
vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
}else{
tlb = vhpt_lookup(gip);
if( tlb == NULL)
- panic("No entry found in ITLB and DTLB\n");
+ panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
vpa =(u64
*)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
}
*code1 = *vpa++;
@@ -345,7 +345,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
if (slot >=0) {
// generate MCA.
- panic("Tlb conflict!!");
+ panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
thash_purge_and_insert(vcpu, pte, itir, ifa);
@@ -361,7 +361,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
if (slot >=0) {
// generate MCA.
- panic("Tlb conflict!!");
+ panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
@@ -385,7 +385,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
if (index >=0) {
// generate MCA.
- panic("Tlb conflict!!");
+ panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
thash_purge_entries(vcpu, va, ps);
@@ -407,7 +407,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
if (index>=0) {
// generate MCA.
- panic("Tlb conflict!!");
+ panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
return IA64_FAULT;
}
thash_purge_entries(vcpu, va, ps);
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c Tue May 09 15:23:33 2006 -0600
@@ -208,8 +208,9 @@ vmx_create_vp(struct vcpu *v)
ivt_base = (u64) &vmx_ia64_ivt;
printk("ivt_base: 0x%lx\n", ivt_base);
ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
- if (ret != PAL_STATUS_SUCCESS)
- panic("ia64_pal_vp_create failed. \n");
+ if (ret != PAL_STATUS_SUCCESS){
+ panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
+ }
}
/* Other non-context related tasks can be done in context switch */
@@ -220,8 +221,9 @@ vmx_save_state(struct vcpu *v)
/* FIXME: about setting of pal_proc_vector... time consuming */
status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
- if (status != PAL_STATUS_SUCCESS)
- panic("Save vp status failed\n");
+ if (status != PAL_STATUS_SUCCESS){
+ panic_domain(vcpu_regs(v),"Save vp status failed\n");
+ }
/* Need to save KR when domain switch, though HV itself doesn;t
@@ -244,8 +246,9 @@ vmx_load_state(struct vcpu *v)
u64 status;
status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
- if (status != PAL_STATUS_SUCCESS)
- panic("Restore vp status failed\n");
+ if (status != PAL_STATUS_SUCCESS){
+ panic_domain(vcpu_regs(v),"Restore vp status failed\n");
+ }
ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Tue May 09 15:23:33 2006 -0600
@@ -186,8 +186,10 @@ vmx_load_all_rr(VCPU *vcpu)
* mode in same region
*/
if (is_physical_mode(vcpu)) {
- if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
- panic("Unexpected domain switch in phy emul\n");
+ if (vcpu->arch.mode_flags & GUEST_PHY_EMUL){
+ panic_domain(vcpu_regs(vcpu),
+ "Unexpected domain switch in phy emul\n");
+ }
phy_rr.rrval = vcpu->arch.metaphysical_rr0;
//phy_rr.ps = PAGE_SHIFT;
phy_rr.ve = 1;
@@ -322,8 +324,7 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
break;
default:
/* Sanity check */
- printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
- panic("Unexpected virtual <--> physical mode transition");
+ panic_domain(vcpu_regs(vcpu),"Unexpected virtual <--> physical mode
transition,old:%lx,new:%lx\n",old_psr.val,new_psr.val);
break;
}
return;
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Tue May 09 15:23:33 2006 -0600
@@ -338,7 +338,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
}
if(vec == 1) type = ISIDE_TLB;
else if(vec == 2) type = DSIDE_TLB;
- else panic("wrong vec\n");
+ else panic_domain(regs,"wrong vec:%0xlx\n",vec);
// prepare_if_physical_mode(v);
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_support.c Tue May 09 15:23:33 2006 -0600
@@ -92,12 +92,12 @@ void vmx_io_assist(struct vcpu *v)
*/
vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
- panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
+ panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
(unsigned long)vio);
p = &vio->vp_ioreq;
if (p->state == STATE_IORESP_HOOK)
- panic("Not supported: No hook available for DM request\n");
+ panic_domain(vcpu_regs(v),"Not supported: No hook available for DM
request\n");
if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
if (p->state != STATE_IORESP_READY) {
@@ -135,7 +135,7 @@ void vmx_intr_assist(struct vcpu *v)
* out of vmx_wait_io, when guest is still waiting for response.
*/
if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- panic("!!!Bad resume to guest before I/O emulation is done.\n");
+ panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation
is done.\n");
/* Clear indicator specific to interrupt delivered from DM */
if (test_and_clear_bit(port,
@@ -154,7 +154,7 @@ void vmx_intr_assist(struct vcpu *v)
*/
vio = get_vio(v->domain, v->vcpu_id);
if (!vio)
- panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
+ panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
(unsigned long)vio);
#ifdef V_IOSAPIC_READY
/* Confirm virtual interrupt line signals, and set pending bits in vpd */
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c Tue May 09 15:23:33 2006 -0600
@@ -91,7 +91,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
* Otherwise panic
*/
if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
- panic ("Setting unsupport guest psr!");
+ panic_domain (regs,"Setting unsupport guest psr!");
}
/*
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Tue May 09 12:42:44 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c Tue May 09 15:23:33 2006 -0600
@@ -182,8 +182,9 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
UINT64 val;
+
if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
- panic(" get_psr nat bit fault\n");
+ panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
#if 0
@@ -216,7 +217,7 @@ IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST6
regs=vcpu_regs(vcpu);
vpsr.val=regs->cr_ipsr;
if ( vpsr.is == 1 ) {
- panic ("We do not support IA32 instruction yet");
+ panic_domain(regs,"We do not support IA32 instruction yet");
}
return vmx_vcpu_rfi(vcpu);
@@ -715,8 +716,9 @@ IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *v
{
// I27 and M30 are identical for these fields
UINT64 imm;
+
if(inst.M30.ar3!=44){
- panic("Can't support ar register other than itc");
+ panic_domain(vcpu_regs(vcpu),"Can't support ar register other than
itc");
}
#ifdef CHECK_FAULT
IA64_PSR vpsr;
@@ -741,7 +743,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *v
// I26 and M29 are identical for these fields
u64 r2;
if(inst.M29.ar3!=44){
- panic("Can't support ar register other than itc");
+ panic_domain(vcpu_regs(vcpu),"Can't support ar register other than
itc");
}
if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
#ifdef CHECK_FAULT
@@ -769,7 +771,7 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU
// I27 and M30 are identical for these fields
u64 r1;
if(inst.M31.ar3!=44){
- panic("Can't support ar register other than itc");
+ panic_domain(vcpu_regs(vcpu),"Can't support ar register other than
itc");
}
#ifdef CHECK_FAULT
if(check_target_register(vcpu,inst.M31.r1)){
@@ -1359,8 +1361,7 @@ if ( (cause == 0xff && opcode == 0x1e000
slot_type = slot_types[bundle.template][slot];
ia64_priv_decoder(slot_type, inst, &cause);
if(cause==0){
- printf("This instruction at 0x%lx slot %d can't be virtualized", iip,
slot);
- panic("123456\n");
+ panic_domain(regs,"This instruction at 0x%lx slot %d can't be
virtualized", iip, slot);
}
#else
inst.inst=opcode;
@@ -1494,12 +1495,8 @@ if ( (cause == 0xff && opcode == 0x1e000
status=IA64_FAULT;
break;
default:
- printf("unknown cause %ld, iip: %lx, ipsr: %lx\n",
cause,regs->cr_iip,regs->cr_ipsr);
- while(1);
- /* For unknown cause, let hardware to re-execute */
- status=IA64_RETRY;
- break;
-// panic("unknown cause in virtualization intercept");
+ panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n",
cause,regs->cr_iip,regs->cr_ipsr);
+ break;
};
#if 0
diff -r 874661fc2d42 -r 6e979aa0e6d2 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue May 09 12:42:44 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h Tue May 09 15:23:33 2006 -0600
@@ -359,7 +359,7 @@ IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu,
// TODO: unimplemented DBRs return a reserved register fault
// TODO: Should set Logical CPU state, not just physical
if(reg > 4){
- panic("there are only five cpuid registers");
+ panic_domain(vcpu_regs(vcpu),"there are only five cpuid registers");
}
*pval=VCPU(vcpu,vcpuid[reg]);
return (IA64_NO_FAULT);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|