# HG changeset patch
# User Tristan Gingold
# Date 1189823603 -7200
# Node ID 94f6757e01f42ae9c1b9c1b956884f209cb7a958
# Parent  273ad3c22e2330530fe60e89f33fb73dc671df09
Check slot for itr.d and itr.i and generate interrupt in case of error.
This avoids a buffer overflow in Xen.

Signed-off-by: Tristan Gingold

diff -r 273ad3c22e23 -r 94f6757e01f4 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c	Thu Sep 13 03:30:36 2007 +0200
+++ b/xen/arch/ia64/vmx/vmmu.c	Sat Sep 15 04:33:23 2007 +0200
@@ -403,6 +403,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
     }
     thash_purge_entries(vcpu, va, ps);
 #endif
+
+    if (slot >= NITRS) {
+        panic_domain(NULL, "bad itr.i slot (%ld)", slot);
+        return IA64_FAULT;
+    }
+
     pte &= ~PAGE_FLAGS_RV_MASK;
     vcpu_get_rr(vcpu, va, &rid);
     rid = rid& RR_RID_MASK;
@@ -431,6 +437,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
         return IA64_FAULT;
     }
 #endif
+
+    if (slot >= NDTRS) {
+        panic_domain(NULL, "bad itr.d slot (%ld)", slot);
+        return IA64_FAULT;
+    }
+
     pte &= ~PAGE_FLAGS_RV_MASK;
 
     /* This is a bad workaround
diff -r 273ad3c22e23 -r 94f6757e01f4 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c	Thu Sep 13 03:30:36 2007 +0200
+++ b/xen/arch/ia64/vmx/vmx_virt.c	Sat Sep 15 04:33:23 2007 +0200
@@ -202,6 +202,7 @@ static IA64FAULT vmx_emul_rfi(VCPU *vcpu
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
+
     regs=vcpu_regs(vcpu);
     vpsr.val=regs->cr_ipsr;
     if ( vpsr.is == 1 ) {
@@ -275,8 +276,9 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
 }
 
@@ -333,8 +335,9 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
 }
 
@@ -366,8 +369,9 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
     return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
 }
 
@@ -567,41 +571,42 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
 static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
     u64 itir, ifa, pte, slot;
+    ISR isr;
 #ifdef VMAL_NO_FAULT_CHECK
     IA64_PSR vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-    ISR isr;
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
-#ifdef VMAL_NO_FAULT_CHECK
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
-    }
-#ifdef VMAL_NO_FAULT_CHECK
-    if(is_reserved_rr_register(vcpu, slot)){
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,&itir)){
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    if (vpsr.ic) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+    if (vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
+        || vcpu_get_gr_nat(vcpu,inst.M45.r2, &pte)) {
+#ifdef VMAL_NO_FAULT_CHECK
+        set_isr_reg_nat_consumption(vcpu, 0, 0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef VMAL_NO_FAULT_CHECK
+    if (is_reserved_rr_register(vcpu, slot)) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu,&itir)) {
         return(IA64_FAULT);
     }
-    if (vcpu_get_ifa(vcpu,&ifa)){
+    if (vcpu_get_ifa(vcpu,&ifa)) {
         return(IA64_FAULT);
     }
 #ifdef VMAL_NO_FAULT_CHECK
@@ -609,123 +614,140 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
         // TODO
         return IA64_FAULT;
     }
+    if (unimplemented_gva(vcpu,ifa)) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (slot >= NDTRS) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        rsv_reg_field(vcpu);
+        return IA64_FAULT;
+    }
+
+    return (vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa));
+}
+
+static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
+{
+    u64 itir, ifa, pte, slot;
+    ISR isr;
+#ifdef VMAL_NO_FAULT_CHECK
+    IA64_PSR vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    if (vcpu_get_gr_nat(vcpu,inst.M45.r3, &slot)
+        || vcpu_get_gr_nat(vcpu,inst.M45.r2, &pte)) {
+#ifdef VMAL_NO_FAULT_CHECK
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef VMAL_NO_FAULT_CHECK
+    if (is_reserved_rr_register(vcpu, slot)) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu,&itir)) {
+        return(IA64_FAULT);
+    }
+    if (vcpu_get_ifa(vcpu,&ifa)) {
+        return(IA64_FAULT);
+    }
+#ifdef VMAL_NO_FAULT_CHECK
+    if (is_reserved_itir_field(vcpu, itir)) {
+        // TODO
+        return IA64_FAULT;
+    }
+    if (unimplemented_gva(vcpu,ifa)) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (slot >= NITRS) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        rsv_reg_field(vcpu);
+        return IA64_FAULT;
+    }
+
+    return (vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa));
+}
+
+static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
+                                 u64 *itir, u64 *ifa, u64 *pte)
+{
+    IA64FAULT ret1;
+
+#ifdef VMAL_NO_FAULT_CHECK
+    IA64_PSR vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+
+    u64 fault;
+    ISR isr;
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
+#ifdef VMAL_NO_FAULT_CHECK
+    if( ret1 != IA64_NO_FAULT ){
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+
+    if (vcpu_get_itir(vcpu,itir)){
+        return(IA64_FAULT);
+    }
+    if (vcpu_get_ifa(vcpu,ifa)){
+        return(IA64_FAULT);
+    }
+#ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,ifa) ) {
         isr.val = set_isr_ei_ni(vcpu);
         isr.code = IA64_RESERVED_REG_FAULT;
         vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
-{
-    u64 itir, ifa, pte, slot;
-#ifdef VMAL_NO_FAULT_CHECK
-    ISR isr;
-    IA64_PSR vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
-#ifdef VMAL_NO_FAULT_CHECK
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-#endif // VMAL_NO_FAULT_CHECK
-    }
-#ifdef VMAL_NO_FAULT_CHECK
-    if(is_reserved_rr_register(vcpu, slot)){
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,&itir)){
-        return(IA64_FAULT);
-    }
-    if (vcpu_get_ifa(vcpu,&ifa)){
-        return(IA64_FAULT);
-    }
-#ifdef VMAL_NO_FAULT_CHECK
-    if (is_reserved_itir_field(vcpu, itir)) {
-        // TODO
-        return IA64_FAULT;
-    }
-    if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        unimpl_daddr(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
-}
-
-static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
-                                 u64 *itir, u64 *ifa, u64 *pte)
-{
-    IA64FAULT ret1;
-
-#ifdef VMAL_NO_FAULT_CHECK
-    IA64_PSR vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-
-    u64 fault;
-    ISR isr;
-    if ( vpsr.cpl != 0) {
-        /* Inject Privileged Operation fault into guest */
-        set_privileged_operation_isr (vcpu, 0);
-        privilege_op (vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
-#ifdef VMAL_NO_FAULT_CHECK
-    if( ret1 != IA64_NO_FAULT ){
-        set_isr_reg_nat_consumption(vcpu,0,0);
-        rnat_comsumption(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-
-    if (vcpu_get_itir(vcpu,itir)){
-        return(IA64_FAULT);
-    }
-    if (vcpu_get_ifa(vcpu,ifa)){
-        return(IA64_FAULT);
-    }
-#ifdef VMAL_NO_FAULT_CHECK
-    if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
-        unimpl_daddr(vcpu);
-        return IA64_FAULT;
-    }
-#endif // VMAL_NO_FAULT_CHECK
-    return IA64_NO_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK
+    return IA64_NO_FAULT;
 }
 
 static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
@@ -736,7 +758,7 @@ static IA64FAULT vmx_emul_itc_d(VCPU *vc
         return IA64_FAULT;
     }
 
-    return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
+    return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
 }
 
 static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
@@ -747,8 +769,7 @@ static IA64FAULT vmx_emul_itc_i(VCPU *vc
         return IA64_FAULT;
    }
 
-    return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
-
+    return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
 }
 
 /*************************************