RE: [Xen-ia64-devel] PATCH: check r2 value for VTi mov rr[r3]=r2

To: "Isaku Yamahata" <yamahata@xxxxxxxxxxxxx>, <tgingold@xxxxxxx>
Subject: RE: [Xen-ia64-devel] PATCH: check r2 value for VTi mov rr[r3]=r2
From: "Xu, Anthony" <anthony.xu@xxxxxxxxx>
Date: Mon, 22 Oct 2007 10:20:35 +0800
Cc: Alex Williamson <alex.williamson@xxxxxx>, Xen-ia64-devel <xen-ia64-devel@xxxxxxxxxxxxxxxxxxx>
In-reply-to: <20071022015839.GA6453%yamahata@xxxxxxxxxxxxx>
References: <20071022015118.GA3524@xxxxxxxxxxxxxxxxxxxxx> <20071022015839.GA6453%yamahata@xxxxxxxxxxxxx>
> Don't we have to check whether rr.ps is appropriate?
If I remember correctly, the hardware checks rr.ps integrity itself:
the GP fault has higher priority than the Virtualization fault, so an
ill-formed rr.ps never reaches the emulation path.
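
For reference, the rr.ps sanity check being discussed amounts to
something like the following C sketch, in the spirit of
is_reserved_rr_field() in vmx_utility.c (the field layout is the
architected region-register format, but the function name and the
accepted page-size set are illustrative, not code from the tree):

#include <stdint.h>

/* Return nonzero if a guest rr value has reserved bits set or an
 * unimplemented page size in rr.ps. */
static int rr_value_is_reserved(uint64_t val)
{
    uint64_t ps = (val >> 2) & 0x3f;    /* rr.ps, bits 2..7 */

    if (val & 0x2)                      /* bit 1 is reserved */
        return 1;
    if (val >> 32)                      /* bits 32..63 are reserved */
        return 1;
    /* 4K (ps=12) .. 256M (ps=28); above 16K only even values exist. */
    if (ps < 12 || ps > 28 || (ps > 14 && (ps & 1)))
        return 1;
    return 0;
}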


Anthony
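
For concreteness, the RID range check added by the patch quoted below
boils down to this C sketch (the function name and exact structure are
illustrative, not code from the tree):

#include <stdint.h>

/* rr.rid occupies bits 8..31.  Bits at or above the domain's rid_bits
 * are where Xen adds starting_rid to mangle the RID, so a guest value
 * with any of them set could alias another domain's RIDs and must be
 * rejected (the fast path branches back to the C handler instead). */
static int vrr_rid_in_range(uint64_t rr_value, unsigned int rid_bits)
{
    uint64_t rid = (rr_value >> 8) & 0xffffffUL;  /* 24-bit RID */

    return (rid >> rid_bits) == 0;
}

This mirrors the new extr.u/shr/cmp.ne sequence in vmx_asm_mov_to_rr
below.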



Isaku Yamahata wrote:
> Good catch.
> Don't we have to check whether rr.ps is appropriate?
> Otherwise mov to rr results in a reserved register/field fault and
> Xen panics. is_reserved_rr_field() in vmx_utility.c would help.
> Or should it be caught by the exception table, for performance?
> 
> I've hit a similar issue and have a patch for the C fall-back,
> but don't have one for the assembler code.
> I'll send it after rebasing it to the current c/s, unless you send
> yours first.
> 
> thanks,
> 
> On Mon, Oct 22, 2007 at 03:51:18AM +0200, tgingold@xxxxxxx wrote:
>> Hi,
>> 
>> The RID value was not checked for mov_to_rr.  This was a security
>> hole, as it allowed a VTi domain to read the memory of any other
>> domain.
>> 
>> Also use the C fall-back for thash when the long VHPT format is used.
>> 
>> Comments added.
>> 
>> Tristan.
> 
>> # HG changeset patch
>> # User Tristan Gingold <tgingold@xxxxxxx>
>> # Date 1193017765 -7200
>> # Node ID 769c23dcfbf1a2bfcac5307d45e32bdd0e16d969
>> # Parent  8c7e879e3e81f628714f98b7f2f7968264072fb6
>> Check range of r2 for mov rr[r3]=r2
>> This fixes a security hole.
>> Use C fall-back for thash with long VHPT format
>> Add comments.
>> 
>> Signed-off-by: Tristan Gingold <tgingold@xxxxxxx>
>> 
>> diff -r 8c7e879e3e81 -r 769c23dcfbf1 xen/arch/ia64/vmx/optvfault.S
>> --- a/xen/arch/ia64/vmx/optvfault.S  Fri Oct 19 06:24:24 2007 +0200
>> +++ b/xen/arch/ia64/vmx/optvfault.S  Mon Oct 22 03:49:25 2007 +0200
>> @@ -6,7 +6,9 @@ 
>>   *  Xuefei Xu (Anthony Xu) <anthony.xu@xxxxxxxxx>
>>   */
>> 
>> -#include <linux/config.h>
>> +#include <linux/config.h>
>> +#include <asm/config.h>
>> +#include <asm/pgtable.h>
>>  #include <asm/asmmacro.h>
>>  #include <asm/kregs.h>
>>  #include <asm/offsets.h>
>> @@ -26,6 +28,9 @@
>>  #define ACCE_MOV_TO_PSR
>>  #define ACCE_THASH
>> 
>> +// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (= saved pr)
>> +
>> +
>>  //mov r1=ar3 (only itc is virtualized)
>>  GLOBAL_ENTRY(vmx_asm_mov_from_ar)
>>  #ifndef ACCE_MOV_FROM_AR
>> @@ -90,13 +95,16 @@ GLOBAL_ENTRY(vmx_asm_mov_to_rr)
>>  #ifndef ACCE_MOV_TO_RR
>>      br.many vmx_virtualization_fault_back
>>  #endif
>> -    extr.u r16=r25,20,7
>> -    extr.u r17=r25,13,7
>> +    add r22=IA64_VCPU_DOMAIN_OFFSET,r21
>> +    extr.u r16=r25,20,7             // r3
>> +    extr.u r17=r25,13,7             // r2
>> +    ;;
>> +    ld8 r22=[r22]           // Get domain
>>      movl r20=asm_mov_from_reg
>>      ;;
>>      adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
>> -    shladd r16=r16,4,r20
>> -    mov r22=b0
>> +    shladd r16=r16,4,r20    // get r3
>> +    mov r18=b0                      // save b0
>>      ;;
>>      add r27=VCPU_VRR0_OFS,r21
>>      mov b0=r16
>> @@ -104,47 +112,56 @@ GLOBAL_ENTRY(vmx_asm_mov_to_rr)
>>      ;;
>>  vmx_asm_mov_to_rr_back_1:
>>      adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
>> -    shr.u r23=r19,61
>> -    shladd r17=r17,4,r20
>> +    shr.u r23=r19,61                // get RR #
>> +    shladd r17=r17,4,r20    // get r2
>>      ;;
>>      //if rr7, go back
>>      cmp.eq p6,p0=7,r23
>> -    mov b0=r22
>> +    mov b0=r18                      // restore b0
>>      (p6) br.cond.dpnt.many vmx_virtualization_fault_back
>>      ;;
>> -    mov r28=r19
>> +    mov r28=r19                     // save r3
>>      mov b0=r17
>>      br.many b0
>>  vmx_asm_mov_to_rr_back_2:
>>      adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
>> -    shladd r27=r23,3,r27
>> -    ;; // +starting_rid
>> -    st8 [r27]=r19
>> +    shladd r27=r23,3,r27    // address of VRR
>> +    add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
>> +    ;;
>> +    ld1 r22=[r22]           // Load rid_bits from domain
>> +    mov b0=r18                      // restore b0
>> +    adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
>> +    ;;
>> +    ld4 r16=[r16]           // load starting_rid
>> +    extr.u r17=r19,8,24             // Extract RID
>> +    ;;
>> +    shr r17=r17,r22         // Shift out used bits
>> +    shl r16=r16,8
>> +    ;;
>> +    add r20=r19,r16
>> +    cmp.ne p6,p0=0,r17      // If reserved RID bits are set, use C fall back.
>> +    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
>> +    ;; //mangling rid 1 and 3
>> +    extr.u r16=r20,8,8
>> +    extr.u r17=r20,24,8
>> +    mov r24=r18             // saved b0 for resume
>> +    ;;
>> +    extr.u r18=r20,2,6 // page size
>> +    dep r20=r16,r20,24,8
>>      mov b0=r30
>>      ;;
>> -    adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
>> -    ;;
>> -    ld4 r16=[r16]
>> -    ;;
>> -    shl r16=r16,8
>> -    ;;
>> -    add r19=r19,r16
>> -    ;; //mangling rid 1 and 3
>> -    extr.u r16=r19,8,8
>> -    extr.u r17=r19,24,8
>> -    extr.u r18=r19,2,6 // page size
>> -    ;;
>> -    dep r19=r16,r19,24,8
>> -    ;;
>> -    dep r19=r17,r19,8,8
>> +    dep r20=r17,r20,8,8
>>      ;; //set ve 1
>> -    dep r19=-1,r19,0,1
>> -    cmp.lt p6,p0=14,r18
>> -    ;;
>> -    (p6) mov r18=14
>> -    ;;
>> -    (p6) dep r19=r18,r19,2,6
>> -    ;;
>> +    dep r20=-1,r20,0,1
>> +    // If ps > PAGE_SHIFT, use PAGE_SHIFT
>> +    cmp.lt p6,p0=PAGE_SHIFT,r18
>> +    ;;
>> +    (p6) mov r18=PAGE_SHIFT
>> +    ;;
>> +    (p6) dep r20=r18,r20,2,6
>> +    ;;
>> +    st8 [r27]=r19   // Write to vrr.
>> +    // Write to save_rr if rr=0 or rr=4.
>>      cmp.eq p6,p0=0,r23
>>      ;;
>>      cmp.eq.or p6,p0=4,r23
>> @@ -156,11 +173,10 @@ vmx_asm_mov_to_rr_back_2:
>>      cmp.eq p7,p0=r0,r0
>>      (p6) shladd r17=r23,1,r17
>>      ;;
>> -    (p6) st8 [r17]=r19
>> +    (p6) st8 [r17]=r20
>>      (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
>>      ;;
>> -    (p7) mov rr[r28]=r19
>> -    mov r24=r22
>> +    (p7) mov rr[r28]=r20
>>      br.many b0
>>  END(vmx_asm_mov_to_rr)
>> 
>> @@ -420,7 +436,7 @@ ENTRY(vmx_asm_dispatch_vexirq)
>>      br.many vmx_dispatch_vexirq
>>  END(vmx_asm_dispatch_vexirq)
>> 
>> -// thash
>> +// thash r1=r3
>>  // TODO: add support when pta.vf = 1
>>  GLOBAL_ENTRY(vmx_asm_thash)
>>  #ifndef ACCE_THASH
>> @@ -433,8 +449,7 @@ GLOBAL_ENTRY(vmx_asm_thash)
>>      adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
>>      shladd r17=r17,4,r20    // get addr of MOVE_FROM_REG(r17)
>>      adds r16=IA64_VPD_BASE_OFFSET,r21       // get vcpu.arch.priveregs
>> -    ;;
>> -    mov r24=b0
>> +    mov r24=b0                      // save b0
>>      ;;
>>      ld8 r16=[r16]           // get VPD addr
>>      mov b0=r17
>> @@ -452,6 +467,10 @@ vmx_asm_thash_back1:
>>      extr.u r29=r17,2,6              // get pta.size
>>      ld8 r25=[r27]           // get vcpu->arch.arch_vmx.vrr[r23]'s value
>>      ;;
>> +    // Fall-back to C if VF (long format) is set
>> +    tbit.nz p6,p0=r17,8
>> +    mov b0=r24
>> +    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
>>      extr.u r25=r25,2,6              // get rr.ps
>>      shl r22=r26,r29         // 1UL << pta.size
>>      ;;
>> @@ -595,6 +614,7 @@ MOV_FROM_BANK0_REG(31)
>> 
>>  // mov from reg table
>>  // r19:     value, r30: return address
>> +// r26 may be destroyed
>>  ENTRY(asm_mov_from_reg)
>>      MOV_FROM_REG(0)
>>      MOV_FROM_REG(1)
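
For reference, the thash guard added in the patch above is equivalent
to this C sketch (pta.vf is bit 8 of the PTA; the helper name is
illustrative, not code from the tree):

#include <stdint.h>

#define PTA_VF_BIT  8   /* pta.vf: 0 = short-format VHPT, 1 = long */

/* The hand-written fast path only implements the short-format hash,
 * so when pta.vf is set the handler must branch back to the C
 * emulation (vmx_virtualization_fault_back in the patch above). */
static int thash_fast_path_ok(uint64_t pta)
{
    return ((pta >> PTA_VF_BIT) & 1) == 0;
}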

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel