WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] [IA64] Optimize vmx_vcpu_thash()

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] Optimize vmx_vcpu_thash()
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Feb 2007 09:40:23 -0800
Delivery-date: Fri, 09 Feb 2007 09:47:00 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1169762321 25200
# Node ID 6745b7274effc835d5471a44622927c8885281fa
# Parent  730e24a1594ac13e8a5a0bde7cd2530dc40b7d7b
[IA64] Optimize vmx_vcpu_thash()

Implement in assembly

Signed-off-by: Zhang Xin <xing.z.zhang@xxxxxxxxx>
---
 xen/arch/ia64/asm-offsets.c       |    1 
 xen/arch/ia64/vmx/optvfault.S     |   60 ++++++++++++++++++++++++++++++++++++++
 xen/arch/ia64/vmx/vmx_ivt.S       |    2 +
 xen/include/asm-ia64/vmx_mm_def.h |    4 +-
 4 files changed, 66 insertions(+), 1 deletion(-)

diff -r 730e24a1594a -r 6745b7274eff xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/asm-offsets.c       Thu Jan 25 14:58:41 2007 -0700
@@ -200,6 +200,7 @@ void foo(void)
        DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
        DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
        DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
+       DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
        DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
        DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
 
diff -r 730e24a1594a -r 6745b7274eff xen/arch/ia64/vmx/optvfault.S
--- a/xen/arch/ia64/vmx/optvfault.S     Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/vmx/optvfault.S     Thu Jan 25 14:58:41 2007 -0700
@@ -15,6 +15,7 @@
 #include <asm/vmx_vpd.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/asm-offsets.h>
+#include <asm-ia64/vmx_mm_def.h>
 
 #define ACCE_MOV_FROM_AR
 #define ACCE_MOV_FROM_RR
@@ -22,6 +23,7 @@
 #define ACCE_RSM
 #define ACCE_SSM
 #define ACCE_MOV_TO_PSR
+#define ACCE_THASH
 
 //mov r1=ar3
 GLOBAL_ENTRY(vmx_asm_mov_from_ar)
@@ -418,6 +420,64 @@ ENTRY(vmx_asm_dispatch_vexirq)
     br.many vmx_dispatch_vexirq
 END(vmx_asm_dispatch_vexirq)
 
+// thash
+// TODO: add support when pta.vf = 1
+GLOBAL_ENTRY(vmx_asm_thash)
+#ifndef ACCE_THASH
+    br.many vmx_virtualization_fault_back
+#endif
+    extr.u r17=r25,20,7                // get r3 from opcode in r25 
+    extr.u r18=r25,6,7         // get r1 from opcode in r25
+    movl r20=asm_mov_from_reg
+    ;;
+    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
+    shladd r17=r17,4,r20       // get addr of MOVE_FROM_REG(r17)
+    adds r16=IA64_VPD_BASE_OFFSET,r21  // get vcpu.arch.priveregs
+    ;;
+    mov r24=b0
+    ;;
+    ld8 r16=[r16]              // get VPD addr
+    mov b0=r17
+    br.many b0                 // r19 return value
+    ;;                                                     
+vmx_asm_thash_back1:
+    shr.u r23=r19,61           // get RR number
+    adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
+    adds r16=IA64_VPD_VPTA_OFFSET,r16  // get vpta 
+    ;;
+    shladd r27=r23,3,r25       // get vcpu->arch.arch_vmx.vrr[r23]'s addr
+    ld8 r17=[r16]              // get PTA
+    mov r26=1
+    ;;
+    extr.u r29=r17,2,6         // get pta.size
+    ld8 r25=[r27]              // get vcpu->arch.arch_vmx.vrr[r23]'s value
+    ;;
+    extr.u r25=r25,2,6         // get rr.ps
+    shl r22=r26,r29            // 1UL << pta.size
+    ;;
+    shr.u r23=r19,r25          // vaddr >> rr.ps
+    adds r26=3,r29             // pta.size + 3 
+    shl r27=r17,3              // pta << 3 
+    ;;
+    shl r23=r23,3              // (vaddr >> rr.ps) << 3
+    shr.u r27=r27,r26          // (pta << 3) >> (pta.size+3)
+    movl r16=VRN_MASK
+    ;;
+    adds r22=-1,r22            // (1UL << pta.size) - 1
+    shl r27=r27,r29            // ((pta<<3)>>(pta.size+3))<<pta.size
+    and r19=r19,r16            // vaddr & VRN_MASK
+    ;;
+    and r22=r22,r23            // vhpt_offset 
+    or r19=r19,r27             // (vadr&VRN_MASK) |(((pta<<3)>>(pta.size + 3))<<pta.size)
+    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
+    ;;
+    or r19=r19,r22             // calc pval
+    shladd r17=r18,4,r26
+    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
+    ;;
+    mov b0=r17
+    br.many b0
+END(vmx_asm_thash)
 
 #define MOV_TO_REG0    \
 {;                     \
diff -r 730e24a1594a -r 6745b7274eff xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Thu Jan 25 14:58:41 2007 -0700
@@ -795,12 +795,14 @@ ENTRY(vmx_virtualization_fault)
     cmp.eq p9,p0=EVENT_RSM,r24
     cmp.eq p10,p0=EVENT_SSM,r24
     cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+    cmp.eq p12,p0=EVENT_THASH,r24 
     (p6) br.dptk.many vmx_asm_mov_from_ar
     (p7) br.dptk.many vmx_asm_mov_from_rr
     (p8) br.dptk.many vmx_asm_mov_to_rr
     (p9) br.dptk.many vmx_asm_rsm
     (p10) br.dptk.many vmx_asm_ssm
     (p11) br.dptk.many vmx_asm_mov_to_psr
+    (p12) br.dptk.many vmx_asm_thash
     ;;
 vmx_virtualization_fault_back:
     mov r19=37
diff -r 730e24a1594a -r 6745b7274eff xen/include/asm-ia64/vmx_mm_def.h
--- a/xen/include/asm-ia64/vmx_mm_def.h Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/include/asm-ia64/vmx_mm_def.h Thu Jan 25 14:58:41 2007 -0700
@@ -103,7 +103,7 @@
 #define VA_MATTR_WC     0x6
 #define VA_MATTR_NATPAGE    0x7
 
-#define VRN_MASK        0xe000000000000000L
+#define VRN_MASK        0xe000000000000000
 #define PTA_BASE_MASK       0x3fffffffffffL
 #define PTA_BASE_SHIFT      15
 #define VHPT_OFFSET_MASK    0x7fff
@@ -114,6 +114,7 @@
 #define HPA_MAPPING_ATTRIBUTE   0x61  //ED:0;AR:0;PL:0;D:1;A:1;P:1
 #define VPN_2_VRN(vpn)  ((vpn << PPN_SHIFT) >> IA64_VRN_SHIFT)
 
+#ifndef __ASSEMBLY__
 typedef enum { INSTRUCTION, DATA, REGISTER } miss_type;
 
 //typedef enum { MVHPT, STLB } vtlb_loc_type_t;
@@ -169,5 +170,6 @@ bits_v(uint64_t v, uint32_t bs, uint32_t
                   "M" ((len)));                                 \
          ret;                                                   \
  })
+#endif
 
 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] [IA64] Optimize vmx_vcpu_thash(), Xen patchbot-unstable <=