xen-changelog

[Xen-changelog] [xen-unstable] [IA64] cleanup: vcpu_set_psr_sm.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] cleanup: vcpu_set_psr_sm.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 02 Jun 2008 04:40:10 -0700
Delivery-date: Mon, 02 Jun 2008 04:40:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1211965844 -32400
# Node ID 9c0a654157cbf18650183b91246529407c305ac9
# Parent  74d0f17f3fa5245675aa8d3cdfc354e6702645f3
[IA64] cleanup: vcpu_set_psr_sm.

It is pointless to set the machine psr: the live PSR is rebuilt from the saved cr.ipsr image when the vcpu resumes, so only the virtualized copy needs to be updated.

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/vcpu.c |   36 +++++++++---------------------------
 1 files changed, 9 insertions(+), 27 deletions(-)

diff -r 74d0f17f3fa5 -r 9c0a654157cb xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Thu May 22 19:42:51 2008 +0900
+++ b/xen/arch/ia64/xen/vcpu.c  Wed May 28 18:10:44 2008 +0900
@@ -301,13 +301,12 @@ IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
 
 IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
 {
-       struct ia64_psr psr, imm, *ipsr;
+       struct ia64_psr imm, *ipsr;
        REGS *regs = vcpu_regs(vcpu);
 
        //PRIVOP_COUNT_ADDR(regs,_RSM);
        // TODO: All of these bits need to be virtualized
        // TODO: Only allowed for current vcpu
-       __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        imm = *(struct ia64_psr *)&imm24;
        // interrupt flag
@@ -336,14 +335,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
                // ipsr->pp = 1;
                PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
        }
-       if (imm.up) {
+       if (imm.up)
                ipsr->up = 0;
-               psr.up = 0;
-       }
-       if (imm.sp) {
+       if (imm.sp)
                ipsr->sp = 0;
-               psr.sp = 0;
-       }
        if (imm.be)
                ipsr->be = 0;
        if (imm.dt)
@@ -352,7 +347,6 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
                ipsr->pk = 0;
                vcpu_pkr_use_unset(vcpu);
        }
-       __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
        return IA64_NO_FAULT;
 }
 
@@ -371,13 +365,12 @@ IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
 
 IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
 {
-       struct ia64_psr psr, imm, *ipsr;
+       struct ia64_psr imm, *ipsr;
        REGS *regs = vcpu_regs(vcpu);
        u64 mask, enabling_interrupts = 0;
 
        //PRIVOP_COUNT_ADDR(regs,_SSM);
        // TODO: All of these bits need to be virtualized
-       __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
        imm = *(struct ia64_psr *)&imm24;
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        // just handle psr.sp,pp and psr.i,ic (and user mask) for now
@@ -401,10 +394,8 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
                // ipsr->pp = 1;
                PSCB(vcpu, vpsr_pp) = 1;
        }
-       if (imm.sp) {
+       if (imm.sp)
                ipsr->sp = 1;
-               psr.sp = 1;
-       }
        if (imm.i) {
                if (vcpu->vcpu_info->evtchn_upcall_mask) {
 //printk("vcpu_set_psr_sm: psr.ic 0->1\n");
@@ -415,22 +406,14 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
        if (imm.ic)
                PSCB(vcpu, interrupt_collection_enabled) = 1;
        // TODO: do this faster
-       if (imm.mfl) {
+       if (imm.mfl)
                ipsr->mfl = 1;
-               psr.mfl = 1;
-       }
-       if (imm.mfh) {
+       if (imm.mfh)
                ipsr->mfh = 1;
-               psr.mfh = 1;
-       }
-       if (imm.ac) {
+       if (imm.ac)
                ipsr->ac = 1;
-               psr.ac = 1;
-       }
-       if (imm.up) {
+       if (imm.up)
                ipsr->up = 1;
-               psr.up = 1;
-       }
        if (imm.be)
                ipsr->be = 1;
        if (imm.dt)
@@ -439,7 +422,6 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
                vcpu_pkr_set_psr_handling(vcpu);
                ipsr->pk = 1;
        }
-       __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
        if (enabling_interrupts &&
            vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
                PSCB(vcpu, pending_interruption) = 1;
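
A note for readers skimming the archive: the removed __asm__ blocks read the live machine PSR into a local copy, mirrored each user-mask bit (up, sp, mfl, mfh, ac) into it, and wrote it back with srlz.d, even though that value is discarded once the saved cr.ipsr image is reloaded on return to the guest. The minimal C sketch below models only the pattern that remains after the cleanup; the names (ia64_psr_bits, fake_regs, set_user_mask_bits) are invented for illustration and are not part of the Xen source.

#include <stdio.h>

/* All names here are made up for illustration; they are not taken
 * from the Xen tree. */

struct ia64_psr_bits {
    unsigned up:1;   /* user performance monitor enable */
    unsigned sp:1;   /* secure performance monitors     */
    unsigned mfl:1;  /* lower FP register set modified  */
};

struct fake_regs {
    /* saved PSR image, reloaded when control returns to the guest */
    struct ia64_psr_bits cr_ipsr;
};

/* Shape of the bit handling after the cleanup: only the saved image
 * is touched; there is no read-modify-write of the live machine PSR. */
static void set_user_mask_bits(struct fake_regs *regs,
                               struct ia64_psr_bits imm)
{
    if (imm.up)
        regs->cr_ipsr.up = 1;
    if (imm.sp)
        regs->cr_ipsr.sp = 1;
    if (imm.mfl)
        regs->cr_ipsr.mfl = 1;
}

int main(void)
{
    struct fake_regs regs = { .cr_ipsr = { 0, 0, 0 } };
    struct ia64_psr_bits imm = { .up = 1, .mfl = 1 };

    set_user_mask_bits(&regs, imm);

    printf("cr_ipsr: up=%u sp=%u mfl=%u\n",
           (unsigned)regs.cr_ipsr.up,
           (unsigned)regs.cr_ipsr.sp,
           (unsigned)regs.cr_ipsr.mfl);
    return 0;
}

The real vcpu_set_psr_sm of course handles many more bits and the PSCB bookkeeping shown in the patch; the sketch only captures the design point that updating the saved image alone is sufficient.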

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
