[Xen-changelog] [xen-unstable] [IA64] Switch on PKR

# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1185835912 21600
# Node ID 57f519c41534de7448f8029df96917ffd71f6334
# Parent  255abff9d1f75f1dd1502e5764c736835232712f
[IA64] Switch on PKR

First implementation of protection key handling for domUs.  Currently
only 15 registers are usable by a domU; pkr[15] is reserved for the
hypervisor.  The hypervisor does not check whether two registers hold
the same key.
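
For readers without the IA-64 headers at hand: the new vcpu_set_pkr()
below rejects values whose reserved fields of ia64_pkr_t are non-zero.
A minimal sketch of the assumed protection key register layout
(illustrative only; the real ia64_pkr_t in the tree may differ in
naming):

    /* Assumed IA-64 pkr layout: valid bit, access-disable bits, 24-bit
     * key, and two reserved fields that must be zero. */
    typedef union {
            unsigned long val;
            struct {
                    unsigned long v         :  1;   /* valid */
                    unsigned long wd        :  1;   /* write disable */
                    unsigned long rd        :  1;   /* read disable */
                    unsigned long xd        :  1;   /* execute disable */
                    unsigned long reserved1 :  4;   /* must be 0 */
                    unsigned long key       : 24;   /* protection key */
                    unsigned long reserved2 : 32;   /* must be 0 */
            } pk;
    } example_ia64_pkr_t;

Under this layout, and assuming IA64_PKR_KEY is the bit offset of the
key field (8), XEN_IA64_PKR_VAL below is simply key 0 with the valid
bit set.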

Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c      |    2 
 xen/arch/ia64/xen/vcpu.c        |   84 +++++++++++++++++++++++++++++++++++++---
 xen/include/asm-ia64/domain.h   |    7 +++
 xen/include/asm-ia64/vcpu.h     |   13 ++++++
 xen/include/asm-ia64/xenkregs.h |    3 +
 5 files changed, 103 insertions(+), 6 deletions(-)

diff -r 255abff9d1f7 -r 57f519c41534 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Mon Jul 30 16:38:47 2007 -0600
+++ b/xen/arch/ia64/xen/domain.c        Mon Jul 30 16:51:52 2007 -0600
@@ -262,6 +262,8 @@ void context_switch(struct vcpu *prev, s
             load_region_regs(current);
             ia64_set_pta(vcpu_pta(current));
             vcpu_load_kernel_regs(current);
+            if (vcpu_pkr_in_use(current))
+                vcpu_pkr_load_regs(current);
             vcpu_set_next_timer(current);
             if (vcpu_timer_expired(current))
                 vcpu_pend_timer(current);
diff -r 255abff9d1f7 -r 57f519c41534 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Mon Jul 30 16:38:47 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c  Mon Jul 30 16:51:52 2007 -0600
@@ -242,6 +242,42 @@ IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 r
 }
 
 /**************************************************************************
+ VCPU protection key emulation for PV domains
+ This first implementation reserves one pkr for the hypervisor key.
+ When psr.pk is set, the hypervisor key is loaded into pkr[15], so that
+ the hypervisor can keep running with psr.pk==1. The hypervisor key is 0.
+ In addition the VCPU is flagged as using protection keys.
+ Currently the domU must keep track of the keys it uses, because setting
+ a pkr does not check the other pkr's for an entry that already holds
+ the same key.
+**************************************************************************/
+
+/* Load the protection key registers from struct arch_vcpu into the
+ * processor's pkr's. Called from context_switch().
+ * TODO: take care of the order in which the pkr's are written.
+ */
+void vcpu_pkr_load_regs(VCPU * vcpu)
+{
+       int i;
+
+       for (i = 0; i <= XEN_IA64_NPKRS; i++)
+               ia64_set_pkr(i, PSCBX(vcpu, pkrs[i]));
+}
+
+/* Activate pkr handling for this VCPU. */
+static void vcpu_pkr_set_psr_handling(VCPU * vcpu)
+{
+       if (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE)
+               return;
+
+       vcpu_pkr_use_set(vcpu);
+       PSCBX(vcpu, pkrs[XEN_IA64_NPKRS]) = XEN_IA64_PKR_VAL;
+
+       /* Write the special key for the hypervisor into pkr[15]. */
+       ia64_set_pkr(XEN_IA64_NPKRS, XEN_IA64_PKR_VAL);
+}
+
+/**************************************************************************
  VCPU processor status register access routines
 **************************************************************************/
 
@@ -284,7 +320,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
        // just handle psr.up and psr.pp for now
        if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
                      IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
-                     IA64_PSR_DFL | IA64_PSR_DFH))
+                     IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_PK))
                return IA64_ILLOP_FAULT;
        if (imm.dfh) {
                ipsr->dfh = PSCB(vcpu, hpsr_dfh);
@@ -309,6 +345,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
                ipsr->be = 0;
        if (imm.dt)
                vcpu_set_metaphysical_mode(vcpu, TRUE);
+       if (imm.pk) {
+               ipsr->pk = 0;
+               vcpu_pkr_use_unset(vcpu);
+       }
        __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
        return IA64_NO_FAULT;
 }
@@ -340,7 +380,8 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
        // just handle psr.sp,pp and psr.i,ic (and user mask) for now
        mask =
            IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
-           IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE;
+           IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE |
+           IA64_PSR_PK;
        if (imm24 & ~mask)
                return IA64_ILLOP_FAULT;
        if (imm.dfh) {
@@ -388,6 +429,10 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
                ipsr->be = 1;
        if (imm.dt)
                vcpu_set_metaphysical_mode(vcpu, FALSE);
+       if (imm.pk) {
+               vcpu_pkr_set_psr_handling(vcpu);
+               ipsr->pk = 1;
+       }
        __asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
        if (enabling_interrupts &&
            vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
@@ -448,6 +493,11 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u6
                vcpu_set_metaphysical_mode(vcpu, TRUE);
        if (newpsr.be)
                ipsr->be = 1;
+       if (newpsr.pk) {
+               vcpu_pkr_set_psr_handling(vcpu);
+               ipsr->pk = 1;
+       } else
+               vcpu_pkr_use_unset(vcpu);
        if (enabling_interrupts &&
            vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
                PSCB(vcpu, pending_interruption) = 1;
@@ -504,6 +554,11 @@ IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 
                else
                        vcpu_bsw0(vcpu);
        }
+       if (vpsr.pk) {
+               vcpu_pkr_set_psr_handling(vcpu);
+               newpsr.pk = 1;
+       } else
+               vcpu_pkr_use_unset(vcpu);
 
        regs->cr_ipsr = newpsr.val;
 
@@ -2058,14 +2113,31 @@ IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 r
 
 IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval)
 {
-       printk("vcpu_get_pkr: called, not implemented yet\n");
-       return IA64_ILLOP_FAULT;
+       if (reg > XEN_IA64_NPKRS)
+               return IA64_RSVDREG_FAULT;      /* register index too large */
+
+       *pval = (u64) PSCBX(vcpu, pkrs[reg]);
+       return IA64_NO_FAULT;
 }
 
 IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val)
 {
-       printk("vcpu_set_pkr: called, not implemented yet\n");
-       return IA64_ILLOP_FAULT;
+       ia64_pkr_t pkr_new;
+
+       if (reg >= XEN_IA64_NPKRS)
+               return IA64_RSVDREG_FAULT;      /* index too large */
+
+       pkr_new.val = val;
+       if (pkr_new.reserved1)
+               return IA64_RSVDREG_FAULT;      /* reserved field */
+
+       if (pkr_new.reserved2)
+               return IA64_RSVDREG_FAULT;      /* reserved field */
+
+       PSCBX(vcpu, pkrs[reg]) = pkr_new.val;
+       ia64_set_pkr(reg, pkr_new.val);
+
+       return IA64_NO_FAULT;
 }
 
 /**************************************************************************
diff -r 255abff9d1f7 -r 57f519c41534 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Mon Jul 30 16:38:47 2007 -0600
+++ b/xen/include/asm-ia64/domain.h     Mon Jul 30 16:51:52 2007 -0600
@@ -239,6 +239,13 @@ struct arch_vcpu {
     struct timer hlt_timer;
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
 
+    /* This array holds the protection keys for pkr emulation of PV domains.
+     * Currently only 15 registers are usable by a domU; pkr[15] is
+     * reserved for the hypervisor. */
+    unsigned long pkrs[XEN_IA64_NPKRS+1];      /* protection key registers */
+#define XEN_IA64_PKR_IN_USE    0x1             /* Set when the guest enables psr.pk. */
+    unsigned char pkr_flags;
+
 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
     PTA                 pta;
     unsigned long       vhpt_maddr;
diff -r 255abff9d1f7 -r 57f519c41534 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Mon Jul 30 16:38:47 2007 -0600
+++ b/xen/include/asm-ia64/vcpu.h       Mon Jul 30 16:51:52 2007 -0600
@@ -125,6 +125,19 @@ extern IA64FAULT vcpu_get_rr(VCPU * vcpu
 extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
 extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
 /* protection key registers */
+extern void vcpu_pkr_load_regs(VCPU * vcpu);
+static inline int vcpu_pkr_in_use(VCPU * vcpu)
+{
+       return (PSCBX(vcpu, pkr_flags) & XEN_IA64_PKR_IN_USE);
+}
+static inline void vcpu_pkr_use_set(VCPU * vcpu)
+{
+       PSCBX(vcpu, pkr_flags) |= XEN_IA64_PKR_IN_USE;
+}
+static inline void vcpu_pkr_use_unset(VCPU * vcpu)
+{
+       PSCBX(vcpu, pkr_flags) &= ~XEN_IA64_PKR_IN_USE;
+}
 extern IA64FAULT vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
 extern IA64FAULT vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
 extern IA64FAULT vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
diff -r 255abff9d1f7 -r 57f519c41534 xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h   Mon Jul 30 16:38:47 2007 -0600
+++ b/xen/include/asm-ia64/xenkregs.h   Mon Jul 30 16:51:52 2007 -0600
@@ -63,4 +63,7 @@
 
 #define        XEN_IA64_NPKRS          15      /* Number of pkr's in PV */
 
+       /* A pkr val for the hypervisor: key = 0, valid = 1. */
+#define XEN_IA64_PKR_VAL       ((0 << IA64_PKR_KEY) | IA64_PKR_VALID)
+
 #endif /* _ASM_IA64_XENKREGS_H */
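
As a footnote on the new vcpu_set_pkr()/vcpu_get_pkr() behaviour: an
out-of-range index (pkr[15] belongs to the hypervisor) or a value with
reserved bits set is rejected; otherwise the value is mirrored into the
per-vcpu pkrs[] shadow and written to the hardware register.  Below is
a small stand-alone sketch of that check; the sim_* names, bit
positions and test values are illustrative assumptions, not Xen code:

#include <stdint.h>
#include <stdio.h>

#define SIM_NPKRS 15                       /* keys usable by the guest */

static uint64_t sim_pkrs[SIM_NPKRS + 1];   /* shadow pkr file; [15] = Xen */

/* Return 0 on success, -1 for a reserved index or reserved bits set. */
static int sim_set_pkr(unsigned int reg, uint64_t val)
{
        const uint64_t reserved = ((uint64_t)0xf << 4) |   /* bits 7:4 */
                                  (~(uint64_t)0 << 32);    /* bits 63:32 */

        if (reg >= SIM_NPKRS)       /* pkr[15] is the hypervisor's slot */
                return -1;
        if (val & reserved)         /* reserved fields must be zero */
                return -1;

        sim_pkrs[reg] = val;        /* Xen additionally does ia64_set_pkr() */
        return 0;
}

int main(void)
{
        printf("%d\n", sim_set_pkr(3, (5ull << 8) | 1)); /* key 5, valid: 0 */
        printf("%d\n", sim_set_pkr(15, 1));              /* reserved slot: -1 */
        return 0;
}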

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
