[Xen-ia64-devel] [PATCH] EFI mapping: restoring mapping correctly.

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH] EFI mapping: restoring mapping correctly.
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Wed, 20 Aug 2008 15:39:07 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Delivery-date: Tue, 19 Aug 2008 23:36:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.18 (2008-05-17)
[IA64] EFI mapping: restoring mapping correctly.

When switching back from the EFI mapping, restore the previous mapping
correctly according to the current vcpu type.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>

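For readers skimming the diff, the change is easiest to see as the dispatch
that the new set_one_rr_efi_restore() performs when rr7 is restored after a
firmware call.  The sketch below is a simplified restatement of the logic
added in regionreg.c, not the literal patch text; all helper names are the
ones introduced or already present in the tree:

    /* Simplified sketch of the new rr6/rr7 restore path. */
    void set_one_rr_efi_restore(unsigned long rr, unsigned long val)
    {
        unsigned long rreg = REGION_NUMBER(rr);

        if (rreg == 6) {
            /* rr6: a plain region register write is enough. */
            ia64_set_rr(rr, val);
            ia64_srlz_d();
            return;
        }

        /* rr7: choose the restore routine matching the current vcpu. */
        if (current == NULL || current->domain == NULL ||
            is_idle_vcpu(current))
            /* Very early firmware calls, before vcpu/domain exist. */
            ia64_new_rr7_efi(val,
                             cpu_isset(smp_processor_id(), percpu_set), 0UL);
        else if (VMX_DOMAIN(current))
            /* VMX (HVM) vcpu: reinstall the guest VHPT and privregs. */
            __vmx_switch_rr7_vcpu(current, val);
        else
            /* PV vcpu: reinstall shared_info, privregs and the VHPT. */
            ia64_new_rr7_vcpu(current, val);
    }
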
diff -r 57e8888b2b6c xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Aug 07 19:07:29 2008 +0900
@@ -169,9 +169,7 @@
        ia64_dv_serialize_data();
        ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
        ia64_dv_serialize_data();
-       vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
-                      (void *)vcpu->arch.vhpt.hash,
-                      vcpu->arch.privregs);
+       vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
        ia64_set_pta(VMX(vcpu, mpta));
        vmx_ia64_set_dcr(vcpu);
 
diff -r 57e8888b2b6c xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Aug 07 19:07:29 2008 +0900
@@ -196,13 +196,17 @@
     }
 }
 
-void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
-                    void *shared_arch_info)
+void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
 {
-    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
-    __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
-    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
-    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
+    __vmx_switch_rr7(rid, (void *)v->arch.vhpt.hash, v->arch.privregs);
+}
+
+void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
+{
+    __get_cpu_var(inserted_vhpt) = (unsigned long)v->arch.vhpt.hash;
+    __get_cpu_var(inserted_vpd) = (unsigned long)v->arch.privregs;
+    __get_cpu_var(inserted_mapped_regs) = (unsigned long)v->arch.privregs;
+    __vmx_switch_rr7_vcpu(v, rid);
 }
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
@@ -218,8 +222,7 @@
     switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         if (likely(vcpu == current))
-            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
-                           vcpu->arch.privregs);
+            vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, val));
        break;
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
diff -r 57e8888b2b6c xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c     Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/arch/ia64/xen/regionreg.c     Thu Aug 07 19:07:29 2008 +0900
@@ -18,9 +18,12 @@
 #include <asm/vcpu.h>
 #include <asm/percpu.h>
 #include <asm/pal.h>
+#include <asm/vmx_vcpu.h>
 
 /* Defined in xemasm.S  */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
+extern void ia64_new_rr7(unsigned long rid, void *shared_info,
+                        void *shared_arch_info, unsigned long shared_info_va,
+                        unsigned long va_vhpt);
 extern void ia64_new_rr7_efi(unsigned long rid, unsigned long repin_percpu,
                             unsigned long vpd);
 
@@ -238,6 +241,14 @@
        ia64_srlz_d();
 }
 
+static inline void
+ia64_new_rr7_vcpu(struct vcpu *v, unsigned long rid)
+{
+       ia64_new_rr7(rid, v->domain->shared_info,
+                    v->arch.privregs, v->domain->arch.shared_info_va,
+                    __va_ul(vcpu_vhpt_maddr(v)));
+}
+
 // validates and changes a single region register
 // in the currently executing domain
 // Passing a value of -1 is a (successful) no-op
@@ -281,9 +292,7 @@
                __get_cpu_var(inserted_mapped_regs) =
                                        v->domain->arch.shared_info_va +
                                        XMAPPEDREGS_OFS;
-               ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
-                            v->arch.privregs, v->domain->arch.shared_info_va,
-                            __va_ul(vcpu_vhpt_maddr(v)));
+               ia64_new_rr7_vcpu(v, vmMangleRID(newrrv.rrval));
        } else {
                set_rr(rr,newrrv.rrval);
        }
@@ -309,6 +318,31 @@
        }
 
        return 1;
+}
+
+void
+set_one_rr_efi_restore(unsigned long rr, unsigned long val)
+{
+       unsigned long rreg = REGION_NUMBER(rr);
+       
+       BUG_ON(rreg != 6 && rreg != 7);
+
+       if (rreg == 6) {
+               ia64_set_rr(rr, val);
+               ia64_srlz_d();
+       } else {
+               /* The firmware call is done very early, before struct vcpu
+                  and struct domain are initialized. */
+               if (unlikely(current == NULL || current->domain == NULL ||
+                            is_idle_vcpu(current)))
+                       ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
+                                                       percpu_set),
+                                        0UL);
+               else if (VMX_DOMAIN(current))
+                       __vmx_switch_rr7_vcpu(current, val);
+               else
+                       ia64_new_rr7_vcpu(current, val);
+       }
 }
 
 void set_virtual_rr0(void)
diff -r 57e8888b2b6c xen/include/asm-ia64/linux-xen/linux/efi.h
--- a/xen/include/asm-ia64/linux-xen/linux/efi.h        Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/include/asm-ia64/linux-xen/linux/efi.h        Thu Aug 07 19:07:29 2008 +0900
@@ -488,8 +488,8 @@
 #define XEN_EFI_RR_LEAVE(rr6, rr7) do {                        \
        if (rr7 != XEN_EFI_RR) {                        \
                efi_unmap_pal_code();                   \
-               set_one_rr_efi(6UL << 61, rr6);         \
-               set_one_rr_efi(7UL << 61, rr7);         \
+               set_one_rr_efi_restore(6UL << 61, rr6); \
+               set_one_rr_efi_restore(7UL << 61, rr7); \
        }                                               \
 } while (0)
 
diff -r 57e8888b2b6c xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h  Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/include/asm-ia64/regionreg.h  Thu Aug 07 19:07:29 2008 +0900
@@ -46,6 +46,7 @@
 
 int set_one_rr(unsigned long rr, unsigned long val);
 int set_one_rr_efi(unsigned long rr, unsigned long val);
+void set_one_rr_efi_restore(unsigned long rr, unsigned long val);
 
 // This function is purely for performance... apparently scrambling
 //  bits in the region id makes for better hashing, which means better
diff -r 57e8888b2b6c xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Thu Aug 07 18:34:24 2008 +0900
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu Aug 07 19:07:29 2008 +0900
@@ -105,8 +105,8 @@
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
 extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                              void *shared_arch_info);
-extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
-                           void *shared_arch_info);
+extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
+extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
 extern void vmx_ia64_set_dcr(VCPU * v);
 extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
 extern void vmx_asm_bsw0(void);
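
As context for the efi.h hunk, XEN_EFI_RR_LEAVE is the restore half of the
pair that brackets EFI runtime calls.  A hedged usage sketch follows,
assuming an existing XEN_EFI_RR_ENTER counterpart that saves rr6/rr7 and
installs the EFI mapping (only the LEAVE side is touched by this patch):

    #include <linux/efi.h>

    /* Illustrative only: a runtime-service wrapper of the usual shape.
     * XEN_EFI_RR_ENTER is assumed to save the current rr6/rr7 and switch
     * to the EFI mapping; XEN_EFI_RR_LEAVE restores them, and with this
     * patch the restore path honours the current vcpu type. */
    static efi_status_t
    example_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
    {
        unsigned long rr6, rr7;
        efi_status_t status;

        XEN_EFI_RR_ENTER(rr6, rr7);
        status = (*efi.get_time)(tm, tc); /* firmware runs under EFI mapping */
        XEN_EFI_RR_LEAVE(rr6, rr7);       /* now restores per vcpu type */

        return status;
    }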

Attachment: fix-xen-efi-rr-leave.patch
Description: Text Data

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel