xen-devel

[Xen-devel] [PATCH] Add save/restore support for viridian APIC assist pfn

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] Add save/restore support for viridian APIC assist pfn
From: Paul Durrant <paul.durrant@xxxxxxxxxx>
Date: Fri, 23 Sep 2011 13:36:06 +0100
Delivery-date: Fri, 23 Sep 2011 05:39:22 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.6.4
# HG changeset patch
# User Paul Durrant <paul.durrant@xxxxxxxxxx>
# Date 1316781326 -3600
# Node ID 55a9ffe0ca81b9b4183626f81fa54343d378704f
# Parent  cc339ab1d91789ed6ff4d3d9abc1bae2e90ac294
Add save/restore support for viridian APIC assist pfn.

c/s 17b754cab7b0 introduced a per-VCPU viridian structure to
store the APIC assist pfn. This patch adds support for save and
restore of that value.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
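
[For context; not part of the patch. After this change an HVM save stream
for a Viridian-enabled guest carries one domain-scope record plus one
record per VCPU, and xen-hvmctx renders them with the printf formats added
below. A rough sketch of the relevant output for a 2-VCPU guest; all
values are illustrative, not taken from a real domain:

    VIRIDIAN_DOMAIN: hypercall gpa 0xfe000001, guest_os_id 0x1000206040001
    VIRIDIAN_VCPU: apic_assist 0xfd000001
    VIRIDIAN_VCPU: apic_assist 0xfc000001
]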

diff -r cc339ab1d917 -r 55a9ffe0ca81 tools/misc/xen-hvmctx.c
--- a/tools/misc/xen-hvmctx.c   Thu Sep 22 18:37:06 2011 +0100
+++ b/tools/misc/xen-hvmctx.c   Fri Sep 23 13:35:26 2011 +0100
@@ -366,15 +366,23 @@ static void dump_mtrr(void)
                (unsigned long long) p.msr_mtrr_fixed[i]);
 }
 
-static void dump_viridian(void)
+static void dump_viridian_domain(void)
 {
-    HVM_SAVE_TYPE(VIRIDIAN) p;
+    HVM_SAVE_TYPE(VIRIDIAN_DOMAIN) p;
     READ(p);
-    printf("    VIRIDIAN: hypercall gpa 0x%llx, guest ID 0x%llx\n",
+    printf("    VIRIDIAN_DOMAIN: hypercall gpa 0x%llx, guest_os_id 0x%llx\n",
            (unsigned long long) p.hypercall_gpa,
            (unsigned long long) p.guest_os_id);           
 }
 
+static void dump_viridian_vcpu(void)
+{
+    HVM_SAVE_TYPE(VIRIDIAN_VCPU) p;
+    READ(p);
+    printf("    VIRIDIAN_VCPU: apic_assist 0x%llx\n",
+           (unsigned long long) p.apic_assist);           
+}
+
 int main(int argc, char **argv)
 {
     int entry, domid;
@@ -439,7 +447,8 @@ int main(int argc, char **argv)
         case HVM_SAVE_CODE(HPET): dump_hpet(); break;
         case HVM_SAVE_CODE(PMTIMER): dump_pmtimer(); break;
         case HVM_SAVE_CODE(MTRR): dump_mtrr(); break;
-        case HVM_SAVE_CODE(VIRIDIAN): dump_viridian(); break;
+        case HVM_SAVE_CODE(VIRIDIAN_DOMAIN): dump_viridian_domain(); break;
+        case HVM_SAVE_CODE(VIRIDIAN_VCPU): dump_viridian_vcpu(); break;
         case HVM_SAVE_CODE(END): break;
         default:
             printf(" ** Don't understand type %u: skipping\n",
diff -r cc339ab1d917 -r 55a9ffe0ca81 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c       Thu Sep 22 18:37:06 2011 +0100
+++ b/xen/arch/x86/hvm/viridian.c       Fri Sep 23 13:35:26 2011 +0100
@@ -172,10 +172,10 @@ void initialize_apic_assist(struct vcpu 
     uint8_t *p;
 
     /*
-     * We don't support the APIC assist page, and that fact is reflected in
-     * our CPUID flags. However, Newer versions of Windows have a bug which
-     * means that they don't recognise that, and tries to use the page
-     * anyway. We therefore have to fake up just enough to keep windows happy.
+     * We don't yet make use of the APIC assist page but by setting
+     * the CPUID3A_MSR_APIC_ACCESS bit in CPUID leaf 40000003 we are duty
+     * bound to support the MSR. We therefore do just enough to keep windows
+     * happy.
      *
      * See http://msdn.microsoft.com/en-us/library/ff538657%28VS.85%29.aspx for
      * details of how Windows uses the page.
@@ -387,9 +387,9 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_viridian_context ctxt;
+    struct hvm_viridian_domain_context ctxt;
 
     if ( !is_viridian_domain(d) )
         return 0;
@@ -397,14 +397,14 @@ static int viridian_save_cpu_ctxt(struct
     ctxt.hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
     ctxt.guest_os_id   = d->arch.hvm_domain.viridian.guest_os_id.raw;
 
-    return (hvm_save_entry(VIRIDIAN, 0, h, &ctxt) != 0);
+    return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
 }
 
-static int viridian_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_viridian_context ctxt;
+    struct hvm_viridian_domain_context ctxt;
 
-    if ( hvm_load_entry(VIRIDIAN, h, &ctxt) != 0 )
+    if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
     d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
@@ -413,5 +413,48 @@ static int viridian_load_cpu_ctxt(struct
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(VIRIDIAN, viridian_save_cpu_ctxt,
-                          viridian_load_cpu_ctxt, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
+                          viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
+
+static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+
+    if ( !is_viridian_domain(d) )
+        return 0;
+
+    for_each_vcpu( d, v ) {
+        struct hvm_viridian_vcpu_context ctxt;
+
+        ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.raw;
+
+        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
+            return 1;
+    }
+
+    return 0;
+}
+
+static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct vcpu *v;
+    struct hvm_viridian_vcpu_context ctxt;
+
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
+        return -EINVAL;
+
+    v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist;
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
+                          viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
diff -r cc339ab1d917 -r 55a9ffe0ca81 xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h    Thu Sep 22 18:37:06 2011 +0100
+++ b/xen/include/public/arch-x86/hvm/save.h    Fri Sep 23 13:35:26 2011 +0100
@@ -547,18 +547,6 @@ struct hvm_hw_mtrr {
 DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
 
 /*
- * Viridian hypervisor context.
- */
-
-struct hvm_viridian_context {
-    uint64_t hypercall_gpa;
-    uint64_t guest_os_id;
-};
-
-DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
-
-
-/*
  * The save area of XSAVE/XRSTOR.
  */
 
@@ -580,9 +568,26 @@ struct hvm_hw_cpu_xsave {
 
 #define CPU_XSAVE_CODE  16
 
+/*
+ * Viridian hypervisor context.
+ */
+
+struct hvm_viridian_domain_context {
+    uint64_t hypercall_gpa;
+    uint64_t guest_os_id;
+};
+
+DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
+
+struct hvm_viridian_vcpu_context {
+    uint64_t apic_assist;
+};
+
+DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 16
+#define HVM_SAVE_CODE_MAX 17
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
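
[A quick way to exercise the new code paths; a sketch only, assuming a
Viridian-enabled HVM guest already running as domain 1 (the exact
toolstack invocations may differ on your build):

    xen-hvmctx 1 | grep VIRIDIAN   # dump the new records from a live guest
    xm save 1 guest.chk            # exercises viridian_save_*_ctxt
    xm restore guest.chk           # exercises viridian_load_*_ctxt

Note that type code 15 is reused for VIRIDIAN_DOMAIN with an identical
two-uint64 payload, so domain records from streams saved before this patch
should still load; only the VIRIDIAN_VCPU record (code 17) is new.
]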

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
