[Xen-devel] [PATCH 08/12] x86/p2m: allocate CPU masks dynamically

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 08/12] x86/p2m: allocate CPU masks dynamically
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Thu, 20 Oct 2011 14:41:47 +0100
Delivery-date: Thu, 20 Oct 2011 06:50:21 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
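
The conversion below replaces the cpumask_t embedded in struct p2m_domain with a dynamically allocated cpumask_var_t, so the structure no longer carries a full NR_CPUS-sized bitmap; the cost is an allocation that can fail, hence the new -ENOMEM paths in p2m_init()/p2m_init_nestedp2m() and the NULL check added to the nested-p2m teardown. Call sites move from the by-value accessors (cpu_set(), cpumask_clear(&...)) to the pointer-based ones (cpumask_set_cpu(), cpumask_clear(...)). A minimal sketch of that allocate/free pattern, assuming only the cpumask calls already used in the patch (the struct and function names here are illustrative, not part of the Xen tree):

/* Illustrative container; the cpumask_var_t handling mirrors what the
 * patch does for struct p2m_domain. */
struct example_state {
    cpumask_var_t dirty_cpumask;    /* allocated separately, not embedded */
};

static int example_init(struct example_state *s)
{
    /* Allocate a zeroed mask; the call fails (mask left unset) when
     * memory is short, matching the patch's -ENOMEM handling. */
    if ( !zalloc_cpumask_var(&s->dirty_cpumask) )
        return -ENOMEM;
    return 0;
}

static void example_teardown(struct example_state *s)
{
    /* Release the mask allocated in example_init(). */
    free_cpumask_var(s->dirty_cpumask);
}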

--- 2011-10-18.orig/xen/arch/x86/hvm/nestedhvm.c        2011-10-11 17:24:46.000000000 +0200
+++ 2011-10-18/xen/arch/x86/hvm/nestedhvm.c     2011-10-18 16:45:02.000000000 +0200
@@ -114,9 +114,9 @@ nestedhvm_flushtlb_ipi(void *info)
 void
 nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
 {
-    on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
+    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
         p2m->domain, 1);
-    cpumask_clear(&p2m->p2m_dirty_cpumask);
+    cpumask_clear(p2m->dirty_cpumask);
 }
 
 bool_t
--- 2011-10-18.orig/xen/arch/x86/mm/hap/nested_hap.c    2011-09-12 08:34:38.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/hap/nested_hap.c 2011-10-18 16:44:35.000000000 +0200
@@ -88,7 +88,7 @@ nestedp2m_write_p2m_entry(struct p2m_dom
     safe_write_pte(p, new);
 
     if (old_flags & _PAGE_PRESENT)
-        flush_tlb_mask(&p2m->p2m_dirty_cpumask);
+        flush_tlb_mask(p2m->dirty_cpumask);
     
     paging_unlock(d);
 }
--- 2011-10-18.orig/xen/arch/x86/mm/p2m.c       2011-10-14 09:47:46.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/p2m.c    2011-10-18 16:45:49.000000000 +0200
@@ -81,7 +81,6 @@ static void p2m_initialise(struct domain
     p2m->default_access = p2m_access_rwx;
 
     p2m->cr3 = CR3_EADDR;
-    cpumask_clear(&p2m->p2m_dirty_cpumask);
 
     if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(p2m);
@@ -102,6 +101,8 @@ p2m_init_nestedp2m(struct domain *d)
         d->arch.nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
         if (p2m == NULL)
             return -ENOMEM;
+        if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+            return -ENOMEM;
         p2m_initialise(d, p2m);
         p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
         list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
@@ -117,6 +118,8 @@ int p2m_init(struct domain *d)
     p2m_get_hostp2m(d) = p2m = xzalloc(struct p2m_domain);
     if ( p2m == NULL )
         return -ENOMEM;
+    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+        return -ENOMEM;
     p2m_initialise(d, p2m);
 
     /* Must initialise nestedp2m unconditionally
@@ -330,6 +333,9 @@ static void p2m_teardown_nestedp2m(struc
     uint8_t i;
 
     for (i = 0; i < MAX_NESTEDP2M; i++) {
+        if ( !d->arch.nested_p2m[i] )
+            continue;
+        free_cpumask_var(d->arch.nested_p2m[i]->dirty_cpumask);
         xfree(d->arch.nested_p2m[i]);
         d->arch.nested_p2m[i] = NULL;
     }
@@ -338,8 +344,12 @@ static void p2m_teardown_nestedp2m(struc
 void p2m_final_teardown(struct domain *d)
 {
     /* Iterate over all p2m tables per domain */
-    xfree(d->arch.p2m);
-    d->arch.p2m = NULL;
+    if ( d->arch.p2m )
+    {
+        free_cpumask_var(d->arch.p2m->dirty_cpumask);
+        xfree(d->arch.p2m);
+        d->arch.p2m = NULL;
+    }
 
     /* We must teardown unconditionally because
      * we initialise them unconditionally.
@@ -1197,7 +1207,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
             if (p2m->cr3 == CR3_EADDR)
                 hvm_asid_flush_vcpu(v);
             p2m->cr3 = cr3;
-            cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+            cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
             p2m_unlock(p2m);
             nestedp2m_unlock(d);
             return p2m;
@@ -1214,7 +1224,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
     p2m->cr3 = cr3;
     nv->nv_flushp2m = 0;
     hvm_asid_flush_vcpu(v);
-    cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
     p2m_unlock(p2m);
     nestedp2m_unlock(d);
 
--- 2011-10-18.orig/xen/include/asm-x86/p2m.h   2011-09-12 08:34:38.000000000 +0200
+++ 2011-10-18/xen/include/asm-x86/p2m.h        2011-10-18 16:39:34.000000000 +0200
@@ -198,7 +198,7 @@ struct p2m_domain {
      * this p2m and those physical cpus whose vcpu's are in
      * guestmode.
      */
-    cpumask_t          p2m_dirty_cpumask;
+    cpumask_var_t      dirty_cpumask;
 
     struct domain     *domain;   /* back pointer to domain */
 


Attachment: x86-p2m-dirty-cpumask-alloc.patch
Description: Text document
