
To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 1 of 4] Nested p2m: implement "flush" as a first-class action
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Wed, 22 Jun 2011 17:10:27 +0100
Cc: Christoph Egger <Christoph.Egger@xxxxxxx>
Delivery-date: Wed, 22 Jun 2011 09:15:36 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1308759026@xxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1308759026@xxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.8.3
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1308758648 -3600
# Node ID c323e69a0a08ce9f1e54d2e2fa2edd9845bc8efe
# Parent  b7e5a25663329254cba539e21f4fbd5b32c67556
Nested p2m: implement "flush" as a first-class action
rather than using the teardown and init functions.
This makes the locking clearer and avoids an expensive scan of all
pfns that's only needed for non-nested p2ms.  It also moves the
TLB flush into the proper place in the flush logic, avoiding a
possible race against other CPUs.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
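
For orientation, the ordering that the new flush depends on can be condensed
as follows.  This is a sketch only: the wrapper name is invented, the
assertions are dropped, and the top-level clearing is abbreviated to a
comment; the real code is p2m_flush_locked() in the p2m.c hunk below.

    /* Sketch of the flush ordering introduced below -- not part of the patch. */
    static void nested_p2m_flush_sketch(struct p2m_domain *p2m)
    {
        struct page_info *top, *pg;
        struct domain *d = p2m->domain;

        p2m_lock(p2m);

        /* 1. This table no longer backs any nested address space. */
        p2m->cr3 = CR3_EADDR;

        /* 2. Clear the top-level page in place (map it, clear_page() it,
         *    unmap it) so the p2m keeps a valid, empty top level. */
        top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));

        /* 3. IPI-flush the TLBs of every CPU that may still be using this
         *    p2m *before* any trie pages are freed, so no CPU can be left
         *    walking a page that has gone back to the paging pool. */
        nestedhvm_vmcx_flushtlb(p2m);

        /* 4. Only now hand every trie page except the top level back to
         *    the paging pool, keeping the top level on the p2m's list. */
        while ( (pg = page_list_remove_head(&p2m->pages)) )
            if ( pg != top )
                d->arch.paging.free_page(d, pg);
        page_list_add(top, &p2m->pages);

        p2m_unlock(p2m);
    }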

diff -r b7e5a2566332 -r c323e69a0a08 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Tue Jun 21 18:28:53 2011 +0100
+++ b/xen/arch/x86/hvm/nestedhvm.c      Wed Jun 22 17:04:08 2011 +0100
@@ -119,12 +119,6 @@ nestedhvm_vmcx_flushtlb(struct p2m_domai
     cpus_clear(p2m->p2m_dirty_cpumask);
 }
 
-void
-nestedhvm_vmcx_flushtlbdomain(struct domain *d)
-{
-    on_selected_cpus(d->domain_dirty_cpumask, nestedhvm_flushtlb_ipi, d, 1);
-}
-
 bool_t
 nestedhvm_is_n2(struct vcpu *v)
 {
diff -r b7e5a2566332 -r c323e69a0a08 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Jun 21 18:28:53 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Wed Jun 22 17:04:08 2011 +0100
@@ -1050,20 +1050,41 @@ p2m_getlru_nestedp2m(struct domain *d, s
     return lrup2m;
 }
 
-static int 
+/* Reset this p2m table to be empty */
+static void
 p2m_flush_locked(struct p2m_domain *p2m)
 {
-    ASSERT(p2m);
-    if (p2m->cr3 == CR3_EADDR)
-        /* Microoptimisation: p2m is already empty.
-         * => about 0.3% speedup of overall system performance.
-         */
-        return 0;
+    struct page_info *top, *pg;
+    struct domain *d = p2m->domain;
+    void *p;
 
-    p2m_teardown(p2m);
-    p2m_initialise(p2m->domain, p2m);
-    p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
-    return p2m_alloc_table(p2m);
+    p2m_lock(p2m);
+
+    /* "Host" p2m tables can have shared entries &c that need a bit more 
+     * care when discarding them */
+    ASSERT(p2m_is_nestedp2m(p2m));
+    ASSERT(page_list_empty(&p2m->pod.super));
+    ASSERT(page_list_empty(&p2m->pod.single));
+
+    /* This is no longer a valid nested p2m for any address space */
+    p2m->cr3 = CR3_EADDR;
+    
+    /* Zap the top level of the trie */
+    top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
+    p = map_domain_page(top);
+    clear_page(p);
+    unmap_domain_page(p);
+
+    /* Make sure nobody else is using this p2m table */
+    nestedhvm_vmcx_flushtlb(p2m);
+
+    /* Free the rest of the trie pages back to the paging pool */
+    while ( (pg = page_list_remove_head(&p2m->pages)) )
+        if ( pg != top ) 
+            d->arch.paging.free_page(d, pg);
+    page_list_add(top, &p2m->pages);
+
+    p2m_unlock(p2m);
 }
 
 void
@@ -1074,9 +1095,8 @@ p2m_flush(struct vcpu *v, struct p2m_dom
     ASSERT(v->domain == d);
     vcpu_nestedhvm(v).nv_p2m = NULL;
     nestedp2m_lock(d);
-    BUG_ON(p2m_flush_locked(p2m) != 0);
+    p2m_flush_locked(p2m);
     hvm_asid_flush_vcpu(v);
-    nestedhvm_vmcx_flushtlb(p2m);
     nestedp2m_unlock(d);
 }
 
@@ -1086,12 +1106,8 @@ p2m_flush_nestedp2m(struct domain *d)
     int i;
 
     nestedp2m_lock(d);
-    for (i = 0; i < MAX_NESTEDP2M; i++) {
-        struct p2m_domain *p2m = d->arch.nested_p2m[i];
-        BUG_ON(p2m_flush_locked(p2m) != 0);
-        cpus_clear(p2m->p2m_dirty_cpumask);
-    }
-    nestedhvm_vmcx_flushtlbdomain(d);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+        p2m_flush_locked(d->arch.nested_p2m[i]);
     nestedp2m_unlock(d);
 }
 
@@ -1104,7 +1120,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
     volatile struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct domain *d;
     struct p2m_domain *p2m;
-    int i, rv;
+    int i;
 
     if (cr3 == 0 || cr3 == CR3_EADDR)
         cr3 = v->arch.hvm_vcpu.guest_cr[3];
@@ -1136,9 +1152,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
      */
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         p2m = p2m_getlru_nestedp2m(d, NULL);
-        rv = p2m_flush_locked(p2m);
-        if (rv == 0)
-            break;
+        p2m_flush_locked(p2m);
     }
     nv->nv_p2m = p2m;
     p2m->cr3 = cr3;
diff -r b7e5a2566332 -r c323e69a0a08 xen/include/asm-x86/hvm/nestedhvm.h
--- a/xen/include/asm-x86/hvm/nestedhvm.h       Tue Jun 21 18:28:53 2011 +0100
+++ b/xen/include/asm-x86/hvm/nestedhvm.h       Wed Jun 22 17:04:08 2011 +0100
@@ -61,7 +61,6 @@ unsigned long *nestedhvm_vcpu_iomap_get(
     (!!vcpu_nestedhvm((v)).nv_vmswitch_in_progress)
 
 void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m);
-void nestedhvm_vmcx_flushtlbdomain(struct domain *d);
 
 bool_t nestedhvm_is_n2(struct vcpu *v);
 
