To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [RFC][PATCH] 2/9 Populate-on-demand memory: calls to gfn_to_mfn_query()
From: "George Dunlap" <dunlapg@xxxxxxxxx>
Date: Tue, 23 Dec 2008 13:38:16 +0000
Shadow code, and other important places, now call gfn_to_mfn_query().  In
particular, any place that holds the shadow lock must make a query call,
so that the lookup cannot trigger a populate-on-demand allocation (and a
deadlock) while the lock is held.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
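
To illustrate the rule: when the shadow lock may already be held, the
translation has to go through the non-populating query path; outside the
lock the ordinary lookup is fine.  A minimal sketch of that pattern
(assuming the shadow/multi.c context, where the gfn_t wrappers from
shadow/types.h are in scope; the helper name is made up for illustration,
it is not part of this patch):

    static mfn_t pod_safe_translate(struct vcpu *v, unsigned long gfn,
                                    p2m_type_t *p2mt)
    {
        /* Query only while the shadow lock is held: a query call does not
         * populate a populate-on-demand entry, which avoids the deadlock
         * noted in the comment in the last shadow/multi.c hunk below. */
        if ( shadow_locked_by_me(v->domain) )
            return gfn_to_mfn_query(v->domain, _gfn(gfn), p2mt);

        /* Outside the lock the normal (possibly populating) path is safe. */
        return gfn_to_mfn(v->domain, _gfn(gfn), p2mt);
    }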

diff -r 65c24b33082a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Dec 17 12:59:28 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Dec 17 12:59:50 2008 +0000
@@ -888,7 +888,7 @@
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
      * to the mmio handler.
      */
-    mfn = gfn_to_mfn_current(gfn, &p2mt);
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
     if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
     {
         if ( !handle_mmio() )
diff -r 65c24b33082a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Dec 17 12:59:28 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Dec 17 12:59:50 2008 +0000
@@ -2099,9 +2099,9 @@
     mfn_t mfn;
     p2m_type_t t;

-    mfn = gfn_to_mfn(d, gfn, &t);
+    mfn = gfn_to_mfn_guest(d, gfn, &t);

-    /* There are two legitimate reasons for taking an EPT violation.
+    /* There are three legitimate reasons for taking an EPT violation.
      * One is a guest access to MMIO space. */
     if ( gla_validity == EPT_GLA_VALIDITY_MATCH && p2m_is_mmio(t) )
     {
@@ -2109,15 +2109,18 @@
         return;
     }

-    /* The other is log-dirty mode, writing to a read-only page */
-    if ( paging_mode_log_dirty(d)
-         && (gla_validity == EPT_GLA_VALIDITY_MATCH
-             || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
+    /* The second is log-dirty mode, writing to a read-only page;
+     * The third is populating a populate-on-demand page. */
+    if ( (gla_validity == EPT_GLA_VALIDITY_MATCH
+          || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
          && p2m_is_ram(t) && (t != p2m_ram_ro) )
     {
-        paging_mark_dirty(d, mfn_x(mfn));
-        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        if ( paging_mode_log_dirty(d) )
+        {
+            paging_mark_dirty(d, mfn_x(mfn));
+            p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            flush_tlb_mask(d->domain_dirty_cpumask);
+        }
         return;
     }

diff -r 65c24b33082a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Dec 17 12:59:28 2008 +0000
+++ b/xen/arch/x86/mm/p2m.c     Wed Dec 17 12:59:50 2008 +0000
@@ -734,7 +734,7 @@
             continue;
         }

-        p2mfn = gfn_to_mfn_foreign(d, gfn, &type);
+        p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
         if ( mfn_x(p2mfn) != mfn )
         {
             mpbad++;
@@ -752,7 +752,7 @@

         if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
         {
-            lp2mfn = mfn_x(gfn_to_mfn(d, gfn, &type));
+            lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type));
             if ( lp2mfn != mfn_x(p2mfn) )
             {
                 P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
@@ -963,7 +963,7 @@
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = gfn_to_mfn(d, gfn + i, &ot);
+        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
         if ( p2m_is_ram(ot) )
         {
             ASSERT(mfn_valid(omfn));
@@ -988,7 +988,7 @@
              * address */
             P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                       mfn + i, ogfn, gfn + i);
-            omfn = gfn_to_mfn(d, ogfn, &ot);
+            omfn = gfn_to_mfn_query(d, ogfn, &ot);
             if ( p2m_is_ram(ot) )
             {
                 ASSERT(mfn_valid(omfn));
@@ -1157,7 +1157,7 @@
     if ( !paging_mode_translate(d) )
         return 0;

-    omfn = gfn_to_mfn(d, gfn, &ot);
+    omfn = gfn_to_mfn_query(d, gfn, &ot);
     if ( p2m_is_ram(ot) )
     {
         ASSERT(mfn_valid(omfn));
diff -r 65c24b33082a xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Dec 17 12:59:28 2008 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Dec 17 12:59:50 2008 +0000
@@ -2170,7 +2170,7 @@
     if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
     {
         gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
-        mfn_t gl3mfn = gfn_to_mfn(d, gl3gfn, &p2mt);
+        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
         else
@@ -2227,7 +2227,7 @@
     if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
     {
         gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
-        mfn_t gl2mfn = gfn_to_mfn(v->domain, gl2gfn, &p2mt);
+        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
         else
@@ -2276,7 +2276,7 @@
         }
         else
         {
-            mfn_t gl1mfn = gfn_to_mfn(v->domain, gl1gfn, &p2mt);
+            mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
             if ( p2m_is_ram(p2mt) )
                 sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
             else
@@ -2346,7 +2346,7 @@
     perfc_incr(shadow_validate_gl1e_calls);

     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);

     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
@@ -2406,7 +2406,7 @@
             shadow_l1e_t nsl1e;

             gfn = guest_l1e_get_gfn(gl1e);
-            gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+            gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
             l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
             rc |= shadow_set_l1e(v, sl1p, nsl1e, sl1mfn);

@@ -2723,7 +2723,7 @@

         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);

         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
@@ -3079,7 +3079,7 @@

     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.l1e);
-    gmfn = gfn_to_mfn(d, gfn, &p2mt);
+    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);

     if ( shadow_mode_refcounts(d) &&
          (!p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
@@ -4126,7 +4126,7 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
@@ -4139,7 +4139,7 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
                                            ? SH_type_l2h_shadow
@@ -4525,7 +4525,12 @@
     }

     /* Translate the GFN to an MFN */
-    mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+    /* PoD: query only if shadow lock is held (to avoid deadlock) */
+    if ( shadow_locked_by_me(v->domain) )
+        mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
+    else
+        mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+
     if ( p2mt == p2m_ram_ro )
         return _mfn(READONLY_GFN);
     if ( !p2m_is_ram(p2mt) )
@@ -4929,7 +4934,7 @@
             {
                 gfn = guest_l1e_get_gfn(*gl1e);
                 mfn = shadow_l1e_get_mfn(*sl1e);
-                gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+                gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
                 if ( mfn_x(gmfn) != mfn_x(mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -4996,7 +5001,7 @@
             mfn = shadow_l2e_get_mfn(*sl2e);
             gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
                 ? get_fl1_shadow_status(v, gfn)
-                : get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+                : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                     SH_type_l1_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
@@ -5004,7 +5009,7 @@
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn),
                            (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
-                           : mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt)),
+                           : mfn_x(gfn_to_mfn_query(v->domain, gfn, &p2mt)),
                            mfn_x(gmfn), mfn_x(mfn));
         }
     });
@@ -5043,7 +5048,7 @@
         {
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
-            gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
                                        is_pv_32on64_vcpu(v))
                                       && !shadow_mode_external(v->domain)
@@ -5090,7 +5095,7 @@
         {
             gfn = guest_l4e_get_gfn(*gl4e);
             mfn = shadow_l4e_get_mfn(*sl4e);
-            gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                      SH_type_l3_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
diff -r 65c24b33082a xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Wed Dec 17 12:59:28 2008 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Wed Dec 17 12:59:50 2008 +0000
@@ -191,6 +191,12 @@
 })
 #endif

+ /* Override gfn_to_mfn to work with gfn_t */
+#undef gfn_to_mfn_query
+#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#undef gfn_to_mfn_guest
+#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+
 /* The shadow types needed for the various levels. */

 #if GUEST_PAGING_LEVELS == 2
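
For anyone unfamiliar with the wrapper trick in the shadow/types.h hunk
above: the per-level shadow code passes gfn_t values rather than raw frame
numbers, so the query/guest lookups are re-defined to unwrap the gfn with
gfn_x() and to pass an explicit query type to the underlying helper.  A
toy, self-contained model of that pattern (all types and the helper below
are simplified stand-ins, not the real Xen definitions):

    #include <stdio.h>

    /* Stand-in types modelling gfn_t / mfn_t and the p2m query kinds. */
    typedef struct { unsigned long g; } gfn_t;
    typedef unsigned long mfn_t;
    typedef enum { p2m_query, p2m_guest, p2m_alloc } p2m_query_t;

    #define gfn_x(g) ((g).g)
    #define _gfn(x)  ((gfn_t){ (x) })

    /* Stand-in for the underlying lookup that takes a raw frame number
     * plus an explicit query type. */
    static mfn_t _gfn_to_mfn_type(unsigned long gfn, p2m_query_t q)
    {
        printf("lookup of gfn %#lx with query type %d\n", gfn, (int)q);
        return gfn;   /* identity mapping, purely for illustration */
    }

    /* Same wrapping idea as the hunk above: unwrap gfn_t, fix the type. */
    #define gfn_to_mfn_query(g) _gfn_to_mfn_type(gfn_x(g), p2m_query)
    #define gfn_to_mfn_guest(g) _gfn_to_mfn_type(gfn_x(g), p2m_guest)

    int main(void)
    {
        gfn_t gfn = _gfn(0x1234UL);
        mfn_t m1 = gfn_to_mfn_query(gfn);   /* non-populating lookup */
        mfn_t m2 = gfn_to_mfn_guest(gfn);   /* lookup for a guest access */
        printf("query -> mfn %#lx, guest -> mfn %#lx\n", m1, m2);
        return 0;
    }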

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
