WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 07/16] xenpaging: populate only paged-out pages

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 07/16] xenpaging: populate only paged-out pages
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Tue, 02 Nov 2010 23:30:17 +0100
Delivery-date: Tue, 02 Nov 2010 15:35:35 -0700
Dkim-signature: v=1; a=rsa-sha1; c=relaxed/relaxed; t=1288737015; l=3565; s=domk; d=aepfle.de; h=References:Subject:To:From:Date:X-RZG-CLASS-ID:X-RZG-AUTH; bh=+8D+NPXt2cXuMcOMNem6eOAAxnI=; b=JWnixrLzSXl/arS52Gkx4yHzzJhtxgT31KnWMz10jiy6Z1IxeiNQvi3u9K7A3nL3sX8 cmIbCyqn5hjtTgxuo3S/gURu5kFzoFfdJQiUhacStd03rTdP0zj65oPaEEv0wNvnna0U0 0KFG5gQbAJ9yRn0w36AVySWig4gc9TvoheE=
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20101102223010.603002116@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: quilt/0.48-4.4
Populate a paged-out page only once, to reduce pressure on the ring buffer.
Several CPUs may still request the same page at once; xenpaging can handle this.

Open question: could this miss pages that are still in the paging-out state?

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/emulate.c       |    3 ++-
 xen/arch/x86/hvm/hvm.c           |   15 ++++++++++-----
 xen/arch/x86/mm/guest_walk.c     |    3 ++-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++++--
 4 files changed, 18 insertions(+), 9 deletions(-)

--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/hvm/emulate.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/hvm/emulate.c
@@ -66,7 +66,8 @@ static int hvmemul_do_io(
     ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, ram_gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/hvm/hvm.c
@@ -345,7 +345,8 @@ static int hvm_set_ioreq_page(
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gmfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1369,7 +1370,8 @@ static void *__hvm_map_guest_frame(unsig
         return NULL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, gfn);
         return NULL;
     }
 
@@ -1811,7 +1813,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3118,7 +3121,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(p2m, pfn);
 
                 rc = -EINVAL;
                 goto param_fail3;
@@ -3184,7 +3188,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(p2m, pfn);
 
                 rc = -EINVAL;
                 goto param_fail4;
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
     *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn_x(gfn));
+        if ( p2m_is_paged(*p2mt) )
+            p2m_mem_paging_populate(p2m, gfn_x(gfn));
 
         *rc = _PAGE_PAGED;
         return NULL;
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/mm/hap/guest_walk.c
@@ -50,7 +50,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
@@ -82,7 +83,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn_x(gfn));
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

<Prev in Thread] Current Thread [Next in Thread>