Populate a paged-out page only once, to reduce pressure on the paging ring buffer.
Several CPUs may still request the same page at once; xenpaging can handle this.
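
For context, the change relies on the difference between the two p2m type predicates: only the fully evicted state should trigger a populate request. A minimal sketch of how the predicates are assumed to look (the real definitions live in xen/include/asm-x86/p2m.h; the exact state names may differ in this tree):

/* Sketch only, not copied from this tree: assumed shape of the p2m
 * paging predicates used in the hunks below. */
#define p2m_to_mask(_t)  (1UL << (_t))

/* Every state a gfn passes through while xenpaging handles it. */
#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
                          | p2m_to_mask(p2m_ram_paged)           \
                          | p2m_to_mask(p2m_ram_paging_in_start) \
                          | p2m_to_mask(p2m_ram_paging_in))

/* Only the fully evicted state, where a populate request is still needed. */
#define P2M_PAGED_TYPES  (p2m_to_mask(p2m_ram_paged))

#define p2m_is_paging(_t)  (p2m_to_mask(_t) & P2M_PAGING_TYPES)
#define p2m_is_paged(_t)   (p2m_to_mask(_t) & P2M_PAGED_TYPES)

With that assumption, only the first vcpu that finds the gfn in p2m_ram_paged queues a populate request; p2m_mem_paging_populate() is expected to move the gfn to p2m_ram_paging_in_start, so vcpus racing on the same gfn afterwards still take the p2m_is_paging() retry/error path but no longer put another request into the ring.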
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c       |    3 ++-
 xen/arch/x86/hvm/hvm.c           |   17 ++++++++++-------
 xen/arch/x86/mm/guest_walk.c     |    3 ++-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++++--
4 files changed, 18 insertions(+), 11 deletions(-)
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/emulate.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/emulate.c
@@ -65,7 +65,8 @@ static int hvmemul_do_io(
ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, ram_gfn);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
if ( p2m_is_shared(p2mt) )
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
@@ -291,7 +291,8 @@ static int hvm_set_ioreq_page(
return -EINVAL;
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(d, gmfn);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(d, gmfn);
return -ENOENT;
}
if ( p2m_is_shared(p2mt) )
@@ -1324,7 +1325,8 @@ static void *hvm_map_entry(unsigned long
mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(current->domain, gfn);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(current->domain, gfn);
return NULL;
}
if ( p2m_is_shared(p2mt) )
@@ -1723,7 +1725,8 @@ static enum hvm_copy_result __hvm_copy(
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, gfn);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
@@ -3032,8 +3035,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn_t mfn = gfn_to_mfn(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
-
+ if ( p2m_is_paged(t) )
+ p2m_mem_paging_populate(d, pfn);
rc = -EINVAL;
goto param_fail3;
}
@@ -3096,8 +3099,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn = gfn_to_mfn_unshare(d, pfn, &t, 0);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
-
+ if ( p2m_is_paged(t) )
+ p2m_mem_paging_populate(d, pfn);
rc = -EINVAL;
goto param_fail4;
}
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
*mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0);
if ( p2m_is_paging(*p2mt) )
{
- p2m_mem_paging_populate(d, gfn_x(gfn));
+ if ( p2m_is_paged(*p2mt) )
+ p2m_mem_paging_populate(d, gfn_x(gfn));
*rc = _PAGE_PAGED;
return NULL;
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/hap/guest_walk.c
@@ -49,7 +49,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
pfec[0] = PFEC_page_paged;
return INVALID_GFN;
@@ -81,7 +82,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(v->domain, gfn_x(gfn));
pfec[0] = PFEC_page_paged;
return INVALID_GFN;