To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] New P2M types for memory paging and supporting functions.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 16 Dec 2009 22:40:25 -0800
Delivery-date: Wed, 16 Dec 2009 22:42:01 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031275 0
# Node ID 01037b222d745bf1fc07a9720bc1cdb1f70ceed9
# Parent  c9fb3c514f65ab5f2c348215fa3affa462e94556
New P2M types for memory paging and supporting functions.
Several new types need to be added to represent the various stages a page
can be in while being paged out or in. Xen will sometimes make different
decisions based on these types.
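
For illustration only (not part of this changeset): a rough sketch of how a
caller inside Xen might drive a single gfn through the new states, based on
the transitions the functions below perform. The wrapper name is hypothetical
and error handling is elided.

/* Hypothetical sketch, not in this changeset: page one gfn out and back in
 * using the new helpers.  Assumes a valid struct domain *d and a pageable
 * gfn; the pager's out-of-band save/restore of the page contents is only
 * hinted at in comments. */
static void paging_example(struct domain *d, unsigned long gfn)
{
    /* Page-out: p2m_ram_rw -> p2m_ram_paging_out -> p2m_ram_paged */
    if ( p2m_mem_paging_nominate(d, gfn) )
        return;                        /* gfn is not pageable right now */
    /* ... pager saves the page contents out of band ... */
    if ( p2m_mem_paging_evict(d, gfn) )
        return;                        /* frame is still backed by RAM */

    /* Page-in: p2m_ram_paged -> p2m_ram_paging_in_start ->
     * p2m_ram_paging_in -> p2m_ram_rw.  A guest access would normally
     * start this via p2m_mem_paging_populate(). */
    p2m_mem_paging_populate(d, gfn);   /* notify pager, maybe pause vcpu */
    if ( p2m_mem_paging_prep(d, gfn) ) /* allocate a fresh domheap page */
        return;
    /* ... pager restores the saved contents and responds on the ring ... */
    p2m_mem_paging_resume(d);          /* back to p2m_ram_rw, unpause */
}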

Signed-off-by: Patrick Colp <Patrick.Colp@xxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c     |  161 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h |   40 ++++++++++-
 2 files changed, 198 insertions(+), 3 deletions(-)

diff -r c9fb3c514f65 -r 01037b222d74 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Dec 17 06:27:55 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c     Thu Dec 17 06:27:55 2009 +0000
@@ -3,6 +3,7 @@
  *
  * physical-to-machine mappings for automatically-translated domains.
  *
+ * Parts of this code are Copyright (c) 2009 by Citrix (R&D) Ltd. (Patrick Colp)
  * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
  * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
@@ -29,6 +30,9 @@
 #include <asm/p2m.h>
 #include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
 #include <xen/iommu.h>
+#include <asm/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/event.h>
 
 /* Debugging and auditing of the P2M code? */
 #define P2M_AUDIT     0
@@ -2297,6 +2301,163 @@ clear_mmio_p2m_entry(struct domain *d, u
     return rc;
 }
 
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+    int ret;
+
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
+
+    /* Check if mfn is valid */
+    ret = -EINVAL;
+    if ( !mfn_valid(mfn) )
+        goto out;
+
+    /* Check p2m type */
+    ret = -EAGAIN;
+    if ( !p2m_is_pageable(p2mt) )
+        goto out;
+
+    /* Check for io memory page */
+    if ( is_iomem_page(mfn_x(mfn)) )
+        goto out;
+
+    /* Check page count and type */
+    page = mfn_to_page(mfn);
+    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
+         (1 | PGC_allocated) )
+        goto out;
+
+    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
+        goto out;
+
+    /* Fix p2m entry */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, mfn, 0, p2m_ram_paging_out);
+    p2m_unlock(d->arch.p2m);
+
+    ret = 0;
+
+ out:
+    return ret;
+}
+
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    /* Get mfn */
+    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    if ( unlikely(!mfn_valid(mfn)) )
+        return -EINVAL;
+
+    if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
+         (p2mt == p2m_ram_paging_in_start) )
+        return -EINVAL;
+
+    /* Get the page so it doesn't get modified under Xen's feet */
+    page = mfn_to_page(mfn);
+    if ( unlikely(!get_page(page, d)) )
+        return -EINVAL;
+
+    /* Decrement guest domain's ref count of the page */
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+
+    /* Remove mapping from p2m table */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paged);
+    p2m_unlock(d->arch.p2m);
+
+    /* Put the page back so it gets freed */
+    put_page(page);
+
+    return 0;
+}
+
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    p2m_type_t p2mt;
+
+    memset(&req, 0, sizeof(req));
+
+    /* Check that there's space on the ring for this request */
+    if ( mem_event_check_ring(d) )
+        return;
+
+    /* Fix p2m mapping */
+    /* XXX: It seems inefficient to have this here, as it's only needed
+     *      in one case (ept guest accessing paging out page) */
+    gfn_to_mfn(d, gfn, &p2mt);
+    if ( p2mt != p2m_ram_paging_out )
+    {
+        p2m_lock(d->arch.p2m);
+        set_p2m_entry(d, gfn, _mfn(PAGING_MFN), 0, p2m_ram_paging_in_start);
+        p2m_unlock(d->arch.p2m);
+    }
+
+    /* Pause domain */
+    if ( v->domain->domain_id == d->domain_id )
+    {
+        vcpu_pause_nosync(v);
+        req.flags |= MEM_EVENT_FLAG_PAUSED;
+    }
+
+    /* Send request to pager */
+    req.gfn = gfn;
+    req.p2mt = p2mt;
+    req.vcpu_id = v->vcpu_id;
+
+    mem_event_put_request(d, &req);
+}
+
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
+{
+    struct page_info *page;
+
+    /* Get a free page */
+    page = alloc_domheap_page(d, 0);
+    if ( unlikely(page == NULL) )
+        return -EINVAL;
+
+    /* Fix p2m mapping */
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, gfn, page_to_mfn(page), 0, p2m_ram_paging_in);
+    p2m_unlock(d->arch.p2m);
+
+    return 0;
+}
+
+void p2m_mem_paging_resume(struct domain *d)
+{
+    mem_event_response_t rsp;
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    /* Pull the response off the ring */
+    mem_event_get_response(d, &rsp);
+
+    /* Fix p2m entry */
+    mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
+    p2m_lock(d->arch.p2m);
+    set_p2m_entry(d, rsp.gfn, mfn, 0, p2m_ram_rw);
+    p2m_unlock(d->arch.p2m);
+
+    /* Unpause domain */
+    if ( rsp.flags & MEM_EVENT_FLAG_PAUSED )
+        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+
+    /* Unpause any domains that were paused because the ring was full */
+    mem_event_unpause_vcpus(d);
+}
+
+
 /*
  * Local variables:
  * mode: C
diff -r c9fb3c514f65 -r 01037b222d74 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Dec 17 06:27:55 2009 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Dec 17 06:27:55 2009 +0000
@@ -75,6 +75,11 @@ typedef enum {
        #ifdef's everywhere else. */
     p2m_grant_map_rw = 7,       /* Read/write grant mapping */
     p2m_grant_map_ro = 8,       /* Read-only grant mapping */
+
+    p2m_ram_paging_out = 9,       /* Memory that is being paged out */
+    p2m_ram_paged = 10,           /* Memory that has been paged out */
+    p2m_ram_paging_in = 11,       /* Memory that is being paged in (page allocated) */
+    p2m_ram_paging_in_start = 12, /* Memory that is being paged in (pager notified) */
 } p2m_type_t;
 
 typedef enum {
@@ -87,9 +92,13 @@ typedef enum {
 #define p2m_to_mask(_t) (1UL << (_t))
 
 /* RAM types, which map to real machine frames */
-#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)          \
-                       | p2m_to_mask(p2m_ram_logdirty)  \
-                       | p2m_to_mask(p2m_ram_ro))
+#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)                \
+                       | p2m_to_mask(p2m_ram_logdirty)        \
+                       | p2m_to_mask(p2m_ram_ro)              \
+                       | p2m_to_mask(p2m_ram_paging_out)      \
+                       | p2m_to_mask(p2m_ram_paged)           \
+                       | p2m_to_mask(p2m_ram_paging_in_start) \
+                       | p2m_to_mask(p2m_ram_paging_in))
 
 /* Grant mapping types, which map to a real machine frame in another
  * VM */
@@ -106,6 +115,16 @@ typedef enum {
                       | p2m_to_mask(p2m_grant_map_ro) )
 
 #define P2M_MAGIC_TYPES (p2m_to_mask(p2m_populate_on_demand))
+
+/* Pageable types */
+#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw))
+
+#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
+                          | p2m_to_mask(p2m_ram_paged)           \
+                          | p2m_to_mask(p2m_ram_paging_in_start) \
+                          | p2m_to_mask(p2m_ram_paging_in))
+
+#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
 
 /* Useful predicates */
 #define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
@@ -118,11 +137,15 @@ typedef enum {
    implementations, there's no way of synchronising against that. */
 #define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
 #define p2m_has_emt(_t)  (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
+#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
+#define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)
+#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
 
 /* Populate-on-demand */
 #define POPULATE_ON_DEMAND_MFN  (1<<9)
 #define POD_PAGE_ORDER 9
 
+#define PAGING_MFN  INVALID_MFN
 
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -369,6 +392,17 @@ int set_mmio_p2m_entry(struct domain *d,
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 
+/* Check if a nominated gfn is valid to be paged out */
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
+/* Evict a frame */
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
+/* Start populating a paged out frame */
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
+/* Prepare the p2m for paging a frame in */
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
+/* Resume normal operation (in case a domain was paused) */
+void p2m_mem_paging_resume(struct domain *d);
+
 #endif /* _XEN_P2M_H */
 
 /*
