
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [POWERPC][XEN] Implement guest_physmap_max_mem_pages().
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 07 Mar 2007 11:30:34 -0800
Delivery-date: Wed, 07 Mar 2007 12:43:17 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Date 1172876821 21600
# Node ID f56981f78d731b60fdb9ce2ee1b78727e004f848
# Parent  eceb9ccd84a8de9e4e3c8ced4b68e60b335b8a95
[POWERPC][XEN] Implement guest_physmap_max_mem_pages().
- Create a p2m array large enough to cover d->max_pages.
- Free it in domain_relinquish_resources().
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/domain.c        |    3 ++
 xen/arch/powerpc/mm.c            |   41 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-powerpc/domain.h |    5 +++-
 xen/include/asm-powerpc/mm.h     |    4 ++-
 xen/include/asm-powerpc/shadow.h |    4 +--
 5 files changed, 53 insertions(+), 4 deletions(-)

diff -r eceb9ccd84a8 -r f56981f78d73 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Mar 02 17:06:50 2007 -0600
+++ b/xen/arch/powerpc/domain.c Fri Mar 02 17:07:01 2007 -0600
@@ -16,6 +16,8 @@
  * Copyright IBM Corp. 2005, 2006, 2007
  *
  * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ *          Ryan Harper <ryanh@xxxxxxxxxx>
+ *          Hollis Blanchard <hollisb@xxxxxxxxxx>
  */
 
 #include <stdarg.h>
@@ -311,6 +313,7 @@ void domain_relinquish_resources(struct 
     relinquish_memory(d, &d->page_list);
     free_extents(d);
     xfree(d->arch.foreign_mfns);
+    xfree(d->arch.p2m);
     return;
 }
 
diff -r eceb9ccd84a8 -r f56981f78d73 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Fri Mar 02 17:06:50 2007 -0600
+++ b/xen/arch/powerpc/mm.c     Fri Mar 02 17:07:01 2007 -0600
@@ -536,6 +536,47 @@ unsigned long mfn_to_gmfn(struct domain 
     return INVALID_M2P_ENTRY;
 }
 
+/* NB: caller holds d->page_alloc lock, sets d->max_pages = new_max */
+int guest_physmap_max_mem_pages(struct domain *d, unsigned long new_max_pages)
+{
+    u32 *p2m_array = NULL;
+    u32 *p2m_old = NULL;
+    ulong i;
+
+    /* XXX We probably could, but right now we don't shrink the p2m array.
+     * NB: d->max_pages >= d->arch.p2m_entries */
+    if (new_max_pages < d->max_pages) {
+        printk("Can't shrink DOM%d max memory pages\n", d->domain_id);
+        return -EINVAL;
+    }
+
+    /* Allocate one u32 per page. */
+    p2m_array = xmalloc_array(u32, new_max_pages);
+    if (p2m_array == NULL)
+        return -ENOMEM;
+
+    /* Copy old mappings into new array. */
+    if (d->arch.p2m != NULL) {
+        /* XXX This could take a long time; we should use a continuation. */
+        memcpy(p2m_array, d->arch.p2m, d->arch.p2m_entries * sizeof(u32));
+        p2m_old = d->arch.p2m;
+    }
+
+    /* Mark new mfns as invalid. */
+    for (i = d->arch.p2m_entries; i < new_max_pages; i++)
+        p2m_array[i] = INVALID_MFN;
+
+    /* Set new p2m pointer and size. */
+    d->arch.p2m = p2m_array;
+    d->arch.p2m_entries = new_max_pages;
+
+    /* Free old p2m array if present. */
+    if (p2m_old)
+        xfree(p2m_old);
+
+    return 0;
+}
+
 void guest_physmap_add_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn)
 {
diff -r eceb9ccd84a8 -r f56981f78d73 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h  Fri Mar 02 17:06:50 2007 -0600
+++ b/xen/include/asm-powerpc/domain.h  Fri Mar 02 17:07:01 2007 -0600
@@ -13,7 +13,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005
+ * Copyright IBM Corp. 2005, 2007
  *
  * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
  */
@@ -46,6 +46,9 @@ struct arch_domain {
 
     /* I/O-port access bitmap mask. */
     u8 *iobmp_mask;       /* Address of IO bitmap mask, or NULL.      */
+
+    u32 *p2m; /* Array of 32-bit MFNs; with 4K pages this covers 44 bits of physical memory. */
+    ulong p2m_entries;
 
     uint large_page_sizes;
     uint large_page_order[4];
diff -r eceb9ccd84a8 -r f56981f78d73 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Fri Mar 02 17:06:50 2007 -0600
+++ b/xen/include/asm-powerpc/mm.h      Fri Mar 02 17:07:01 2007 -0600
@@ -239,7 +239,9 @@ extern unsigned long mfn_to_gmfn(struct 
 
 extern unsigned long paddr_to_maddr(unsigned long paddr);
 
-#define INVALID_MFN (~0UL)
+/* INVALID_MFN can be any value that fails mfn_valid(). */
+#define INVALID_MFN (~0U)
+
 #define PFN_TYPE_NONE 0
 #define PFN_TYPE_RMA 1
 #define PFN_TYPE_LOGICAL 2
diff -r eceb9ccd84a8 -r f56981f78d73 xen/include/asm-powerpc/shadow.h
--- a/xen/include/asm-powerpc/shadow.h  Fri Mar 02 17:06:50 2007 -0600
+++ b/xen/include/asm-powerpc/shadow.h  Fri Mar 02 17:07:01 2007 -0600
@@ -32,6 +32,8 @@
       ? machine_to_phys_mapping[(mfn)]                 \
       : (mfn) )
 
+extern int guest_physmap_max_mem_pages(struct domain *d, unsigned long new_max);
+
 extern void guest_physmap_add_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn);
 
@@ -60,7 +62,5 @@ static inline unsigned int shadow_get_al
     return (1ULL << (d->arch.htab.order + PAGE_SHIFT)) >> 20;
 }
 
-#define guest_physmap_max_mem_pages(d, n) (0)
-
 #endif
 

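For readers browsing the archive, a minimal caller-side sketch of how the new hook might be used follows. It is not part of the patch above: the wrapper name set_domain_max_pages() and the exact locking/commit sequence are assumptions for illustration, guided only by the NB comment in mm.c (the caller is expected to hold d->page_alloc lock and to set d->max_pages itself).

/* Hypothetical caller sketch, not from the patch.  Assumes the usual
 * Xen headers (<xen/sched.h> for struct domain, <xen/spinlock.h>,
 * and <asm/shadow.h> for the guest_physmap_max_mem_pages() prototype). */
static int set_domain_max_pages(struct domain *d, unsigned long new_max)
{
    int rc;

    spin_lock(&d->page_alloc_lock);

    /* Resize the p2m array first; per the patch it never shrinks and
     * returns -EINVAL if new_max is below the current d->max_pages. */
    rc = guest_physmap_max_mem_pages(d, new_max);
    if (rc == 0)
        d->max_pages = new_max;    /* commit only if the p2m array grew */

    spin_unlock(&d->page_alloc_lock);
    return rc;
}
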
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
