WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] Replace the MMUEXTOP 'pfn hole' commands with a new

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Replace the MMUEXTOP 'pfn hole' commands with a new
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 30 Jan 2006 15:00:20 +0000
Delivery-date: Mon, 30 Jan 2006 15:24:33 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1db05e589fa029a676dae172bdb980aadb83958e
# Parent  ce057aa33cadc2a71ed5ef715217e577fc867408
Replace the MMUEXTOP 'pfn hole' commands with a new
arch-specific XENMEM_ operation.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r ce057aa33cad -r 1db05e589fa0 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Sat Jan 28 12:01:19 2006
+++ b/tools/libxc/xc_linux_build.c      Sat Jan 28 14:31:43 2006
@@ -114,9 +114,12 @@
             vl2e++;
         }
 
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) && 
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
@@ -196,9 +199,12 @@
                 *vl2e++ = l1tab | L2_PROT;
         }
         
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
@@ -289,9 +295,12 @@
             vl2e++;
         }
         
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
@@ -442,7 +451,9 @@
     {
         ctxt->initrd.start    = vinitrd_start;
         ctxt->initrd.size     = initrd_len;
-    } else {
+    }
+    else
+    {
         ctxt->initrd.start    = 0;
         ctxt->initrd.size     = 0;
     }
@@ -553,12 +564,15 @@
         if ( (v_end - vstack_end) < (512UL << 10) )
             v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
 #if defined(__i386__)
-        if (dsi.pae_kernel) {
+        if ( dsi.pae_kernel )
+        {
             /* FIXME: assumes one L2 pgtable @ 0xc0000000 */
             if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >> 
                    L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
                 break;
-        } else {
+        }
+        else
+        {
             if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >> 
                    L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
                 break;
@@ -676,23 +690,33 @@
     if ( xc_finish_mmu_updates(xc_handle, mmu) )
         goto error_out;
 
-    if (shadow_mode_enabled) {
+    if ( shadow_mode_enabled )
+    {
+        struct xen_reserved_phys_area xrpa;
+
         /* Enable shadow translate mode */
-        if (xc_shadow_control(xc_handle, dom,
-                              DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
-                              NULL, 0, NULL) < 0) {
+        if ( xc_shadow_control(xc_handle, dom,
+                               DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
+                               NULL, 0, NULL) < 0 )
+        {
             PERROR("Could not enable translation mode");
             goto error_out;
         }
 
         /* Find the shared info frame.  It's guaranteed to be at the
            start of the PFN hole. */
-        guest_shared_info_mfn = xc_get_pfn_hole_start(xc_handle, dom);
-        if (guest_shared_info_mfn <= 0) {
+        xrpa.domid = dom;
+        xrpa.idx   = 0;
+        rc = xc_memory_op(xc_handle, XENMEM_reserved_phys_area, &xrpa);
+        if ( rc != 0 )
+        {
             PERROR("Cannot find shared info pfn");
             goto error_out;
         }
-    } else {
+        guest_shared_info_mfn = xrpa.first_pfn;
+    }
+    else
+    {
         guest_shared_info_mfn = shared_info_frame;
     }
 
@@ -723,12 +747,16 @@
      * Pin down l2tab addr as page dir page - causes hypervisor to provide
      * correct protection for the page
      */
-    if (!shadow_mode_enabled) {
-        if (dsi.pae_kernel) {
+    if ( !shadow_mode_enabled )
+    {
+        if ( dsi.pae_kernel )
+        {
             if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
                            ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
                 goto error_out;
-        } else {
+        }
+        else
+        {
             if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
                            ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
                 goto error_out;
@@ -751,10 +779,13 @@
     if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
          xc_clear_domain_page(xc_handle, dom, *console_mfn) )
         goto error_out;
-    if (shadow_mode_enabled) {
+    if ( shadow_mode_enabled )
+    {
         guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
         guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
-    } else {
+    }
+    else
+    {
         guest_store_mfn = *store_mfn;
         guest_console_mfn = *console_mfn;
     }
diff -r ce057aa33cad -r 1db05e589fa0 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Sat Jan 28 12:01:19 2006
+++ b/tools/libxc/xc_misc.c     Sat Jan 28 14:31:43 2006
@@ -131,13 +131,6 @@
     return rc;
 }
 
-int xc_get_pfn_hole_start(int xc_handle, domid_t dom)
-{
-    struct mmuext_op op = {0};
-    op.cmd = MMUEXT_PFN_HOLE_BASE;
-    return xc_mmuext_op(xc_handle, &op, 1, dom);
-}
-
 
 /*
  * Local variables:
diff -r ce057aa33cad -r 1db05e589fa0 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Sat Jan 28 12:01:19 2006
+++ b/tools/libxc/xc_private.c  Sat Jan 28 14:31:43 2006
@@ -201,6 +201,7 @@
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
+    case XENMEM_populate_physmap:
         if ( mlock(reservation, sizeof(*reservation)) != 0 )
         {
             PERROR("Could not mlock");
@@ -229,6 +230,13 @@
             goto out1;
         }
         break;
+    case XENMEM_reserved_phys_area:
+        if ( mlock(arg, sizeof(struct xen_reserved_phys_area)) )
+        {
+            PERROR("Could not mlock");
+            goto out1;
+        }
+        break;
     }
 
     ret = do_xen_hypercall(xc_handle, &hypercall);
@@ -237,6 +245,7 @@
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
+    case XENMEM_populate_physmap:
         safe_munlock(reservation, sizeof(*reservation));
         if ( reservation->extent_start != NULL )
             safe_munlock(reservation->extent_start,
@@ -246,6 +255,9 @@
         safe_munlock(xmml, sizeof(*xmml));
         safe_munlock(xmml->extent_start,
                      xmml->max_extents * sizeof(unsigned long));
+        break;
+    case XENMEM_reserved_phys_area:
+        safe_munlock(arg, sizeof(struct xen_reserved_phys_area));
         break;
     }
 
diff -r ce057aa33cad -r 1db05e589fa0 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Sat Jan 28 12:01:19 2006
+++ b/tools/libxc/xenctrl.h     Sat Jan 28 14:31:43 2006
@@ -528,6 +528,4 @@
                    unsigned long long ptr, unsigned long long val);
 int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
 
-int xc_get_pfn_hole_start(int xc_handle, domid_t dom);
-
 #endif
diff -r ce057aa33cad -r 1db05e589fa0 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sat Jan 28 12:01:19 2006
+++ b/xen/arch/x86/mm.c Sat Jan 28 14:31:43 2006
@@ -104,6 +104,7 @@
 #include <asm/uaccess.h>
 #include <asm/ldt.h>
 #include <asm/x86_emulate.h>
+#include <public/memory.h>
 
 #ifdef VERBOSE
 #define MEM_LOG(_f, _a...)                           \
@@ -1930,56 +1931,6 @@
             break;
         }
 
-        case MMUEXT_PFN_HOLE_BASE:
-        {
-            if (FOREIGNDOM->start_pfn_hole) {
-                rc = FOREIGNDOM->start_pfn_hole;
-                okay = 1;
-            } else {
-                rc = FOREIGNDOM->start_pfn_hole =
-                    FOREIGNDOM->max_pages;
-                okay = 1;
-                if (shadow_mode_translate(FOREIGNDOM)) {
-                    /* Fill in a few entries in the hole.  At the
-                       moment, this means the shared info page and the
-                       grant table pages. */
-                    struct domain_mmap_cache c1, c2;
-                    unsigned long pfn, mfn, x;
-                    domain_mmap_cache_init(&c1);
-                    domain_mmap_cache_init(&c2);
-                    shadow_lock(FOREIGNDOM);
-                    pfn = FOREIGNDOM->start_pfn_hole;
-                    mfn = virt_to_phys(FOREIGNDOM->shared_info) >> PAGE_SHIFT;
-                    set_p2m_entry(FOREIGNDOM, pfn, mfn, &c1, &c2);
-                    set_pfn_from_mfn(mfn, pfn);
-                    pfn++;
-                    for (x = 0; x < NR_GRANT_FRAMES; x++) {
-                        mfn = gnttab_shared_mfn(FOREIGNDOM,
-                                                FOREIGNDOM->grant_table,
-                                                x);
-                        set_p2m_entry(FOREIGNDOM, pfn, mfn, &c1, &c2);
-                        set_pfn_from_mfn(mfn, pfn);
-                        pfn++;
-                    }
-                    shadow_unlock(FOREIGNDOM);
-                    domain_mmap_cache_destroy(&c1);
-                    domain_mmap_cache_destroy(&c2);
-                }
-            }
-            break;
-        }
-
-        case MMUEXT_PFN_HOLE_SIZE:
-        {
-            if (shadow_mode_translate(FOREIGNDOM)) {
-                rc = PFN_HOLE_SIZE;
-            } else {
-                rc = 0;
-            }
-            okay = 1;
-            break;
-        }
-
         default:
             MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
             okay = 0;
@@ -2815,6 +2766,62 @@
 }
 
 
+long arch_memory_op(int op, void *arg)
+{
+    struct xen_reserved_phys_area xrpa;
+    unsigned long pfn;
+    struct domain *d;
+    unsigned int i;
+
+    switch ( op )
+    {
+    case XENMEM_reserved_phys_area:
+        if ( copy_from_user(&xrpa, arg, sizeof(xrpa)) )
+            return -EFAULT;
+
+        /* No guest has more than one reserved area. */
+        if ( xrpa.idx != 0 )
+            return -ESRCH;
+
+        if ( (d = find_domain_by_id(xrpa.domid)) == NULL )
+            return -ESRCH;
+
+        /* Only initialised translated guests have a reserved area. */
+        if ( !shadow_mode_translate(d) || (d->max_pages == 0) )
+        {
+            put_domain(d);
+            return -ESRCH;
+        }
+
+        LOCK_BIGLOCK(d);
+        if ( d->arch.first_reserved_pfn == 0 )
+        {
+            d->arch.first_reserved_pfn = pfn = d->max_pages;
+            guest_physmap_add_page(
+                d, pfn + 0, virt_to_phys(d->shared_info) >> PAGE_SHIFT);
+            for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+                guest_physmap_add_page(
+                    d, pfn + 1 + i, gnttab_shared_mfn(d, d->grant_table, i));
+        }
+        UNLOCK_BIGLOCK(d);
+
+        xrpa.first_pfn = d->arch.first_reserved_pfn;
+        xrpa.nr_pfns   = 32;
+
+        put_domain(d);
+
+        if ( copy_to_user(arg, &xrpa, sizeof(xrpa)) )
+            return -EFAULT;
+
+        break;
+
+    default:
+        return subarch_memory_op(op, arg);
+    }
+
+    return 0;
+}
+
 
 /*************************
  * Writable Pagetables
diff -r ce057aa33cad -r 1db05e589fa0 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Sat Jan 28 12:01:19 2006
+++ b/xen/arch/x86/x86_32/mm.c  Sat Jan 28 14:31:43 2006
@@ -182,7 +182,7 @@
     }
 }
 
-long arch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, void *arg)
 {
     struct xen_machphys_mfn_list xmml;
     unsigned long mfn;
diff -r ce057aa33cad -r 1db05e589fa0 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Sat Jan 28 12:01:19 2006
+++ b/xen/arch/x86/x86_64/mm.c  Sat Jan 28 14:31:43 2006
@@ -182,7 +182,7 @@
     }
 }
 
-long arch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, void *arg)
 {
     struct xen_machphys_mfn_list xmml;
     l3_pgentry_t l3e;
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Sat Jan 28 12:01:19 2006
+++ b/xen/include/asm-x86/config.h      Sat Jan 28 14:31:43 2006
@@ -59,9 +59,6 @@
 #define STACK_ORDER 2
 #endif
 #endif
-
-/* How large is the PFN reserved area, when we have one? */
-#define PFN_HOLE_SIZE 32
 
 #ifndef STACK_ORDER
 #define STACK_ORDER 1
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Sat Jan 28 12:01:19 2006
+++ b/xen/include/asm-x86/domain.h      Sat Jan 28 14:31:43 2006
@@ -109,6 +109,9 @@
 
     pagetable_t         phys_table;         /* guest 1:1 pagetable */
     struct vmx_platform vmx_platform;
+
+    /* Shadow-translated guest: Pseudophys base address of reserved area. */
+    unsigned long first_reserved_pfn;
 } __cacheline_aligned;
 
 struct arch_vcpu
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Sat Jan 28 12:01:19 2006
+++ b/xen/include/asm-x86/mm.h  Sat Jan 28 14:31:43 2006
@@ -382,5 +382,6 @@
 
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, void *arg);
+long subarch_memory_op(int op, void *arg);
 
 #endif /* __ASM_X86_MM_H__ */
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/public/memory.h
--- a/xen/include/public/memory.h       Sat Jan 28 12:01:19 2006
+++ b/xen/include/public/memory.h       Sat Jan 28 14:31:43 2006
@@ -94,6 +94,26 @@
     unsigned int nr_extents;
 } xen_machphys_mfn_list_t;
 
+/*
+ * Returns the base and size of the specified reserved 'RAM hole' in the
+ * specified guest's pseudophysical address space.
+ * arg == addr of xen_reserved_phys_area_t.
+ */
+#define XENMEM_reserved_phys_area   7
+typedef struct xen_reserved_phys_area {
+    /* Which request to report about? */
+    domid_t domid;
+
+    /*
+     * Which reserved area to report? Out-of-range request reports
+     * -ESRCH. Currently no architecture will have more than one reserved area.
+     */
+    unsigned int idx;
+
+    /* Base and size of the specified reserved area. */
+    unsigned long first_pfn, nr_pfns;
+} xen_reserved_phys_area_t;
+
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
 /*
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Sat Jan 28 12:01:19 2006
+++ b/xen/include/public/xen.h  Sat Jan 28 14:31:43 2006
@@ -144,15 +144,6 @@
  * cmd: MMUEXT_SET_LDT
  * linear_addr: Linear address of LDT base (NB. must be page-aligned).
  * nr_ents: Number of entries in LDT.
- *
- * cmd: MMUEXT_PFN_HOLE_BASE
- * No additional arguments.  Returns the first pfn in the Xen-reserved
- * pfn hole.  Note that we delay allocating the hole until the first
- * time this is called.
- *
- * cmd: MMUEXT_PFN_HOLE_SIZE
- * No additional arguments.  Returns the number of pfns in the
- * Xen-reserved pfn hole.
  */
 #define MMUEXT_PIN_L1_TABLE      0
 #define MMUEXT_PIN_L2_TABLE      1
@@ -169,8 +160,6 @@
 #define MMUEXT_FLUSH_CACHE      12
 #define MMUEXT_SET_LDT          13
 #define MMUEXT_NEW_USER_BASEPTR 15
-#define MMUEXT_PFN_HOLE_BASE    16
-#define MMUEXT_PFN_HOLE_SIZE    17
 
 #ifndef __ASSEMBLY__
 struct mmuext_op {
diff -r ce057aa33cad -r 1db05e589fa0 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Sat Jan 28 12:01:19 2006
+++ b/xen/include/xen/sched.h   Sat Jan 28 14:31:43 2006
@@ -153,9 +153,6 @@
 
     /* Control-plane tools handle for this domain. */
     xen_domain_handle_t handle;
-
-    /* Start of the PFN hole */
-    unsigned long start_pfn_hole;
 };
 
 struct domain_setup_info

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] Replace the MMUEXTOP 'pfn hole' commands with a new, Xen patchbot -unstable <=