WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

xen-changelog

[Xen-changelog] [xen-unstable] [HVM] Move shadow initialisation into domain-creation hypercall.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] Move shadow initialisation into domain-creation hypercall.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 02 Nov 2006 22:10:13 +0000
Delivery-date: Thu, 02 Nov 2006 21:29:54 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID cf3d69ba5633284234f428b26e4465b593b70244
# Parent  96f51a000ed024728ea5653f9f0f0550affc3f8b
[HVM] Move shadow initialisation into domain-creation hypercall.
Allocate HVM guest memory in the libxc builder function rather
than in xend. Clean up fallout from these changes.

Todo: Fix ia64. Move the PV builder to the same model (it should allocate
the memory rather than xend doing so -- then it can avoid using
xc_get_pfn_list()).

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
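
For reference, the new allocation path in the libxc builder reduces to the
sketch below. The helper name is invented for illustration and error
reporting is trimmed; the GPFN/MMIO-hole arithmetic and the two hypercall
wrappers are exactly those introduced in tools/libxc/xc_hvm_build.c further
down, and PAGE_SHIFT, HVM_BELOW_4G_RAM_END and HVM_BELOW_4G_MMIO_LENGTH are
the constants that file already uses.

#include <stdint.h>
#include <stdlib.h>
#include <xenctrl.h>   /* 2006-era libxc interfaces assumed below */

/*
 * Hypothetical helper (not part of the patch) showing how setup_guest()
 * now obtains guest memory: build a GPFN list with a hole for the
 * below-4G MMIO area, ask Xen to populate the physmap, then translate
 * the GPFNs to MFNs for the later mapping and image-loading steps.
 */
static int populate_hvm_guest_memory(int xc_handle, uint32_t dom,
                                     int memsize_mb)
{
    unsigned long i, nr_pages = (unsigned long)memsize_mb << (20 - PAGE_SHIFT);
    xen_pfn_t *page_array = malloc(nr_pages * sizeof(xen_pfn_t));

    if ( page_array == NULL )
        return -1;

    /* Identity GPFN map ... */
    for ( i = 0; i < nr_pages; i++ )
        page_array[i] = i;
    /* ... with RAM above HVM_BELOW_4G_RAM_END relocated past the MMIO hole. */
    for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
        page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;

    /* The builder, not xend, now allocates the guest's memory ... */
    if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
                                           0, 0, page_array) ||
         /* ... and converts the GPFNs to MFNs in place. */
         xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
                                       page_array, page_array) )
    {
        free(page_array);
        return -1;
    }

    free(page_array);
    return 0;
}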
---
 tools/ioemu/vl.c                        |    9 +-
 tools/libxc/xc_hvm_build.c              |  121 +++++---------------------------
 tools/python/xen/xend/XendDomainInfo.py |    8 +-
 tools/python/xen/xend/image.py          |   14 ---
 xen/arch/x86/domain.c                   |   36 +++++----
 xen/arch/x86/domctl.c                   |   47 ++----------
 xen/arch/x86/mm/shadow/common.c         |    4 -
 xen/include/asm-x86/shadow.h            |    3 
 8 files changed, 71 insertions(+), 171 deletions(-)

diff -r 96f51a000ed0 -r cf3d69ba5633 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Wed Nov 01 18:32:45 2006 +0000
+++ b/tools/ioemu/vl.c  Wed Nov 01 18:37:23 2006 +0000
@@ -6420,14 +6420,13 @@ int main(int argc, char **argv)
     }
 
 #if defined(__i386__) || defined(__x86_64__)
-    if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
+    for ( i = 0; i < tmp_nr_pages; i++)
+        page_array[i] = i;
+    if (xc_domain_translate_gpfn_list(xc_handle, domid, tmp_nr_pages,
+                                      page_array, page_array)) {
         fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
         exit(-1);
     }
-
-    if (ram_size > HVM_BELOW_4G_RAM_END)
-        for (i = 0; i < nr_pages - (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT); i++)
-            page_array[tmp_nr_pages - 1 - i] = page_array[nr_pages - 1 - i];
 
     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
                                          PROT_READ|PROT_WRITE, page_array,
diff -r 96f51a000ed0 -r cf3d69ba5633 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Wed Nov 01 18:32:45 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c        Wed Nov 01 18:37:23 2006 +0000
@@ -196,7 +196,6 @@ static int setup_guest(int xc_handle,
 static int setup_guest(int xc_handle,
                        uint32_t dom, int memsize,
                        char *image, unsigned long image_size,
-                       unsigned long nr_pages,
                        vcpu_guest_context_t *ctxt,
                        unsigned long shared_info_frame,
                        unsigned int vcpus,
@@ -207,18 +206,13 @@ static int setup_guest(int xc_handle,
                        unsigned long *store_mfn)
 {
     xen_pfn_t *page_array = NULL;
-    unsigned long count, i;
-    unsigned long long ptr;
-    xc_mmu_t *mmu = NULL;
-
+    unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
+    unsigned long shared_page_nr;
     shared_info_t *shared_info;
     void *e820_page;
-
     struct domain_setup_info dsi;
     uint64_t v_end;
 
-    unsigned long shared_page_nr;
-
     memset(&dsi, 0, sizeof(struct domain_setup_info));
 
     if ( (parseelfimage(image, image_size, &dsi)) != 0 )
@@ -230,7 +224,6 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
-    /* memsize is in megabytes */
     v_end = (unsigned long long)memsize << 20;
 
     IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
@@ -255,52 +248,26 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
-    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
-    {
-        PERROR("Could not get the page frame list.\n");
-        goto error_out;
-    }
-
-    /* HVM domains must be put into shadow mode at the start of day. */
-    /* XXX *After* xc_get_pfn_list()!! */
-    if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
-                           NULL, 0, NULL, 
-                           XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  |
-                           XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
-                           XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL, 
-                           NULL) )
-    {
-        PERROR("Could not enable shadow paging for domain.\n");
-        goto error_out;
-    }        
+    for ( i = 0; i < nr_pages; i++ )
+        page_array[i] = i;
+    for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
+        page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+
+    if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
+                                           0, 0, page_array) )
+    {
+        PERROR("Could not allocate memory for HVM guest.\n");
+        goto error_out;
+    }
+
+    if ( xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
+                                       page_array, page_array) )
+    {
+        PERROR("Could not translate addresses of HVM guest.\n");
+        goto error_out;
+    }
 
     loadelfimage(image, xc_handle, dom, page_array, &dsi);
-
-    if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
-        goto error_out;
-
-    /* Write the machine->phys table entries. */
-    for ( count = 0; count < nr_pages; count++ )
-    {
-        unsigned long gpfn_count_skip;
-
-        ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;
-
-        gpfn_count_skip = 0;
-
-        /*
-         * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
-         * for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
-         * RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
-         */
-        if ( count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) )
-            gpfn_count_skip = HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
-
-        if ( xc_add_mmu_update(xc_handle, mmu,
-                               ptr | MMU_MACHPHYS_UPDATE,
-                               count + gpfn_count_skip) )
-            goto error_out;
-    }
 
     if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi) )
     {
@@ -352,22 +319,13 @@ static int setup_guest(int xc_handle,
     if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
         goto error_out;
 
-    /* Send the page update requests down to the hypervisor. */
-    if ( xc_finish_mmu_updates(xc_handle, mmu) )
-        goto error_out;
-
-    free(mmu);
     free(page_array);
 
-    /*
-     * Initial register values:
-     */
     ctxt->user_regs.eip = dsi.v_kernentry;
 
     return 0;
 
  error_out:
-    free(mmu);
     free(page_array);
     return -1;
 }
@@ -387,31 +345,10 @@ static int xc_hvm_build_internal(int xc_
     struct xen_domctl launch_domctl, domctl;
     int rc, i;
     vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
-    unsigned long nr_pages;
-    xen_capabilities_info_t xen_caps;
 
     if ( (image == NULL) || (image_size == 0) )
     {
         ERROR("Image required");
-        goto error_out;
-    }
-
-    if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
-    {
-        PERROR("Failed to get xen version info");
-        goto error_out;
-    }
-
-    if ( !strstr(xen_caps, "hvm") )
-    {
-        PERROR("CPU doesn't support HVM extensions or "
-               "the extensions are not enabled");
-        goto error_out;
-    }
-
-    if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
-    {
-        PERROR("Could not find total pages for domain");
         goto error_out;
     }
 
@@ -430,24 +367,10 @@ static int xc_hvm_build_internal(int xc_
         goto error_out;
     }
 
-#if 0
-    /* HVM domains must be put into shadow mode at the start of day */
-    if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
-                           NULL, 0, NULL, 
-                           XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  |
-                           XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
-                           XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL, 
-                           NULL) )
-    {
-        PERROR("Could not enable shadow paging for domain.\n");
-        goto error_out;
-    }        
-#endif
-
     memset(ctxt, 0, sizeof(*ctxt));
-
     ctxt->flags = VGCF_HVM_GUEST;
-    if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
+
+    if ( setup_guest(xc_handle, domid, memsize, image, image_size,
                      ctxt, domctl.u.getdomaininfo.shared_info_frame,
                      vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
     {
diff -r 96f51a000ed0 -r cf3d69ba5633 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Wed Nov 01 18:32:45 2006 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Nov 01 18:37:23 2006 +0000
@@ -1295,9 +1295,11 @@ class XendDomainInfo:
             shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
             self.info['shadow_memory'] = shadow_cur
 
-            # initial memory reservation
-            xc.domain_memory_increase_reservation(self.domid, reservation, 0,
-                                                  0)
+            # Initial memory reservation
+            if not (self._infoIsSet('image') and
+                    sxp.name(self.info['image']) == "hvm"):
+                xc.domain_memory_increase_reservation(
+                    self.domid, reservation, 0, 0)
 
             self._createChannels()
 
diff -r 96f51a000ed0 -r cf3d69ba5633 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Wed Nov 01 18:32:45 2006 +0000
+++ b/tools/python/xen/xend/image.py    Wed Nov 01 18:37:23 2006 +0000
@@ -478,22 +478,12 @@ class X86_HVM_ImageHandler(HVMImageHandl
 
     def getRequiredAvailableMemory(self, mem_kb):
         # Add 8 MiB overhead for QEMU's video RAM.
-        return self.getRequiredInitialReservation(mem_kb) + 8192
+        return mem_kb + 8192
 
     def getRequiredInitialReservation(self, mem_kb):
-        page_kb = 4
-        # This was derived emperically:
-        #   2.4 MB overhead per 1024 MB RAM
-        #   + 4 to avoid low-memory condition
-        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 4;
-        extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
-        return mem_kb + extra_pages * page_kb
+        return mem_kb
 
     def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
-        # The given value is the configured value -- we need to include the
-        # overhead due to getRequiredInitialReservation.
-        maxmem_kb = self.getRequiredInitialReservation(maxmem_kb)
-
         # 256 pages (1MB) per vcpu,
         # plus 1 page per MiB of RAM for the P2M map,
         # plus 1 page per MiB of RAM to shadow the resident processes.  
diff -r 96f51a000ed0 -r cf3d69ba5633 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Nov 01 18:32:45 2006 +0000
+++ b/xen/arch/x86/domain.c     Wed Nov 01 18:37:23 2006 +0000
@@ -155,19 +155,12 @@ int arch_domain_create(struct domain *d)
 {
     l1_pgentry_t gdt_l1e;
     int vcpuid, pdpt_order;
-    int i;
-
-    if ( is_hvm_domain(d) && !hvm_enabled )
-    {
-        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
-                 "on a non-VT/AMDV platform.\n");
-        return -EINVAL;
-    }
+    int i, rc = -ENOMEM;
 
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
     if ( d->arch.mm_perdomain_pt == NULL )
-        goto fail_nomem;
+        goto fail;
     memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
 
     /*
@@ -192,7 +185,7 @@ int arch_domain_create(struct domain *d)
     d->arch.mm_perdomain_l3 = alloc_xenheap_page();
     if ( (d->arch.mm_perdomain_l2 == NULL) ||
          (d->arch.mm_perdomain_l3 == NULL) )
-        goto fail_nomem;
+        goto fail;
 
     memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
     for ( i = 0; i < (1 << pdpt_order); i++ )
@@ -219,26 +212,41 @@ int arch_domain_create(struct domain *d)
         d->arch.ioport_caps = 
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         if ( d->arch.ioport_caps == NULL )
-            goto fail_nomem;
+            goto fail;
 
         if ( (d->shared_info = alloc_xenheap_page()) == NULL )
-            goto fail_nomem;
+            goto fail;
 
         memset(d->shared_info, 0, PAGE_SIZE);
         share_xen_page_with_guest(
             virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
+    if ( is_hvm_domain(d) )
+    {
+        if ( !hvm_enabled )
+        {
+            gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
+                     "on a non-VT/AMDV platform.\n");
+            rc = -EINVAL;
+            goto fail;
+        }
+
+        rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
+        if ( rc != 0 )
+            goto fail;
+    }
+
     return 0;
 
- fail_nomem:
+ fail:
     free_xenheap_page(d->shared_info);
 #ifdef __x86_64__
     free_xenheap_page(d->arch.mm_perdomain_l2);
     free_xenheap_page(d->arch.mm_perdomain_l3);
 #endif
     free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
-    return -ENOMEM;
+    return rc;
 }
 
 void arch_domain_destroy(struct domain *d)
diff -r 96f51a000ed0 -r cf3d69ba5633 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Nov 01 18:32:45 2006 +0000
+++ b/xen/arch/x86/domctl.c     Wed Nov 01 18:37:23 2006 +0000
@@ -224,45 +224,18 @@ long arch_do_domctl(
 
             spin_lock(&d->page_alloc_lock);
 
-            if ( is_hvm_domain(d) && shadow_mode_translate(d) )
-            {
-                /* HVM domain: scan P2M to get guaranteed physmap order. */
-                for ( i = 0, gmfn = 0;
-                      (i < max_pfns) && (i < d->tot_pages); 
-                      i++, gmfn++ )
+            list_ent = d->page_list.next;
+            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
+            {
+                mfn = page_to_mfn(list_entry(
+                    list_ent, struct page_info, list));
+                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
+                                          i, &mfn, 1) )
                 {
-                    if ( unlikely(i == (HVM_BELOW_4G_MMIO_START>>PAGE_SHIFT)) )
-                    {
-                        /* skip MMIO range */
-                        gmfn += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
-                    }
-                    mfn = gmfn_to_mfn(d, gmfn);
-                    if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
-                                              i, &mfn, 1) )
-                    {
-                        ret = -EFAULT;
-                        break;
-                    }
+                    ret = -EFAULT;
+                    break;
                 }
-            }
-            else 
-            {        
-                /* Other guests: return in order of ownership list. */
-                list_ent = d->page_list.next;
-                for ( i = 0;
-                      (i < max_pfns) && (list_ent != &d->page_list);
-                      i++ )
-                {
-                    mfn = page_to_mfn(list_entry(
-                        list_ent, struct page_info, list));
-                    if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
-                                              i, &mfn, 1) )
-                    {
-                        ret = -EFAULT;
-                        break;
-                    }
-                    list_ent = mfn_to_page(mfn)->list.next;
-                }
+                list_ent = mfn_to_page(mfn)->list.next;
             }
             
             spin_unlock(&d->page_alloc_lock);
diff -r 96f51a000ed0 -r cf3d69ba5633 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Nov 01 18:32:45 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Nov 01 18:37:23 2006 +0000
@@ -2461,7 +2461,7 @@ static void sh_new_mode(struct domain *d
         sh_update_paging_modes(v);
 }
 
-static int shadow_enable(struct domain *d, u32 mode)
+int shadow_enable(struct domain *d, u32 mode)
 /* Turn on "permanent" shadow features: external, translate, refcount.
  * Can only be called once on a domain, and these features cannot be
  * disabled. 
@@ -3092,6 +3092,8 @@ int shadow_domctl(struct domain *d,
         if ( shadow_mode_log_dirty(d) )
             if ( (rc = shadow_log_dirty_disable(d)) != 0 ) 
                 return rc;
+        if ( is_hvm_domain(d) )
+            return -EINVAL;
         if ( d->arch.shadow.mode & SHM2_enable )
             if ( (rc = shadow_test_disable(d)) != 0 ) 
                 return rc;
diff -r 96f51a000ed0 -r cf3d69ba5633 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Nov 01 18:32:45 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Nov 01 18:37:23 2006 +0000
@@ -313,6 +313,9 @@ static inline int shadow_guest_paging_le
 /**************************************************************************/
 /* Entry points into the shadow code */
 
+/* Enable arbitrary shadow mode. */
+int shadow_enable(struct domain *d, u32 mode);
+
 /* Turning on shadow test mode */
 int shadow_test_enable(struct domain *d);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
