[Xen-devel] [PATCH 2/3] continuable destroy domain: x86 part

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 2/3] continuable destroy domain: x86 part
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Fri, 31 Aug 2007 23:06:09 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, KRYSANS@xxxxxxxxxx, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Fri, 31 Aug 2007 07:08:03 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.4.2.1i
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1188557397 -32400
# Node ID 357ef76c2f1aa35523bad4c5825292c70bcc3381
# Parent  1ab773a3182bb785a3e0ec53fa4ab5bd6bfd5fa9
Implement x86 continuable domain destroy.
This patch addresses the following bug report:
http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1037
PATCHNAME: x86_continuable_domain_destroy

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
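
For readers unfamiliar with Xen's hypercall continuations: the point of having
domain_relinquish_resources() return -EAGAIN is that the toplevel destroy
hypercall can be re-issued (the continuation plumbing is presumably the common
code part of this series) and pick up where it left off, so a domain with a
large amount of memory no longer monopolises a CPU for the whole teardown.
Below is a minimal standalone sketch of the resume-from-recorded-phase
pattern; the names relmem_state, relinquish_chunk and preempt_pending are
illustrative only, not Xen identifiers.

/*
 * Standalone model of the resumable teardown used by this patch.
 * Illustrative only.  Build: gcc -o relmem_model relmem_model.c
 */
#include <stdio.h>
#include <errno.h>

enum relmem_state { NOT_STARTED, PHASE_A, PHASE_B, DONE };

static int preempt_budget;  /* stand-in for hypercall_preempt_check() */

static int preempt_pending(void)
{
    return --preempt_budget <= 0;
}

/* One bounded chunk of work; stops with -EAGAIN when "preempted". */
static int relinquish_chunk(const char *phase, int *pages_left)
{
    while ( *pages_left > 0 )
    {
        (*pages_left)--;                    /* "free one page" */
        if ( preempt_pending() )
            return -EAGAIN;
    }
    printf("%s complete\n", phase);
    return 0;
}

/* Same switch-with-fallthrough shape as domain_relinquish_resources(). */
static int relinquish_resources(enum relmem_state *st, int *a, int *b)
{
    int ret;

    switch ( *st )
    {
    case NOT_STARTED:
        *st = PHASE_A;
        /* fallthrough */
    case PHASE_A:
        if ( (ret = relinquish_chunk("phase A", a)) != 0 )
            return ret;
        *st = PHASE_B;
        /* fallthrough */
    case PHASE_B:
        if ( (ret = relinquish_chunk("phase B", b)) != 0 )
            return ret;
        *st = DONE;
        /* fallthrough */
    case DONE:
        break;
    }
    return 0;
}

int main(void)
{
    enum relmem_state st = NOT_STARTED;
    int a = 5, b = 5, calls = 0;
    int ret;

    /* The caller simply re-issues the call on -EAGAIN, much as a
     * continued hypercall is re-executed on the guest's behalf. */
    do {
        preempt_budget = 3;     /* pretend to be preempted every 3 pages */
        calls++;
        ret = relinquish_resources(&st, &a, &b);
    } while ( ret == -EAGAIN );

    printf("finished after %d calls\n", calls);
    return 0;
}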

diff -r 1ab773a3182b -r 357ef76c2f1a xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Aug 31 22:22:35 2007 +0900
+++ b/xen/arch/x86/domain.c     Fri Aug 31 19:49:57 2007 +0900
@@ -437,6 +437,9 @@ int arch_domain_create(struct domain *d)
     int vcpuid, pdpt_order, paging_initialised = 0;
     int rc = -ENOMEM;
 
+    d->arch.relmem = RELMEM_not_started;
+    INIT_LIST_HEAD(&d->arch.relmem_list);
+
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
     if ( d->arch.mm_perdomain_pt == NULL )
@@ -1599,12 +1602,13 @@ int hypercall_xlat_continuation(unsigned
 }
 #endif
 
-static void relinquish_memory(struct domain *d, struct list_head *list,
-                              unsigned long type)
+static int relinquish_memory(struct domain *d, struct list_head *list,
+                             unsigned long type)
 {
     struct list_head *ent;
     struct page_info  *page;
     unsigned long     x, y;
+    int ret = 0;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
@@ -1619,6 +1623,7 @@ static void relinquish_memory(struct dom
         {
             /* Couldn't get a reference -- someone is freeing this page. */
             ent = ent->next;
+            list_move_tail(&page->list, &d->arch.relmem_list);
             continue;
         }
 
@@ -1653,10 +1658,18 @@ static void relinquish_memory(struct dom
 
         /* Follow the list chain and /then/ potentially free the page. */
         ent = ent->next;
+        list_move_tail(&page->list, &d->arch.relmem_list);
         put_page(page);
-    }
-
+        if ( hypercall_preempt_check() )
+        {
+            ret = -EAGAIN;
+            break;
+        }
+    }
+    if ( ret == 0 )
+        list_splice_init(&d->arch.relmem_list, list);
     spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
 }
 
 static void vcpu_destroy_pagetables(struct vcpu *v)
@@ -1719,35 +1732,75 @@ static void vcpu_destroy_pagetables(stru
 
 int domain_relinquish_resources(struct domain *d)
 {
+    int ret;
     struct vcpu *v;
 
-    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
-
-    /* Tear down paging-assistance stuff. */
-    paging_teardown(d);
-
-    /* Drop the in-use references to page-table bases. */
-    for_each_vcpu ( d, v )
-        vcpu_destroy_pagetables(v);
-
-    /*
-     * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
-     * it automatically gets squashed when the guest's mappings go away.
-     */
-    for_each_vcpu(d, v)
-        destroy_gdt(v);
-
-    /* Relinquish every page of memory. */
+    switch ( d->arch.relmem ) {
+    case RELMEM_not_started:
+        BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
+
+        /* Tear down paging-assistance stuff. */
+        paging_teardown(d);
+
+        /* Drop the in-use references to page-table bases. */
+        for_each_vcpu ( d, v )
+            vcpu_destroy_pagetables(v);
+
+        /*
+         * Relinquish GDT mappings. No need for explicit unmapping of the LDT
+         * as it automatically gets squashed when the guest's mappings go away.
+         */
+        for_each_vcpu(d, v)
+            destroy_gdt(v);
+
+        d->arch.relmem = RELMEM_xen_l4;
+        /* fallthrough */
+        /* Relinquish every page of memory. */
 #if CONFIG_PAGING_LEVELS >= 4
-    relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l4_page_table);
+    case RELMEM_xen_l4:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l4;
+        /* fallthrough */
+    case RELMEM_dom_l4:
+        ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_xen_l3;
+        /* fallthrough */
 #endif
 #if CONFIG_PAGING_LEVELS >= 3
-    relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l3_page_table);
-#endif
-    relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l2_page_table);
+    case RELMEM_xen_l3:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l3;
+        /* fallthrough */
+    case RELMEM_dom_l3:
+        ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_xen_l2;
+        /* fallthrough */
+#endif
+    case RELMEM_xen_l2:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l2;
+        /* fallthrough */
+    case RELMEM_dom_l2:
+        ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_done;
+        /* fallthrough */
+    case RELMEM_done:
+        break;
+    default:
+        BUG();
+    }
 
     /* Free page used by xen oprofile buffer. */
     free_xenoprof_pages(d);
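
A note on the relmem_list bookkeeping above: every page that
relinquish_memory() finishes with is moved from the domain's list onto
d->arch.relmem_list, so that when the operation is preempted and later
resumed, the scan restarts at the head of the main list and sees only pages
it has not yet handled; once a whole pass completes, list_splice_init()
hands the parked pages back.  A standalone sketch of that pattern follows,
with a toy list implementation standing in for Xen's <xen/list.h>;
illustrative only.

/* Build: gcc -o relmem_list relmem_list.c */
#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

/* Like list_move_tail(): unlink and append onto another list. */
static void list_move_tail(struct node *n, struct node *h)
{
    list_del(n);
    list_add_tail(n, h);
}

/* Like list_splice_init(): drain 'from' onto the tail of 'to'. */
static void list_splice_init(struct node *from, struct node *to)
{
    while ( from->next != from )
        list_move_tail(from->next, to);
}

int main(void)
{
    struct node pages[4], page_list, relmem_list, *n;
    int i;

    list_init(&page_list);
    list_init(&relmem_list);
    for ( i = 0; i < 4; i++ )
    {
        pages[i].id = i;
        list_add_tail(&pages[i], &page_list);
    }

    /* First pass: handle two pages, then get "preempted". */
    for ( i = 0; i < 2; i++ )
    {
        n = page_list.next;
        printf("relinquish page %d\n", n->id);
        list_move_tail(n, &relmem_list);   /* parked: not rescanned */
    }

    /* Resumed pass: the head of page_list is the first unseen page. */
    while ( page_list.next != &page_list )
    {
        n = page_list.next;
        printf("relinquish page %d (resumed)\n", n->id);
        list_move_tail(n, &relmem_list);
    }

    /* Pass complete: hand the parked pages back, as the patch does. */
    list_splice_init(&relmem_list, &page_list);
    return 0;
}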
diff -r 1ab773a3182b -r 357ef76c2f1a xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Fri Aug 31 22:22:35 2007 +0900
+++ b/xen/arch/x86/domctl.c     Fri Aug 31 19:49:57 2007 +0900
@@ -257,10 +257,14 @@ long arch_do_domctl(
                 break;
             }
 
+            spin_lock(&d->page_alloc_lock);
+
+            if ( unlikely(d->is_dying) ) {
+                spin_unlock(&d->page_alloc_lock);
+                goto getmemlist_out;
+            }
+
             ret = 0;
-
-            spin_lock(&d->page_alloc_lock);
-
             list_ent = d->page_list.next;
             for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
             {
@@ -279,7 +283,7 @@ long arch_do_domctl(
 
             domctl->u.getmemlist.num_pfns = i;
             copy_to_guest(u_domctl, domctl, 1);
-
+        getmemlist_out:
             rcu_unlock_domain(d);
         }
     }
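
Why the getmemlist change: while the domain is dying, relinquish_memory() is
moving pages between d->page_list and d->arch.relmem_list, so a concurrent
XEN_DOMCTL_getmemlist walking d->page_list could observe a half-rearranged
list.  Checking d->is_dying under the same page_alloc_lock that the teardown
path holds makes the bail-out race-free.  A reduced illustration of that
check-under-lock pattern, with pthreads standing in for Xen's spinlocks;
illustrative only.

/* Build: gcc -o dying_check dying_check.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_dying;

static int getmemlist(void)
{
    int ret = -1;                 /* caller's prior error value */

    pthread_mutex_lock(&page_alloc_lock);
    if ( is_dying )
    {
        /* The list may be mid-rearrangement: bail out early. */
        pthread_mutex_unlock(&page_alloc_lock);
        return ret;
    }
    /* ... safe to walk page_list here ... */
    ret = 0;
    pthread_mutex_unlock(&page_alloc_lock);
    return ret;
}

int main(void)
{
    printf("getmemlist -> %d\n", getmemlist());
    is_dying = true;              /* as the destroy path would set it */
    printf("getmemlist on dying domain -> %d\n", getmemlist());
    return 0;
}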
diff -r 1ab773a3182b -r 357ef76c2f1a xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Aug 31 22:22:35 2007 +0900
+++ b/xen/include/asm-x86/domain.h      Fri Aug 31 19:49:57 2007 +0900
@@ -234,6 +234,19 @@ struct arch_domain
     bool_t is_32bit_pv;
     /* Is shared-info page in 32-bit format? */
     bool_t has_32bit_shinfo;
+
+    /* For continuable domain_destroy(). */
+    enum {
+        RELMEM_not_started,
+        RELMEM_xen_l4,
+        RELMEM_dom_l4,
+        RELMEM_xen_l3,
+        RELMEM_dom_l3,
+        RELMEM_xen_l2,
+        RELMEM_dom_l2,
+        RELMEM_done,
+    } relmem;
+    struct list_head relmem_list;
 } __cacheline_aligned;
 
 #ifdef CONFIG_X86_PAE

Attachment: 15810_357ef76c2f1a_x86_continuable_domain_destroy.patch
Description: Text Data

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel