WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
xen-changelog

[Xen-changelog] [xen-unstable] Remove page-scrub lists and async scrubbing.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Remove page-scrub lists and async scrubbing.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 02 Jul 2009 08:51:16 -0700
Delivery-date: Thu, 02 Jul 2009 08:57:25 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1246549531 -3600
# Node ID 8fecba36bc63a9587f520ed5da3bfffdb1173fb4
# Parent  863ae334467c44e939fed3a2a38c1af76451b94e
Remove page-scrub lists and async scrubbing.

The original user for this was domain destruction. Now that this is
preemptible all the way back up to dom0 userspace, asynchrony is
better introduced at that level, if at all, imo.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
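
To make the effect of the change concrete, below is a minimal stand-alone C sketch of the synchronous scrub path that remains after this patch: a page released by a dying domain is scrubbed immediately on the free path instead of being queued on page_scrub_list for a softirq to process later. This is not Xen code -- struct page_info, the free list and the clear_page() stand-in are simplified for illustration; only the debug-vs-release scrub choice mirrors scrub_one_page() in the patch.

/* Stand-alone model of the synchronous scrub path introduced by this patch.
 * Not Xen code: struct page_info, the free list and PAGE_SIZE are simplified
 * stand-ins for illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page_info {
    struct page_info *next;            /* free-list linkage (simplified) */
    unsigned char data[PAGE_SIZE];
};

static struct page_info *free_list;

/* Mirrors the patch's scrub_one_page(): poison with 0xc2 in debug builds so
 * callers cannot rely on freed pages being zeroed; plain clear in release
 * builds (clear_page() stand-in). */
static void scrub_one_page(struct page_info *pg)
{
#ifndef NDEBUG
    memset(pg->data, 0xc2, PAGE_SIZE);
#else
    memset(pg->data, 0, PAGE_SIZE);
#endif
}

/* After the patch there is no scrub list: a page owned by a dying domain is
 * scrubbed right here, then returned to the free list. */
static void free_domheap_page(struct page_info *pg)
{
    scrub_one_page(pg);
    pg->next = free_list;
    free_list = pg;
}

int main(void)
{
    struct page_info *pg = calloc(1, sizeof(*pg));
    if ( pg == NULL )
        return 1;

    memcpy(pg->data, "guest secret", 12);   /* pretend the guest wrote here */
    free_domheap_page(pg);                  /* scrubbed synchronously */

    printf("first byte after free: 0x%02x\n", (unsigned)free_list->data[0]);
    free(pg);
    return 0;
}

The trade-off is that the scrubbing cost now sits on the synchronous destruction path; per the commit message, that is acceptable because domain destruction is already preemptible back up to dom0 userspace, where asynchrony can be reintroduced if it ever proves necessary.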
---
 xen/arch/ia64/xen/dom0_ops.c |    2 
 xen/arch/ia64/xen/domain.c   |    1 
 xen/arch/x86/domain.c        |    1 
 xen/arch/x86/sysctl.c        |    2 
 xen/common/domain.c          |    1 
 xen/common/page_alloc.c      |  139 +++----------------------------------------
 xen/common/tmem_xen.c        |   10 +--
 xen/include/xen/mm.h         |   15 ----
 8 files changed, 18 insertions(+), 153 deletions(-)

diff -r 863ae334467c -r 8fecba36bc63 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Thu Jul 02 16:45:31 2009 +0100
@@ -718,7 +718,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         pi->nr_nodes         = num_online_nodes();
         pi->total_pages      = total_pages; 
         pi->free_pages       = avail_domheap_pages();
-        pi->scrub_pages      = avail_scrub_pages();
+        pi->scrub_pages      = 0;
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
 
         pi->max_cpu_id = last_cpu(cpu_online_map);
diff -r 863ae334467c -r 8fecba36bc63 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/arch/ia64/xen/domain.c        Thu Jul 02 16:45:31 2009 +0100
@@ -360,7 +360,6 @@ static void continue_cpu_idle_loop(void)
 #else
            irq_stat[cpu].idle_timestamp = jiffies;
 #endif
-           page_scrub_schedule_work();
            while ( !softirq_pending(cpu) )
                default_idle();
            raise_softirq(SCHEDULE_SOFTIRQ);
diff -r 863ae334467c -r 8fecba36bc63 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/arch/x86/domain.c     Thu Jul 02 16:45:31 2009 +0100
@@ -120,7 +120,6 @@ void idle_loop(void)
     {
         if ( cpu_is_offline(smp_processor_id()) )
             play_dead();
-        page_scrub_schedule_work();
         (*pm_idle)();
         do_softirq();
     }
diff -r 863ae334467c -r 8fecba36bc63 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/arch/x86/sysctl.c     Thu Jul 02 16:45:31 2009 +0100
@@ -67,7 +67,7 @@ long arch_do_sysctl(
         pi->nr_nodes = num_online_nodes();
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = avail_scrub_pages();
+        pi->scrub_pages = 0;
         pi->cpu_khz = cpu_khz;
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
         if ( hvm_enabled )
diff -r 863ae334467c -r 8fecba36bc63 xen/common/domain.c
--- a/xen/common/domain.c       Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/common/domain.c       Thu Jul 02 16:45:31 2009 +0100
@@ -394,7 +394,6 @@ int domain_kill(struct domain *d)
         /* fallthrough */
     case DOMDYING_dying:
         rc = domain_relinquish_resources(d);
-        page_scrub_kick();
         if ( rc != 0 )
         {
             BUG_ON(rc != -EAGAIN);
diff -r 863ae334467c -r 8fecba36bc63 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/common/page_alloc.c   Thu Jul 02 16:45:31 2009 +0100
@@ -64,18 +64,6 @@ integer_param("dma_bits", dma_bitsize);
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 
-#ifndef NDEBUG
-/* Avoid callers relying on allocations returning zeroed pages. */
-#define scrub_page(p) memset((p), 0xc2, PAGE_SIZE)
-#else
-/* For a production build, clear_page() is the fastest way to scrub. */
-#define scrub_page(p) clear_page(p)
-#endif
-
-static DEFINE_SPINLOCK(page_scrub_lock);
-PAGE_LIST_HEAD(page_scrub_list);
-static unsigned long scrub_pages;
-
 /* Offlined page list, protected by heap_lock. */
 PAGE_LIST_HEAD(page_offlined_list);
 /* Broken page list, protected by heap_lock. */
@@ -945,7 +933,6 @@ void __init end_boot_allocator(void)
  */
 void __init scrub_heap_pages(void)
 {
-    void *p;
     unsigned long mfn;
 
     if ( !opt_bootscrub )
@@ -969,21 +956,7 @@ void __init scrub_heap_pages(void)
 
         /* Re-check page status with lock held. */
         if ( !allocated_in_map(mfn) )
-        {
-            if ( is_xen_heap_mfn(mfn) )
-            {
-                p = page_to_virt(mfn_to_page(mfn));
-                memguard_unguard_range(p, PAGE_SIZE);
-                scrub_page(p);
-                memguard_guard_range(p, PAGE_SIZE);
-            }
-            else
-            {
-                p = map_domain_page(mfn);
-                scrub_page(p);
-                unmap_domain_page(p);
-            }
-        }
+            scrub_one_page(mfn_to_page(mfn));
 
         spin_unlock(&heap_lock);
     }
@@ -1247,10 +1220,7 @@ void free_domheap_pages(struct page_info
             for ( i = 0; i < (1 << order); i++ )
             {
                 page_set_owner(&pg[i], NULL);
-                spin_lock(&page_scrub_lock);
-                page_list_add(&pg[i], &page_scrub_list);
-                scrub_pages++;
-                spin_unlock(&page_scrub_lock);
+                scrub_one_page(&pg[i]);
             }
         }
     }
@@ -1322,96 +1292,19 @@ __initcall(pagealloc_keyhandler_init);
 __initcall(pagealloc_keyhandler_init);
 
 
-
-/*************************
- * PAGE SCRUBBING
- */
-
-static DEFINE_PER_CPU(struct timer, page_scrub_timer);
-
-static void page_scrub_softirq(void)
-{
-    PAGE_LIST_HEAD(list);
-    struct page_info  *pg;
-    void             *p;
-    int               i;
-    s_time_t          start = NOW();
-    static spinlock_t serialise_lock = SPIN_LOCK_UNLOCKED;
-
-    /* free_heap_pages() does not parallelise well. Serialise this function. */
-    if ( !spin_trylock(&serialise_lock) )
-    {
-        set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(1));
-        return;
-    }
-
-    /* Aim to do 1ms of work every 10ms. */
-    do {
-        spin_lock(&page_scrub_lock);
-
-        /* Peel up to 16 pages from the list. */
-        for ( i = 0; i < 16; i++ )
-        {
-            if ( !(pg = page_list_remove_head(&page_scrub_list)) )
-                break;
-            page_list_add_tail(pg, &list);
-        }
-        
-        if ( unlikely(i == 0) )
-        {
-            spin_unlock(&page_scrub_lock);
-            goto out;
-        }
-
-        scrub_pages -= i;
-
-        spin_unlock(&page_scrub_lock);
-
-        /* Scrub each page in turn. */
-        while ( (pg = page_list_remove_head(&list)) ) {
-            p = map_domain_page(page_to_mfn(pg));
-            scrub_page(p);
-            unmap_domain_page(p);
-            free_heap_pages(pg, 0);
-        }
-    } while ( (NOW() - start) < MILLISECS(1) );
-
-    set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
-
- out:
-    spin_unlock(&serialise_lock);
-}
-
-void scrub_list_splice(struct page_list_head *list)
-{
-    spin_lock(&page_scrub_lock);
-    page_list_splice(list, &page_scrub_list);
-    spin_unlock(&page_scrub_lock);
-}
-
-void scrub_list_add(struct page_info *pg)
-{
-    spin_lock(&page_scrub_lock);
-    page_list_add(pg, &page_scrub_list);
-    spin_unlock(&page_scrub_lock);
-}
-
 void scrub_one_page(struct page_info *pg)
 {
     void *p = map_domain_page(page_to_mfn(pg));
 
-    scrub_page(p);
+#ifndef NDEBUG
+    /* Avoid callers relying on allocations returning zeroed pages. */
+    memset(p, 0xc2, PAGE_SIZE);
+#else
+    /* For a production build, clear_page() is the fastest way to scrub. */
+    clear_page(p);
+#endif
+
     unmap_domain_page(p);
-}
-
-static void page_scrub_timer_fn(void *unused)
-{
-    page_scrub_schedule_work();
-}
-
-unsigned long avail_scrub_pages(void)
-{
-    return scrub_pages;
 }
 
 static void dump_heap(unsigned char key)
@@ -1438,18 +1331,6 @@ static __init int register_heap_trigger(
     return 0;
 }
 __initcall(register_heap_trigger);
-
-
-static __init int page_scrub_init(void)
-{
-    int cpu;
-    for_each_cpu ( cpu )
-        init_timer(&per_cpu(page_scrub_timer, cpu),
-                   page_scrub_timer_fn, NULL, cpu);
-    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
-    return 0;
-}
-__initcall(page_scrub_init);
 
 /*
  * Local variables:
diff -r 863ae334467c -r 8fecba36bc63 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/common/tmem_xen.c     Thu Jul 02 16:45:31 2009 +0100
@@ -195,12 +195,14 @@ EXPORT void tmh_release_avail_pages_to_h
 EXPORT void tmh_release_avail_pages_to_host(void)
 {
     spin_lock(&tmh_page_list_lock);
-    if ( !page_list_empty(&tmh_page_list) )
+    while ( !page_list_empty(&tmh_page_list) )
     {
-        scrub_list_splice(&tmh_page_list);
-        INIT_PAGE_LIST_HEAD(&tmh_page_list);
-        tmh_page_list_pages = 0;
+        struct page_info *pg = page_list_first(&tmh_page_list);
+        scrub_one_page(pg);
+        free_domheap_page(pg);
     }
+    INIT_PAGE_LIST_HEAD(&tmh_page_list);
+    tmh_page_list_pages = 0;
     spin_unlock(&tmh_page_list_lock);
 }
 
diff -r 863ae334467c -r 8fecba36bc63 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Thu Jul 02 16:16:15 2009 +0100
+++ b/xen/include/xen/mm.h      Thu Jul 02 16:45:31 2009 +0100
@@ -299,22 +299,7 @@ page_list_splice(struct page_list_head *
 # define page_list_splice(list, hd)        list_splice(list, hd)
 #endif
 
-/* Automatic page scrubbing for dead domains. */
-extern struct page_list_head page_scrub_list;
-#define page_scrub_schedule_work()                 \
-    do {                                           \
-        if ( !page_list_empty(&page_scrub_list) )  \
-            raise_softirq(PAGE_SCRUB_SOFTIRQ);     \
-    } while ( 0 )
-#define page_scrub_kick()                                               \
-    do {                                                                \
-        if ( !page_list_empty(&page_scrub_list) )                       \
-            cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ);  \
-    } while ( 0 )
-void scrub_list_splice(struct page_list_head *);
-void scrub_list_add(struct page_info *);
 void scrub_one_page(struct page_info *);
-unsigned long avail_scrub_pages(void);
 
 int guest_remove_page(struct domain *d, unsigned long gmfn);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
