[Xen-changelog] Schedule page scrubbing for dead domains off the per-cpu periodic ticker

To: xen-changelog@xxxxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Schedule page scrubbing for dead domains off the per-cpu periodic
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Tue, 22 Mar 2005 19:26:37 +0000
Delivery-date: Tue, 22 Mar 2005 20:05:19 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-archive: <http://sourceforge.net/mailarchive/forum.php?forum=xen-changelog>
List-help: <mailto:xen-changelog-request@lists.sourceforge.net?subject=help>
List-id: <xen-changelog.lists.sourceforge.net>
List-post: <mailto:xen-changelog@lists.sourceforge.net>
List-subscribe: <https://lists.sourceforge.net/lists/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.sourceforge.net?subject=subscribe>
List-unsubscribe: <https://lists.sourceforge.net/lists/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.sourceforge.net?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-admin@xxxxxxxxxxxxxxxxxxxxx
ChangeSet 1.1159.256.68, 2005/03/22 19:26:37+00:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Schedule page scrubbing for dead domains off the per-cpu periodic
        ticker. We consume at most 10% of CPU time on busy CPUs (1ms of
        scrubbing per 10ms tick) and all otherwise-idle CPU time.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/domain.c     |    3 +
 common/page_alloc.c   |   89 +++++++++++++++++++++++++++++++++++++++++++-------
 common/schedule.c     |    2 +
 include/xen/mm.h      |   13 +++++++
 include/xen/softirq.h |    3 +
 5 files changed, 97 insertions(+), 13 deletions(-)
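
The structure of the new scrub path is a time-budgeted producer/consumer queue: free_domheap_pages() enqueues a dying domain's pages under page_scrub_lock, and page_scrub_softirq() repeatedly peels a small batch under the lock, zeroes it with the lock dropped, and stops after roughly 1ms. The standalone C sketch below shows the same pattern under simplified assumptions: a pthread mutex stands in for Xen's spinlock_t, clock_gettime() for NOW(), and the struct page / scrub_list names are illustrative, not Xen code.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define PAGE_SIZE 4096
#define BATCH     16

struct page {
    struct page  *next;
    unsigned char data[PAGE_SIZE];
};

static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *scrub_list;   /* pages awaiting scrubbing */

/* Producer side: a dying domain's pages are queued, not zeroed inline. */
static void queue_for_scrub(struct page *pg)
{
    pthread_mutex_lock(&scrub_lock);
    pg->next = scrub_list;
    scrub_list = pg;
    pthread_mutex_unlock(&scrub_lock);
}

static long long now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Consumer side: scrub in small batches for at most ~1ms per call. */
static void scrub_tick(void)
{
    long long start = now_ns();

    do {
        struct page *batch = NULL;
        int i;

        /* Peel up to BATCH pages while holding the lock... */
        pthread_mutex_lock(&scrub_lock);
        for ( i = 0; (i < BATCH) && (scrub_list != NULL); i++ )
        {
            struct page *pg = scrub_list;
            scrub_list = pg->next;
            pg->next = batch;
            batch = pg;
        }
        pthread_mutex_unlock(&scrub_lock);

        if ( batch == NULL )
            return;  /* queue drained */

        /* ...but zero and free them with the lock dropped. */
        while ( batch != NULL )
        {
            struct page *pg = batch;
            batch = pg->next;
            memset(pg->data, 0, PAGE_SIZE);
            free(pg);
        }
    } while ( (now_ns() - start) < 1000000LL );  /* 1ms budget */
}

int main(void)
{
    for ( int i = 0; i < 1000; i++ )
    {
        struct page *pg = calloc(1, sizeof(*pg));
        if ( pg != NULL )
            queue_for_scrub(pg);
    }
    scrub_tick();  /* one periodic-ticker's worth of scrub work */
    return 0;
}

Peeling in small batches keeps the lock hold time short, so a CPU freeing pages concurrently is never stalled behind a long scrub pass.
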


diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-03-22 15:04:13 -05:00
+++ b/xen/arch/x86/domain.c     2005-03-22 15:04:13 -05:00
@@ -69,7 +69,10 @@
     {
         irq_stat[cpu].idle_timestamp = jiffies;
         while ( !softirq_pending(cpu) )
+        {
+            page_scrub_schedule_work();
             default_idle();
+        }
         do_softirq();
     }
 }
diff -Nru a/xen/common/page_alloc.c b/xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   2005-03-22 15:04:13 -05:00
+++ b/xen/common/page_alloc.c   2005-03-22 15:04:13 -05:00
@@ -28,6 +28,7 @@
 #include <xen/spinlock.h>
 #include <xen/slab.h>
 #include <xen/irq.h>
+#include <xen/softirq.h>
 #include <asm/domain_page.h>
 
 /*
@@ -551,7 +552,6 @@
 {
     int            i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
-    void          *p;
 
     ASSERT(!in_irq());
 
@@ -579,26 +579,31 @@
             pg[i].tlbflush_timestamp  = tlbflush_current_time();
             pg[i].u.free.cpu_mask     = 1 << d->processor;
             list_del(&pg[i].list);
+        }
+
+        d->tot_pages -= 1 << order;
+        drop_dom_ref = (d->tot_pages == 0);
+
+        spin_unlock_recursive(&d->page_alloc_lock);
 
+        if ( likely(!test_bit(DF_DYING, &d->flags)) )
+        {
+            free_heap_pages(MEMZONE_DOM, pg, order);
+        }
+        else
+        {
             /*
              * Normally we expect a domain to clear pages before freeing them,
              * if it cares about the secrecy of their contents. However, after
              * a domain has died we assume responsibility for erasure.
              */
-            if ( unlikely(test_bit(DF_DYING, &d->flags)) )
+            for ( i = 0; i < (1 << order); i++ )
             {
-                p = map_domain_mem(page_to_phys(&pg[i]));
-                clear_page(p);
-                unmap_domain_mem(p);
+                spin_lock(&page_scrub_lock);
+                list_add(&pg[i].list, &page_scrub_list);
+                spin_unlock(&page_scrub_lock);
             }
         }
-
-        d->tot_pages -= 1 << order;
-        drop_dom_ref = (d->tot_pages == 0);
-
-        spin_unlock_recursive(&d->page_alloc_lock);
-
-        free_heap_pages(MEMZONE_DOM, pg, order);
     }
     else
     {
@@ -616,3 +621,63 @@
 {
     return avail[MEMZONE_DOM];
 }
+
+
+
+/*************************
+ * PAGE SCRUBBING
+ */
+
+spinlock_t page_scrub_lock;
+struct list_head page_scrub_list;
+
+static void page_scrub_softirq(void)
+{
+    struct list_head *ent;
+    struct pfn_info  *pg;
+    void             *p;
+    int               i;
+    s_time_t          start = NOW();
+
+    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+    do {
+        spin_lock(&page_scrub_lock);
+
+        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+        {
+            spin_unlock(&page_scrub_lock);
+            return;
+        }
+        
+        /* Peel up to 16 pages from the list, stopping at the tail. */
+        for ( i = 0; i < 16; i++ )
+            if ( ent->next != &page_scrub_list )
+                ent = ent->next;
+        
+        /* Remove peeled pages from the list. */
+        ent->next->prev = &page_scrub_list;
+        page_scrub_list.next = ent->next;
+        
+        spin_unlock(&page_scrub_lock);
+        
+        /* Working backwards, scrub each page in turn. */
+        while ( ent != &page_scrub_list )
+        {
+            pg = list_entry(ent, struct pfn_info, list);
+            ent = ent->prev;
+            p = map_domain_mem(page_to_phys(pg));
+            clear_page(p);
+            unmap_domain_mem(p);
+            free_heap_pages(MEMZONE_DOM, pg, 0);
+        }
+    } while ( (NOW() - start) < MILLISECS(1) );
+}
+
+static __init int page_scrub_init(void)
+{
+    spin_lock_init(&page_scrub_lock);
+    INIT_LIST_HEAD(&page_scrub_list);
+    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+    return 0;
+}
+__initcall(page_scrub_init);
diff -Nru a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c     2005-03-22 15:04:13 -05:00
+++ b/xen/common/schedule.c     2005-03-22 15:04:13 -05:00
@@ -437,6 +437,8 @@
     if ( !is_idle_task(d) && update_dom_time(d) )
         send_guest_virq(d, VIRQ_TIMER);
 
+    page_scrub_schedule_work();
+
     t_timer[d->processor].expires = NOW() + MILLISECS(10);
     add_ac_timer(&t_timer[d->processor]);
 }
diff -Nru a/xen/include/xen/mm.h b/xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      2005-03-22 15:04:13 -05:00
+++ b/xen/include/xen/mm.h      2005-03-22 15:04:13 -05:00
@@ -2,6 +2,10 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
+#include <xen/config.h>
+#include <xen/list.h>
+#include <xen/spinlock.h>
+
 struct domain;
 struct pfn_info;
 
@@ -33,6 +37,15 @@
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
+
+/* Automatic page scrubbing for dead domains. */
+extern spinlock_t page_scrub_lock;
+extern struct list_head page_scrub_list;
+#define page_scrub_schedule_work()              \
+    do {                                        \
+        if ( !list_empty(&page_scrub_list) )    \
+            raise_softirq(PAGE_SCRUB_SOFTIRQ);  \
+    } while ( 0 )
 
 #include <asm/mm.h>
 
diff -Nru a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h 2005-03-22 15:04:13 -05:00
+++ b/xen/include/xen/softirq.h 2005-03-22 15:04:13 -05:00
@@ -7,7 +7,8 @@
 #define NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ 2
 #define KEYPRESS_SOFTIRQ                  3
 #define NMI_SOFTIRQ                       4
-#define NR_SOFTIRQS                       5
+#define PAGE_SCRUB_SOFTIRQ                5
+#define NR_SOFTIRQS                       6
 
 #ifndef __ASSEMBLY__
 

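The patch hooks into Xen's softirq mechanism: open_softirq() registers page_scrub_softirq() against the new PAGE_SCRUB_SOFTIRQ number, page_scrub_schedule_work() raises it whenever the scrub list is non-empty, and the handler then runs from do_softirq() (see the idle loop above). Below is a minimal single-CPU model of that dispatch, as a sketch only: Xen's real implementation keeps per-CPU pending masks (cf. softirq_pending(cpu) in the domain.c hunk) and manipulates them atomically.

#include <stdio.h>

#define NR_SOFTIRQS        6
#define PAGE_SCRUB_SOFTIRQ 5

typedef void (*softirq_handler)(void);

static softirq_handler handlers[NR_SOFTIRQS];
static unsigned long pending_mask;       /* one bit per softirq */

static void open_softirq(int nr, softirq_handler fn)
{
    handlers[nr] = fn;
}

static void raise_softirq(int nr)
{
    pending_mask |= 1UL << nr;           /* cheap: just sets a bit */
}

static void do_softirq(void)
{
    /* Clear and run every pending, registered handler. */
    for ( int nr = 0; nr < NR_SOFTIRQS; nr++ )
    {
        if ( (pending_mask & (1UL << nr)) && (handlers[nr] != NULL) )
        {
            pending_mask &= ~(1UL << nr);
            handlers[nr]();
        }
    }
}

static void page_scrub_softirq(void)
{
    puts("scrub a batch of pages");
}

int main(void)
{
    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
    raise_softirq(PAGE_SCRUB_SOFTIRQ);   /* page_scrub_schedule_work() */
    do_softirq();                        /* e.g. from the idle loop */
    return 0;
}

Raising a softirq is deliberately cheap (a bit set), which is why page_scrub_schedule_work() can be called from every timer tick and every idle-loop iteration without measurable cost.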
