WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 8 of 8] libxc: save: move static stats variable to stack variable

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 8 of 8] libxc: save: move static stats variable to stack variable
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Tue, 24 May 2011 10:14:34 +0100
Cc: Shriram Rajagopalan <rshriram@xxxxxxxxx>, Jim Fehlig <jfehlig@xxxxxxxxxx>
Delivery-date: Tue, 24 May 2011 02:22:48 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1306228466@xxxxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1306228466@xxxxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.6.4
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1306228450 -3600
# Node ID 32d62506e3be95124097775dc79c42304a18084c
# Parent  5463bdc1d77942b50b28eea059eaba4d1ec7d2ac
libxc: save: move static stats variable to stack variable.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r 5463bdc1d779 -r 32d62506e3be tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Tue May 24 10:14:10 2011 +0100
+++ b/tools/libxc/xc_domain_save.c      Tue May 24 10:14:10 2011 +0100
@@ -270,9 +270,9 @@ struct time_stats {
 };
 
 static int print_stats(xc_interface *xch, uint32_t domid, int pages_sent,
+                       struct time_stats *last,
                        xc_shadow_op_stats_t *stats, int print)
 {
-    static struct time_stats last;
     struct time_stats now;
 
     gettimeofday(&now.wall, NULL);
@@ -289,12 +289,12 @@ static int print_stats(xc_interface *xch
         long long d0_cpu_delta;
         long long d1_cpu_delta;
 
-        wall_delta = tv_delta(&now.wall,&last.wall)/1000;
+        wall_delta = tv_delta(&now.wall,&last->wall)/1000;
         if ( wall_delta == 0 )
             wall_delta = 1;
 
-        d0_cpu_delta = (now.d0_cpu - last.d0_cpu)/1000;
-        d1_cpu_delta = (now.d1_cpu - last.d1_cpu)/1000;
+        d0_cpu_delta = (now.d0_cpu - last->d0_cpu)/1000;
+        d1_cpu_delta = (now.d1_cpu - last->d1_cpu)/1000;
 
         DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
                 "dirtied %dMb/s %" PRId32 " pages\n",
@@ -306,7 +306,7 @@ static int print_stats(xc_interface *xch
                 stats->dirty_count);
     }
 
-    last = now;
+    *last = now;
 
     return 0;
 }
@@ -843,7 +843,8 @@ int xc_domain_save(xc_interface *xch, in
     DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
     unsigned long *to_fix = NULL;
 
-    xc_shadow_op_stats_t stats;
+    struct time_stats time_stats;
+    xc_shadow_op_stats_t shadow_stats;
 
     unsigned long needed_to_fix = 0;
     unsigned long total_sent    = 0;
@@ -1053,7 +1054,7 @@ int xc_domain_save(xc_interface *xch, in
         DPRINTF("Had %d unexplained entries in p2m table\n", err);
     }
 
-    print_stats(xch, dom, 0, &stats, 0);
+    print_stats(xch, dom, 0, &time_stats, &shadow_stats, 0);
 
     tmem_saved = xc_tmem_save(xch, dom, io_fd, live, XC_SAVE_ID_TMEM);
     if ( tmem_saved == -1 )
@@ -1377,7 +1378,7 @@ int xc_domain_save(xc_interface *xch, in
 
         if ( last_iter )
         {
-            print_stats( xch, dom, sent_this_iter, &stats, 1);
+            print_stats( xch, dom, sent_this_iter, &time_stats, &shadow_stats, 1);
 
             DPRINTF("Total pages sent= %ld (%.2fx)\n",
                     total_sent, ((float)total_sent)/dinfo->p2m_size );
@@ -1439,7 +1440,7 @@ int xc_domain_save(xc_interface *xch, in
 
             if ( xc_shadow_control(xch, dom,
                                    XEN_DOMCTL_SHADOW_OP_CLEAN, HYPERCALL_BUFFER(to_send),
-                                   dinfo->p2m_size, NULL, 0, &stats) != dinfo->p2m_size )
+                                   dinfo->p2m_size, NULL, 0, &shadow_stats) != dinfo->p2m_size )
             {
                 PERROR("Error flushing shadow PT");
                 goto out;
@@ -1447,7 +1448,7 @@ int xc_domain_save(xc_interface *xch, in
 
             sent_last_iter = sent_this_iter;
 
-            print_stats(xch, dom, sent_this_iter, &stats, 1);
+            print_stats(xch, dom, sent_this_iter, &time_stats, &shadow_stats, 1);
 
         }
     } /* end of infinite for loop */
@@ -1810,7 +1811,7 @@ int xc_domain_save(xc_interface *xch, in
         callbacks->checkpoint(callbacks->data) > 0)
     {
         /* reset stats timer */
-        print_stats(xch, dom, 0, &stats, 0);
+        print_stats(xch, dom, 0, &time_stats, &shadow_stats, 0);
 
         rc = 1;
         /* last_iter = 1; */
@@ -1821,11 +1822,11 @@ int xc_domain_save(xc_interface *xch, in
             goto out;
         }
         DPRINTF("SUSPEND shinfo %08lx\n", info.shared_info_frame);
-        print_stats(xch, dom, 0, &stats, 1);
+        print_stats(xch, dom, 0, &time_stats, &shadow_stats, 1);
 
         if ( xc_shadow_control(xch, dom,
                                XEN_DOMCTL_SHADOW_OP_CLEAN, HYPERCALL_BUFFER(to_send),
-                               dinfo->p2m_size, NULL, 0, &stats) != dinfo->p2m_size )
+                               dinfo->p2m_size, NULL, 0, &shadow_stats) != dinfo->p2m_size )
         {
             PERROR("Error flushing shadow PT");
         }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

<Prev in Thread] Current Thread [Next in Thread>