[Xen-changelog] [xen-unstable] libxc: Fix save/restore code for style.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] libxc: Fix save/restore code for style.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 09 Apr 2007 12:30:36 -0700
Delivery-date: Mon, 09 Apr 2007 12:30:14 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1176113483 -3600
# Node ID 7cc1d532f9eed4c27d1837a70d3a51852b5f37d2
# Parent  b0aaa113a60f236639f8b7f353459fcd9977d3be
libxc: Fix save/restore code for style.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain_restore.c |  360 ++++++++++++++----------
 tools/libxc/xc_hvm_save.c       |   63 ++--
 tools/libxc/xc_linux_save.c     |  580 +++++++++++++++++++++-------------------
 3 files changed, 556 insertions(+), 447 deletions(-)
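
The changeset is a largely mechanical style cleanup: it reworks the libxc save/restore
sources to match the Xen coding style without changing behaviour. As a rough
before/after sketch, paraphrasing the read_exact() hunk below (the _old/_new suffixes
are illustrative labels, not names from the patch; the snippet assumes <unistd.h> and
<errno.h> for read() and errno):

    /* Before (old style): compact conditions, opening brace on the same line,
     * redundant braces around single statements, trivial ternary on return. */
    static int read_exact_old(int fd, void *buf, size_t count)
    {
        int r = 0, s;
        unsigned char *b = buf;

        while (r < count) {
            s = read(fd, &b[r], count - r);
            if ((s == -1) && (errno == EINTR))
                continue;
            if (s <= 0) {
                break;
            }
            r += s;
        }

        return (r == count) ? 1 : 0;
    }

    /* After (Xen style): spaces inside the condition parentheses, opening brace
     * on its own line, redundant braces and the trivial ternary dropped.
     * Elsewhere in the patch, '//' comments are rewritten as C comments. */
    static int read_exact_new(int fd, void *buf, size_t count)
    {
        int r = 0, s;
        unsigned char *b = buf;

        while ( r < count )
        {
            s = read(fd, &b[r], count - r);
            if ( (s == -1) && (errno == EINTR) )
                continue;
            if ( s <= 0 )
                break;
            r += s;
        }

        return (r == count);
    }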

diff -r b0aaa113a60f -r 7cc1d532f9ee tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Fri Apr 06 22:48:25 2007 +0100
+++ b/tools/libxc/xc_domain_restore.c   Mon Apr 09 11:11:23 2007 +0100
@@ -62,17 +62,17 @@ read_exact(int fd, void *buf, size_t cou
     int r = 0, s;
     unsigned char *b = buf;
 
-    while (r < count) {
+    while ( r < count )
+    {
         s = read(fd, &b[r], count - r);
-        if ((s == -1) && (errno == EINTR))
+        if ( (s == -1) && (errno == EINTR) )
             continue;
-        if (s <= 0) {
+        if ( s <= 0 )
             break;
-        }
         r += s;
     }
 
-    return (r == count) ? 1 : 0;
+    return (r == count);
 }
 
 /*
@@ -93,20 +93,21 @@ static int uncanonicalize_pagetable(int 
     pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
 
     /* First pass: work out how many (if any) MFNs we need to alloc */
-    for(i = 0; i < pte_last; i++) {
-        
-        if(pt_levels == 2)
+    for ( i = 0; i < pte_last; i++ )
+    {
+        if ( pt_levels == 2 )
             pte = ((uint32_t *)page)[i];
         else
             pte = ((uint64_t *)page)[i];
-        
+
         /* XXX SMH: below needs fixing for PROT_NONE etc */
-        if(!(pte & _PAGE_PRESENT))
-            continue; 
+        if ( !(pte & _PAGE_PRESENT) )
+            continue;
         
         pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
         
-        if(pfn >= p2m_size) {
+        if ( pfn >= p2m_size )
+        {
             /* This "page table page" is probably not one; bail. */
             ERROR("Frame number in type %lu page table is out of range: "
                   "i=%d pfn=0x%lx p2m_size=%lu",
@@ -114,16 +115,18 @@ static int uncanonicalize_pagetable(int 
             return 0;
         }
         
-        if(p2m[pfn] == INVALID_P2M_ENTRY) {
+        if ( p2m[pfn] == INVALID_P2M_ENTRY )
+        {
             /* Have a 'valid' PFN without a matching MFN - need to alloc */
             p2m_batch[nr_mfns++] = pfn; 
         }
     }
-    
-    
-    /* Allocate the requistite number of mfns */
-    if (nr_mfns && xc_domain_memory_populate_physmap(
-            xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
+
+    /* Allocate the requisite number of mfns. */
+    if ( nr_mfns &&
+         (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
+                                            p2m_batch) != 0) )
+    { 
         ERROR("Failed to allocate memory for batch.!\n"); 
         errno = ENOMEM;
         return 0; 
@@ -131,26 +134,26 @@ static int uncanonicalize_pagetable(int 
     
     /* Second pass: uncanonicalize each present PTE */
     nr_mfns = 0;
-    for(i = 0; i < pte_last; i++) {
-
-        if(pt_levels == 2)
+    for ( i = 0; i < pte_last; i++ )
+    {
+        if ( pt_levels == 2 )
             pte = ((uint32_t *)page)[i];
         else
             pte = ((uint64_t *)page)[i];
         
         /* XXX SMH: below needs fixing for PROT_NONE etc */
-        if(!(pte & _PAGE_PRESENT))
+        if ( !(pte & _PAGE_PRESENT) )
             continue;
         
         pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
         
-        if(p2m[pfn] == INVALID_P2M_ENTRY)
+        if ( p2m[pfn] == INVALID_P2M_ENTRY )
             p2m[pfn] = p2m_batch[nr_mfns++];
 
         pte &= ~MADDR_MASK_X86;
         pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
 
-        if(pt_levels == 2)
+        if ( pt_levels == 2 )
             ((uint32_t *)page)[i] = (uint32_t)pte;
         else
             ((uint64_t *)page)[i] = (uint64_t)pte;
@@ -161,62 +164,72 @@ static int uncanonicalize_pagetable(int 
 
 
 /* Load the p2m frame list, plus potential extended info chunk */
-static xen_pfn_t * load_p2m_frame_list(int io_fd, int *pae_extended_cr3)
+static xen_pfn_t *load_p2m_frame_list(int io_fd, int *pae_extended_cr3)
 {
     xen_pfn_t *p2m_frame_list;
     vcpu_guest_context_t ctxt;
 
-    if (!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
+    if ( (p2m_frame_list = malloc(P2M_FL_SIZE)) == NULL )
+    {
         ERROR("Couldn't allocate p2m_frame_list array");
         return NULL;
     }
     
     /* Read first entry of P2M list, or extended-info signature (~0UL). */
-    if (!read_exact(io_fd, p2m_frame_list, sizeof(long))) {
-            ERROR("read extended-info signature failed");
-            return NULL;
-        }
+    if ( !read_exact(io_fd, p2m_frame_list, sizeof(long)) )
+    {
+        ERROR("read extended-info signature failed");
+        return NULL;
+    }
     
-    if (p2m_frame_list[0] == ~0UL) {
+    if ( p2m_frame_list[0] == ~0UL )
+    {
         uint32_t tot_bytes;
         
         /* Next 4 bytes: total size of following extended info. */
-        if (!read_exact(io_fd, &tot_bytes, sizeof(tot_bytes))) {
+        if ( !read_exact(io_fd, &tot_bytes, sizeof(tot_bytes)) )
+        {
             ERROR("read extended-info size failed");
             return NULL;
         }
         
-        while (tot_bytes) {
+        while ( tot_bytes )
+        {
             uint32_t chunk_bytes;
             char     chunk_sig[4];
             
             /* 4-character chunk signature + 4-byte remaining chunk size. */
-            if (!read_exact(io_fd, chunk_sig, sizeof(chunk_sig)) ||
-                !read_exact(io_fd, &chunk_bytes, sizeof(chunk_bytes))) {
+            if ( !read_exact(io_fd, chunk_sig, sizeof(chunk_sig)) ||
+                 !read_exact(io_fd, &chunk_bytes, sizeof(chunk_bytes)) )
+            {
                 ERROR("read extended-info chunk signature failed");
                 return NULL;
             }
             tot_bytes -= 8;
-            
+
             /* VCPU context structure? */
-            if (!strncmp(chunk_sig, "vcpu", 4)) {
-                if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
+            if ( !strncmp(chunk_sig, "vcpu", 4) )
+            {
+                if ( !read_exact(io_fd, &ctxt, sizeof(ctxt)) )
+                {
                     ERROR("read extended-info vcpu context failed");
                     return NULL;
                 }
                 tot_bytes   -= sizeof(struct vcpu_guest_context);
                 chunk_bytes -= sizeof(struct vcpu_guest_context);
                 
-                if (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3))
+                if ( ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3) )
                     *pae_extended_cr3 = 1;
             }
             
             /* Any remaining bytes of this chunk: read and discard. */
-            while (chunk_bytes) {
+            while ( chunk_bytes )
+            {
                 unsigned long sz = chunk_bytes;
                 if ( sz > P2M_FL_SIZE )
                     sz = P2M_FL_SIZE;
-                if (!read_exact(io_fd, p2m_frame_list, sz)) {
+                if ( !read_exact(io_fd, p2m_frame_list, sz) )
+                {
                     ERROR("read-and-discard extended-info chunk bytes failed");
                     return NULL;
                 }
@@ -224,24 +237,24 @@ static xen_pfn_t * load_p2m_frame_list(i
                 tot_bytes   -= sz;
             }
         }
-        
+
         /* Now read the real first entry of P2M list. */
-        if (!read_exact(io_fd, p2m_frame_list, sizeof(long))) {
+        if ( !read_exact(io_fd, p2m_frame_list, sizeof(long)) )
+        {
             ERROR("read first entry of p2m_frame_list failed");
             return NULL;
         }
     }
-    
+
     /* First entry is already read into the p2m array. */
-    if (!read_exact(io_fd, &p2m_frame_list[1], P2M_FL_SIZE - sizeof(long))) {
-            ERROR("read p2m_frame_list failed");
-            return NULL;
+    if ( !read_exact(io_fd, &p2m_frame_list[1], P2M_FL_SIZE - sizeof(long)) )
+    {
+        ERROR("read p2m_frame_list failed");
+        return NULL;
     }
     
     return p2m_frame_list;
 }
-
-
 
 int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
@@ -323,20 +336,23 @@ int xc_domain_restore(int xc_handle, int
         domctl.cmd    = XEN_DOMCTL_set_address_size;
         domctl.u.address_size.size = sizeof(unsigned long) * 8;
         rc = do_domctl(xc_handle, &domctl);
-        if ( rc != 0 ) {
+        if ( rc != 0 )
+        {
             ERROR("Unable to set guest address size.");
             goto out;
         }
         rc = 1;
     }
 
-    if(!get_platform_info(xc_handle, dom,
-                          &max_mfn, &hvirt_start, &pt_levels)) {
+    if ( !get_platform_info(xc_handle, dom,
+                            &max_mfn, &hvirt_start, &pt_levels) )
+    {
         ERROR("Unable to get platform info.");
         return 1;
     }
 
-    if (lock_pages(&ctxt, sizeof(ctxt))) {
+    if ( lock_pages(&ctxt, sizeof(ctxt)) )
+    {
         /* needed for build domctl, but might as well do early */
         ERROR("Unable to lock ctxt");
         return 1;
@@ -356,19 +372,22 @@ int xc_domain_restore(int xc_handle, int
     region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
     p2m_batch  = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
 
-    if ((p2m == NULL) || (pfn_type == NULL) ||
-        (region_mfn == NULL) || (p2m_batch == NULL)) {
+    if ( (p2m == NULL) || (pfn_type == NULL) ||
+         (region_mfn == NULL) || (p2m_batch == NULL) )
+    {
         ERROR("memory alloc failed");
         errno = ENOMEM;
         goto out;
     }
 
-    if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
+    if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+    {
         ERROR("Could not lock region_mfn");
         goto out;
     }
 
-    if (lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
+    if ( lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
+    {
         ERROR("Could not lock p2m_batch");
         goto out;
     }
@@ -376,7 +395,8 @@ int xc_domain_restore(int xc_handle, int
     /* Get the domain's shared-info frame. */
     domctl.cmd = XEN_DOMCTL_getdomaininfo;
     domctl.domain = (domid_t)dom;
-    if (xc_domctl(xc_handle, &domctl) < 0) {
+    if ( xc_domctl(xc_handle, &domctl) < 0 )
+    {
         ERROR("Could not get information on new domain");
         goto out;
     }
@@ -402,8 +422,8 @@ int xc_domain_restore(int xc_handle, int
     prev_pc = 0;
 
     n = m = 0;
-    while (1) {
-
+    for ( ; ; )
+    {
         int j, nr_mfns = 0; 
 
         this_pc = (n * 100) / p2m_size;
@@ -413,39 +433,45 @@ int xc_domain_restore(int xc_handle, int
             prev_pc = this_pc;
         }
 
-        if (!read_exact(io_fd, &j, sizeof(int))) {
+        if ( !read_exact(io_fd, &j, sizeof(int)) )
+        {
             ERROR("Error when reading batch size");
             goto out;
         }
 
         PPRINTF("batch %d\n",j);
 
-        if (j == -1) {
+        if ( j == -1 )
+        {
             verify = 1;
             DPRINTF("Entering page verify mode\n");
             continue;
         }
 
-        if (j == -2) {
+        if ( j == -2 )
+        {
             new_ctxt_format = 1;
-            if (!read_exact(io_fd, &max_vcpu_id, sizeof(int)) ||
-                (max_vcpu_id >= 64) ||
-                !read_exact(io_fd, &vcpumap, sizeof(uint64_t))) {
+            if ( !read_exact(io_fd, &max_vcpu_id, sizeof(int)) ||
+                 (max_vcpu_id >= 64) ||
+                 !read_exact(io_fd, &vcpumap, sizeof(uint64_t)) )
+            {
                 ERROR("Error when reading max_vcpu_id");
                 goto out;
             }
             continue;
         }
 
-        if (j == 0)
+        if ( j == 0 )
             break;  /* our work here is done */
 
-        if (j > MAX_BATCH_SIZE) {
+        if ( j > MAX_BATCH_SIZE )
+        {
             ERROR("Max batch size exceeded. Giving up.");
             goto out;
         }
 
-        if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
+        if ( !read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long)) )
+        {
             ERROR("Error when reading region pfn types");
             goto out;
         }
@@ -466,10 +492,11 @@ int xc_domain_restore(int xc_handle, int
             }
         } 
 
-
         /* Now allocate a bunch of mfns for this batch */
-        if (nr_mfns && xc_domain_memory_populate_physmap(
-                xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
+        if ( nr_mfns &&
+             (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
+                                                0, p2m_batch) != 0) )
+        { 
             ERROR("Failed to allocate memory for batch.!\n"); 
             errno = ENOMEM;
             goto out;
@@ -483,11 +510,12 @@ int xc_domain_restore(int xc_handle, int
             pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
             pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB)
+            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
                 region_mfn[i] = ~0UL; /* map will fail but we don't care */
             else 
             {
-                if (p2m[pfn] == INVALID_P2M_ENTRY) {
+                if ( p2m[pfn] == INVALID_P2M_ENTRY )
+                {
                     /* We just allocated a new mfn above; update p2m */
                     p2m[pfn] = p2m_batch[nr_mfns++]; 
                     nr_pfns++; 
@@ -534,7 +562,8 @@ int xc_domain_restore(int xc_handle, int
             /* In verify mode, we use a copy; otherwise we work in place */
             page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
 
-            if (!read_exact(io_fd, page, PAGE_SIZE)) {
+            if ( !read_exact(io_fd, page, PAGE_SIZE) )
+            {
                 ERROR("Error when reading page (type was %lx)", pagetype);
                 goto out;
             }
@@ -579,13 +608,11 @@ int xc_domain_restore(int xc_handle, int
 
             }
 
-
-            if (verify) {
-
+            if ( verify )
+            {
                 int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
-
-                if (res) {
-
+                if ( res )
+                {
                     int v;
 
                     DPRINTF("************** pfn=%lx type=%lx gotcs=%08lx "
@@ -593,20 +620,21 @@ int xc_domain_restore(int xc_handle, int
                             csum_page(region_base + i*PAGE_SIZE),
                             csum_page(buf));
 
-                    for (v = 0; v < 4; v++) {
-
+                    for ( v = 0; v < 4; v++ )
+                    {
                         unsigned long *p = (unsigned long *)
                             (region_base + i*PAGE_SIZE);
-                        if (buf[v] != p[v])
+                        if ( buf[v] != p[v] )
                             DPRINTF("    %d: %08lx %08lx\n", v, buf[v], p[v]);
                     }
                 }
             }
 
-            if (!hvm 
-                && xc_add_mmu_update(xc_handle, mmu,
-                                     (((unsigned long long)mfn) << PAGE_SHIFT)
-                                     | MMU_MACHPHYS_UPDATE, pfn)) {
+            if ( !hvm &&
+                 xc_add_mmu_update(xc_handle, mmu,
+                                   (((unsigned long long)mfn) << PAGE_SHIFT)
+                                   | MMU_MACHPHYS_UPDATE, pfn) )
+            {
                 ERROR("failed machpys update mfn=%lx pfn=%lx", mfn, pfn);
                 goto out;
             }
@@ -631,7 +659,8 @@ int xc_domain_restore(int xc_handle, int
      * Ensure we flush all machphys updates before potential PAE-specific
      * reallocations below.
      */
-    if (!hvm && xc_flush_mmu_updates(xc_handle, mmu)) {
+    if ( !hvm && xc_flush_mmu_updates(xc_handle, mmu) )
+    {
         ERROR("Error doing flush_mmu_updates()");
         goto out;
     }
@@ -666,9 +695,9 @@ int xc_domain_restore(int xc_handle, int
         *store_mfn = magic_pfns[2];
 
         /* Read vcpu contexts */
-        for (i = 0; i <= max_vcpu_id; i++) 
-        {
-            if (!(vcpumap & (1ULL << i)))
+        for ( i = 0; i <= max_vcpu_id; i++ )
+        {
+            if ( !(vcpumap & (1ULL << i)) )
                 continue;
 
             if ( !read_exact(io_fd, &(ctxt), sizeof(ctxt)) )
@@ -715,8 +744,8 @@ int xc_domain_restore(int xc_handle, int
 
     /* Non-HVM guests only from here on */
 
-    if ((pt_levels == 3) && !pae_extended_cr3) {
-
+    if ( (pt_levels == 3) && !pae_extended_cr3 )
+    {
         /*
         ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
         ** is a little awkward and involves (a) finding all such PGDs and
@@ -746,21 +775,24 @@ int xc_domain_restore(int xc_handle, int
                     xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                          PROT_READ, p2m[i]);
 
-                for(j = 0; j < 4; j++)
+                for ( j = 0; j < 4; j++ )
                     l3ptes[j] = l3tab[j];
 
                 munmap(l3tab, PAGE_SIZE);
 
-                if (!(new_mfn=xc_make_page_below_4G(xc_handle, dom, p2m[i]))) {
+                new_mfn = xc_make_page_below_4G(xc_handle, dom, p2m[i]);
+                if ( !new_mfn )
+                {
                     ERROR("Couldn't get a page below 4GB :-(");
                     goto out;
                 }
 
                 p2m[i] = new_mfn;
-                if (xc_add_mmu_update(xc_handle, mmu,
-                                      (((unsigned long long)new_mfn)
-                                       << PAGE_SHIFT) |
-                                      MMU_MACHPHYS_UPDATE, i)) {
+                if ( xc_add_mmu_update(xc_handle, mmu,
+                                       (((unsigned long long)new_mfn)
+                                        << PAGE_SHIFT) |
+                                       MMU_MACHPHYS_UPDATE, i) )
+                {
                     ERROR("Couldn't m2p on PAE root pgdir");
                     goto out;
                 }
@@ -769,11 +801,10 @@ int xc_domain_restore(int xc_handle, int
                     xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                          PROT_READ | PROT_WRITE, p2m[i]);
 
-                for(j = 0; j < 4; j++)
+                for ( j = 0; j < 4; j++ )
                     l3tab[j] = l3ptes[j];
 
                 munmap(l3tab, PAGE_SIZE);
-
             }
         }
 
@@ -789,19 +820,22 @@ int xc_domain_restore(int xc_handle, int
                 j++;
             }
 
-            if(i == (p2m_size-1) || j == MAX_BATCH_SIZE) {
-
-                if (!(region_base = xc_map_foreign_batch(
-                          xc_handle, dom, PROT_READ | PROT_WRITE,
-                          region_mfn, j))) {
+            if ( (i == (p2m_size-1)) || (j == MAX_BATCH_SIZE) )
+            {
+                region_base = xc_map_foreign_batch(
+                    xc_handle, dom, PROT_READ | PROT_WRITE, region_mfn, j);
+                if ( region_base == NULL )
+                {
                     ERROR("map batch failed");
                     goto out;
                 }
 
-                for(k = 0; k < j; k++) {
-                    if(!uncanonicalize_pagetable(xc_handle, dom, 
-                                                 XEN_DOMCTL_PFINFO_L1TAB,
-                                                 region_base + k*PAGE_SIZE)) {
+                for ( k = 0; k < j; k++ )
+                {
+                    if ( !uncanonicalize_pagetable(
+                        xc_handle, dom, XEN_DOMCTL_PFINFO_L1TAB,
+                        region_base + k*PAGE_SIZE) )
+                    {
                         ERROR("failed uncanonicalize pt!");
                         goto out;
                     }
@@ -812,7 +846,8 @@ int xc_domain_restore(int xc_handle, int
             }
         }
 
-        if (xc_flush_mmu_updates(xc_handle, mmu)) {
+        if ( xc_flush_mmu_updates(xc_handle, mmu) )
+        {
             ERROR("Error doing xc_flush_mmu_updates()");
             goto out;
         }
@@ -854,8 +889,10 @@ int xc_domain_restore(int xc_handle, int
         nr_pins++;
 
         /* Batch full? Then flush. */
-        if (nr_pins == MAX_PIN_BATCH) {
-            if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) {
+        if ( nr_pins == MAX_PIN_BATCH )
+        {
+            if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
+            {
                 ERROR("Failed to pin batch of %d page tables", nr_pins);
                 goto out;
             }
@@ -864,7 +901,8 @@ int xc_domain_restore(int xc_handle, int
     }
 
     /* Flush final partial batch. */
-    if ((nr_pins != 0) && (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0)) {
+    if ( (nr_pins != 0) && (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) )
+    {
         ERROR("Failed to pin batch of %d page tables", nr_pins);
         goto out;
     }
@@ -878,36 +916,40 @@ int xc_domain_restore(int xc_handle, int
         unsigned long *pfntab;
         int nr_frees, rc;
 
-        if (!read_exact(io_fd, &count, sizeof(count))) {
+        if ( !read_exact(io_fd, &count, sizeof(count)) )
+        {
             ERROR("Error when reading pfn count");
             goto out;
         }
 
-        if(!(pfntab = malloc(sizeof(unsigned long) * count))) {
+        if ( !(pfntab = malloc(sizeof(unsigned long) * count)) )
+        {
             ERROR("Out of memory");
             goto out;
         }
 
-        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
+        if ( !read_exact(io_fd, pfntab, sizeof(unsigned long)*count) )
+        {
             ERROR("Error when reading pfntab");
             goto out;
         }
 
         nr_frees = 0; 
-        for (i = 0; i < count; i++) {
-
+        for ( i = 0; i < count; i++ )
+        {
             unsigned long pfn = pfntab[i];
 
-            if(p2m[pfn] != INVALID_P2M_ENTRY) {
+            if ( p2m[pfn] != INVALID_P2M_ENTRY )
+            {
                 /* pfn is not in physmap now, but was at some point during 
                    the save/migration process - need to free it */
                 pfntab[nr_frees++] = p2m[pfn];
-                p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map
-            }
-        }
-
-        if (nr_frees > 0) {
-
+                p2m[pfn]  = INVALID_P2M_ENTRY; /* not in pseudo-physical map */
+            }
+        }
+
+        if ( nr_frees > 0 )
+        {
             struct xen_memory_reservation reservation = {
                 .nr_extents   = nr_frees,
                 .extent_order = 0,
@@ -915,20 +957,24 @@ int xc_domain_restore(int xc_handle, int
             };
             set_xen_guest_handle(reservation.extent_start, pfntab);
 
-            if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
-                                   &reservation)) != nr_frees) {
+            if ( (rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
+                                    &reservation)) != nr_frees )
+            {
                 ERROR("Could not decrease reservation : %d", rc);
                 goto out;
-            } else
+            }
+            else
                 DPRINTF("Decreased reservation by %d pages\n", count);
         }
     }
 
-    for (i = 0; i <= max_vcpu_id; i++) {
-        if (!(vcpumap & (1ULL << i)))
+    for ( i = 0; i <= max_vcpu_id; i++ )
+    {
+        if ( !(vcpumap & (1ULL << i)) )
             continue;
 
-        if (!read_exact(io_fd, &ctxt, sizeof(ctxt))) {
+        if ( !read_exact(io_fd, &ctxt, sizeof(ctxt)) )
+        {
             ERROR("Error when reading ctxt %d", i);
             goto out;
         }
@@ -936,14 +982,16 @@ int xc_domain_restore(int xc_handle, int
         if ( !new_ctxt_format )
             ctxt.flags |= VGCF_online;
 
-        if (i == 0) {
+        if ( i == 0 )
+        {
             /*
              * Uncanonicalise the suspend-record frame number and poke
              * resume record.
              */
             pfn = ctxt.user_regs.edx;
-            if ((pfn >= p2m_size) ||
-                (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
+            if ( (pfn >= p2m_size) ||
+                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+            {
                 ERROR("Suspend record frame number is bad");
                 goto out;
             }
@@ -962,15 +1010,18 @@ int xc_domain_restore(int xc_handle, int
         }
 
         /* Uncanonicalise each GDT frame number. */
-        if (ctxt.gdt_ents > 8192) {
+        if ( ctxt.gdt_ents > 8192 )
+        {
             ERROR("GDT entry count out of range");
             goto out;
         }
 
-        for (j = 0; (512*j) < ctxt.gdt_ents; j++) {
+        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ )
+        {
             pfn = ctxt.gdt_frames[j];
-            if ((pfn >= p2m_size) ||
-                (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
+            if ( (pfn >= p2m_size) ||
+                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+            {
                 ERROR("GDT frame number is bad");
                 goto out;
             }
@@ -980,14 +1031,16 @@ int xc_domain_restore(int xc_handle, int
         /* Uncanonicalise the page table base pointer. */
         pfn = xen_cr3_to_pfn(ctxt.ctrlreg[3]);
 
-        if (pfn >= p2m_size) {
+        if ( pfn >= p2m_size )
+        {
             ERROR("PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
                   pfn, p2m_size, pfn_type[pfn]);
             goto out;
         }
 
         if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
+             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+        {
             ERROR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
                   pfn, p2m_size, pfn_type[pfn],
                   (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
@@ -1001,14 +1054,16 @@ int xc_domain_restore(int xc_handle, int
         {
             pfn = xen_cr3_to_pfn(ctxt.ctrlreg[1]);
 
-            if (pfn >= p2m_size) {
+            if ( pfn >= p2m_size )
+            {
                 ERROR("User PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
                       pfn, p2m_size, pfn_type[pfn]);
                 goto out;
             }
 
             if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
+                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+            {
                 ERROR("User PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
                       pfn, p2m_size, pfn_type[pfn],
                       (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
@@ -1023,14 +1078,16 @@ int xc_domain_restore(int xc_handle, int
         domctl.u.vcpucontext.vcpu = i;
         set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
         rc = xc_domctl(xc_handle, &domctl);
-        if (rc != 0) {
+        if ( rc != 0 )
+        {
             ERROR("Couldn't build vcpu%d", i);
             goto out;
         }
         rc = 1;
     }
 
-    if (!read_exact(io_fd, shared_info_page, PAGE_SIZE)) {
+    if ( !read_exact(io_fd, shared_info_page, PAGE_SIZE) )
+    {
         ERROR("Error when reading shared info page");
         goto out;
     }
@@ -1048,9 +1105,11 @@ int xc_domain_restore(int xc_handle, int
     munmap(page, PAGE_SIZE);
 
     /* Uncanonicalise the pfn-to-mfn table frame-number list. */
-    for (i = 0; i < P2M_FL_ENTRIES; i++) {
+    for ( i = 0; i < P2M_FL_ENTRIES; i++ )
+    {
         pfn = p2m_frame_list[i];
-        if ((pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB)) {
+        if ( (pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+        {
             ERROR("PFN-to-MFN frame number is bad");
             goto out;
         }
@@ -1059,8 +1118,9 @@ int xc_domain_restore(int xc_handle, int
     }
 
     /* Copy the P2M we've constructed to the 'live' P2M */
-    if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
-                                          p2m_frame_list, P2M_FL_ENTRIES))) {
+    if ( !(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
+                                           p2m_frame_list, P2M_FL_ENTRIES)) )
+    {
         ERROR("Couldn't map p2m table");
         goto out;
     }
diff -r b0aaa113a60f -r 7cc1d532f9ee tools/libxc/xc_hvm_save.c
--- a/tools/libxc/xc_hvm_save.c Fri Apr 06 22:48:25 2007 +0100
+++ b/tools/libxc/xc_hvm_save.c Mon Apr 09 11:11:23 2007 +0100
@@ -119,8 +119,8 @@ static uint64_t llgettimeofday(void)
 
 static uint64_t tv_delta(struct timeval *new, struct timeval *old)
 {
-    return ((new->tv_sec - old->tv_sec)*1000000 ) +
-        (new->tv_usec - old->tv_usec);
+    return (((new->tv_sec - old->tv_sec)*1000000) +
+            (new->tv_usec - old->tv_usec));
 }
 
 
@@ -130,9 +130,7 @@ static uint64_t tv_delta(struct timeval 
 
 static inline ssize_t write_exact(int fd, void *buf, size_t count)
 {
-    if(write(fd, buf, count) != count)
-        return 0;
-    return 1;
+    return (write(fd, buf, count) == count);
 }
 
 static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
@@ -156,15 +154,14 @@ static int print_stats(int xc_handle, ui
         DPRINTF("ARRHHH!!\n");
 
     wall_delta = tv_delta(&wall_now,&wall_last)/1000;
-
-    if (wall_delta == 0) wall_delta = 1;
+    if ( wall_delta == 0 )
+        wall_delta = 1;
 
     d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
     d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
 
-    if (print)
-        DPRINTF(
-                "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
+    if ( print )
+        DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
                 "dirtied %dMb/s %" PRId32 " pages\n",
                 wall_delta,
                 (int)((d0_cpu_delta*100)/wall_delta),
@@ -189,18 +186,19 @@ static int analysis_phase(int xc_handle,
 
     start = llgettimeofday();
 
-    for (j = 0; j < runs; j++) {
+    for ( j = 0; j < runs; j++ )
+    {
         int i;
 
         xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                           arr, pfn_array_size, NULL, 0, NULL);
         DPRINTF("#Flush\n");
-        for ( i = 0; i < 40; i++ ) {
+        for ( i = 0; i < 40; i++ )
+        {
             usleep(50000);
             now = llgettimeofday();
             xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
                               NULL, 0, NULL, 0, &stats);
-
             DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
                     ((now-start)+500)/1000,
                     stats.fault_count, stats.dirty_count);
@@ -216,39 +214,39 @@ static int suspend_and_state(int (*suspe
 {
     int i = 0;
 
-    if (!(*suspend)(dom)) {
+    if ( !(*suspend)(dom) )
+    {
         ERROR("Suspend request failed");
         return -1;
     }
 
  retry:
 
-    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
+    if ( xc_domain_getinfo(xc_handle, dom, 1, info) != 1 )
+    {
         ERROR("Could not get domain info");
         return -1;
     }
 
-    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
+    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
         ERROR("Could not get vcpu context");
 
-
-    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
-        return 0; // success        
-
-    if (info->paused) {
-        // try unpausing domain, wait, and retest
+    if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_suspend) )
+        return 0; /* success */
+
+    if ( info->paused )
+    {
+        /* Try unpausing domain, wait, and retest. */
         xc_domain_unpause( xc_handle, dom );
-
         ERROR("Domain was paused. Wait and re-test.");
-        usleep(10000);  // 10ms
-
+        usleep(10000);  /* 10ms */
         goto retry;
     }
 
-
-    if( ++i < 100 ) {
+    if ( ++i < 100 )
+    {
         ERROR("Retry suspend domain.");
-        usleep(10000);  // 10ms
+        usleep(10000); /* 10ms */
         goto retry;
     }
 
@@ -350,8 +348,6 @@ int xc_hvm_save(int xc_handle, int io_fd
             ERROR("Couldn't enable shadow mode");
             goto out;
         }
-
-        DPRINTF("hvm domain live migration debug start: logdirty enable.\n");
     }
     else
     {
@@ -378,7 +374,6 @@ int xc_hvm_save(int xc_handle, int io_fd
         ERROR("Error when writing to state file (1)");
         goto out;
     }
-    
 
     /* pretend we sent all the pages last iteration */
     sent_last_iter = pfn_array_size;
@@ -452,7 +447,7 @@ int xc_hvm_save(int xc_handle, int io_fd
         prev_pc = 0;
         N=0;
 
-        DPRINTF("Saving HVM domain memory pages: iter %d   0%%", iter);
+        DPRINTF("Saving memory pages: iter %d   0%%", iter);
 
         while ( N < pfn_array_size )
         {
@@ -474,7 +469,7 @@ int xc_hvm_save(int xc_handle, int io_fd
                     pfn_array_size, NULL, 0, NULL);
                 if ( rc != pfn_array_size )
                 {
-                    ERROR("Error peeking HVM shadow bitmap");
+                    ERROR("Error peeking shadow bitmap");
                     goto out;
                 }
             }
@@ -703,7 +698,7 @@ int xc_hvm_save(int xc_handle, int io_fd
     /* save vcpu/vmcs contexts */
     for ( i = 0; i < nr_vcpus; i++ )
     {
-        if (!(vcpumap & (1ULL << i)))
+        if ( !(vcpumap & (1ULL << i)) )
             continue;
 
         if ( xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
diff -r b0aaa113a60f -r 7cc1d532f9ee tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c       Fri Apr 06 22:48:25 2007 +0100
+++ b/tools/libxc/xc_linux_save.c       Mon Apr 09 11:11:23 2007 +0100
@@ -27,7 +27,6 @@
 #define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
 #define DEF_MAX_FACTOR   3   /* never send more than 3x p2m_size  */
 
-
 /* max mfn of the whole machine */
 static unsigned long max_mfn;
 
@@ -56,10 +55,9 @@ static unsigned long m2p_mfn0;
  * in the guest's pseudophysical map.
  */
 #define MFN_IS_IN_PSEUDOPHYS_MAP(_mfn)          \
-(((_mfn) < (max_mfn)) &&                        \
- ((mfn_to_pfn(_mfn) < (p2m_size)) &&               \
-  (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
-
+    (((_mfn) < (max_mfn)) &&                    \
+     ((mfn_to_pfn(_mfn) < (p2m_size)) &&        \
+      (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
 
 /* Returns TRUE if MFN is successfully converted to a PFN. */
 #define translate_mfn_to_pfn(_pmfn)                             \
@@ -116,7 +114,7 @@ static inline int count_bits ( int nr, v
     int i, count = 0;
     volatile unsigned long *p = (volatile unsigned long *)addr;
     /* We know that the array is padded to unsigned long. */
-    for( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
+    for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
         count += hweight32(*p);
     return count;
 }
@@ -151,9 +149,6 @@ static inline int permute( int i, int nr
     return i;
 }
 
-
-
-
 static uint64_t tv_to_us(struct timeval *new)
 {
     return (new->tv_sec * 1000000) + new->tv_usec;
@@ -168,8 +163,8 @@ static uint64_t llgettimeofday(void)
 
 static uint64_t tv_delta(struct timeval *new, struct timeval *old)
 {
-    return ((new->tv_sec - old->tv_sec)*1000000 ) +
-        (new->tv_usec - old->tv_usec);
+    return (((new->tv_sec - old->tv_sec)*1000000) +
+            (new->tv_usec - old->tv_usec));
 }
 
 static int noncached_write(int fd, int live, void *buffer, int len) 
@@ -179,8 +174,8 @@ static int noncached_write(int fd, int l
     int rc = write(fd,buffer,len);
 
     write_count += len;
-
-    if (write_count >= MAX_PAGECACHE_USAGE*PAGE_SIZE) {
+    if ( write_count >= (MAX_PAGECACHE_USAGE * PAGE_SIZE) )
+    {
         /* Time to discard cache - dont care if this fails */
         discard_file_cache(fd, 0 /* no flush */);
         write_count = 0;
@@ -190,7 +185,6 @@ static int noncached_write(int fd, int l
 }
 
 #ifdef ADAPTIVE_SAVE
-
 
 /*
 ** We control the rate at which we transmit (or save) to minimize impact
@@ -200,26 +194,22 @@ static int noncached_write(int fd, int l
 #define MAX_MBIT_RATE    500      /* maximum transmit rate for migrate */
 #define START_MBIT_RATE  100      /* initial transmit rate for migrate */
 
-
 /* Scaling factor to convert between a rate (in Mb/s) and time (in usecs) */
 #define RATE_TO_BTU      781250
 
 /* Amount in bytes we allow ourselves to send in a burst */
 #define BURST_BUDGET (100*1024)
 
-
 /* We keep track of the current and previous transmission rate */
 static int mbit_rate, ombit_rate = 0;
 
 /* Have we reached the maximum transmission rate? */
 #define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)
 
-
 static inline void initialize_mbit_rate()
 {
     mbit_rate = START_MBIT_RATE;
 }
-
 
 static int ratewrite(int io_fd, int live, void *buf, int n)
 {
@@ -230,39 +220,47 @@ static int ratewrite(int io_fd, int live
     struct timespec delay;
     long long delta;
 
-    if (START_MBIT_RATE == 0)
+    if ( START_MBIT_RATE == 0 )
         return noncached_write(io_fd, live, buf, n);
 
     budget -= n;
-    if (budget < 0) {
-        if (mbit_rate != ombit_rate) {
+    if ( budget < 0 )
+    {
+        if ( mbit_rate != ombit_rate )
+        {
             burst_time_us = RATE_TO_BTU / mbit_rate;
             ombit_rate = mbit_rate;
             DPRINTF("rate limit: %d mbit/s burst budget %d slot time %d\n",
                     mbit_rate, BURST_BUDGET, burst_time_us);
         }
-        if (last_put.tv_sec == 0) {
+        if ( last_put.tv_sec == 0 )
+        {
             budget += BURST_BUDGET;
             gettimeofday(&last_put, NULL);
-        } else {
-            while (budget < 0) {
+        }
+        else
+        {
+            while ( budget < 0 )
+            {
                 gettimeofday(&now, NULL);
                 delta = tv_delta(&now, &last_put);
-                while (delta > burst_time_us) {
+                while ( delta > burst_time_us )
+                {
                     budget += BURST_BUDGET;
                     last_put.tv_usec += burst_time_us;
-                    if (last_put.tv_usec > 1000000) {
+                    if ( last_put.tv_usec > 1000000 )
+                    {
                         last_put.tv_usec -= 1000000;
                         last_put.tv_sec++;
                     }
                     delta -= burst_time_us;
                 }
-                if (budget > 0)
+                if ( budget > 0 )
                     break;
                 delay.tv_sec = 0;
                 delay.tv_nsec = 1000 * (burst_time_us - delta);
-                while (delay.tv_nsec > 0)
-                    if (nanosleep(&delay, &delay) == 0)
+                while ( delay.tv_nsec > 0 )
+                    if ( nanosleep(&delay, &delay) == 0 )
                         break;
             }
         }
@@ -278,15 +276,10 @@ static int ratewrite(int io_fd, int live
 
 #endif
 
-
 static inline ssize_t write_exact(int fd, void *buf, size_t count)
 {
-    if(write(fd, buf, count) != count)
-        return 0;
-    return 1;
-}
-
-
+    return (write(fd, buf, count) == count);
+}
 
 static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
                        xc_shadow_op_stats_t *stats, int print)
@@ -309,15 +302,14 @@ static int print_stats(int xc_handle, ui
         DPRINTF("ARRHHH!!\n");
 
     wall_delta = tv_delta(&wall_now,&wall_last)/1000;
-
-    if (wall_delta == 0) wall_delta = 1;
+    if ( wall_delta == 0 )
+        wall_delta = 1;
 
     d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
     d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
 
-    if (print)
-        DPRINTF(
-                "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
+    if ( print )
+        DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
                 "dirtied %dMb/s %" PRId32 " pages\n",
                 wall_delta,
                 (int)((d0_cpu_delta*100)/wall_delta),
@@ -327,10 +319,11 @@ static int print_stats(int xc_handle, ui
                 stats->dirty_count);
 
 #ifdef ADAPTIVE_SAVE
-    if (((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate) {
+    if ( ((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate )
+    {
         mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
             + 50;
-        if (mbit_rate > MAX_MBIT_RATE)
+        if ( mbit_rate > MAX_MBIT_RATE )
             mbit_rate = MAX_MBIT_RATE;
     }
 #endif
@@ -352,18 +345,19 @@ static int analysis_phase(int xc_handle,
 
     start = llgettimeofday();
 
-    for (j = 0; j < runs; j++) {
+    for ( j = 0; j < runs; j++ )
+    {
         int i;
 
         xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                           arr, p2m_size, NULL, 0, NULL);
         DPRINTF("#Flush\n");
-        for ( i = 0; i < 40; i++ ) {
+        for ( i = 0; i < 40; i++ )
+        {
             usleep(50000);
             now = llgettimeofday();
             xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
                               NULL, 0, NULL, 0, &stats);
-
             DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
                     ((now-start)+500)/1000,
                     stats.fault_count, stats.dirty_count);
@@ -380,34 +374,40 @@ static int suspend_and_state(int (*suspe
 {
     int i = 0;
 
-    if (!(*suspend)(dom)) {
+    if ( !(*suspend)(dom) )
+    {
         ERROR("Suspend request failed");
         return -1;
     }
 
  retry:
 
-    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
+    if ( xc_domain_getinfo(xc_handle, dom, 1, info) != 1 )
+    {
         ERROR("Could not get domain info");
         return -1;
     }
 
-    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
+    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
         ERROR("Could not get vcpu context");
 
 
-    if (info->dying) {
+    if ( info->dying )
+    {
         ERROR("domain is dying");
         return -1;
     }
 
-    if (info->crashed) {
+    if ( info->crashed )
+    {
         ERROR("domain has crashed");
         return -1;
     }
 
-    if (info->shutdown) {
-        switch (info->shutdown_reason) {
+    if ( info->shutdown )
+    {
+        switch ( info->shutdown_reason )
+        {
         case SHUTDOWN_poweroff:
         case SHUTDOWN_reboot:
             ERROR("domain has shut down");
@@ -420,20 +420,19 @@ static int suspend_and_state(int (*suspe
         }
     }
 
-    if (info->paused) {
-        // try unpausing domain, wait, and retest
+    if ( info->paused )
+    {
+        /* Try unpausing domain, wait, and retest. */
         xc_domain_unpause( xc_handle, dom );
-
         ERROR("Domain was paused. Wait and re-test.");
-        usleep(10000);  // 10ms
-
+        usleep(10000); /* 10ms */
         goto retry;
     }
 
-
-    if( ++i < 100 ) {
+    if ( ++i < 100 )
+    {
         ERROR("Retry suspend domain");
-        usleep(10000);  // 10ms
+        usleep(10000); /* 10ms */
         goto retry;
     }
 
@@ -453,18 +452,18 @@ static void *map_frame_list_list(int xc_
     int count = 100;
     void *p;
 
-    while (count-- && shinfo->arch.pfn_to_mfn_frame_list_list == 0)
+    while ( count-- && (shinfo->arch.pfn_to_mfn_frame_list_list == 0) )
         usleep(10000);
 
-    if (shinfo->arch.pfn_to_mfn_frame_list_list == 0) {
+    if ( shinfo->arch.pfn_to_mfn_frame_list_list == 0 )
+    {
         ERROR("Timed out waiting for frame list updated.");
         return NULL;
     }
 
     p = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
                              shinfo->arch.pfn_to_mfn_frame_list_list);
-
-    if (p == NULL)
+    if ( p == NULL )
         ERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
 
     return p;
@@ -493,10 +492,10 @@ static int canonicalize_pagetable(unsign
     */
     xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);
 
-    if (pt_levels == 2 && type == XEN_DOMCTL_PFINFO_L2TAB)
+    if ( (pt_levels == 2) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
         xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
 
-    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L3TAB)
+    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L3TAB) )
         xen_start = L3_PAGETABLE_ENTRIES_PAE;
 
     /*
@@ -505,27 +504,31 @@ static int canonicalize_pagetable(unsign
     ** Xen always ensures is present in that L2. Guests must ensure
     ** that this check will fail for other L2s.
     */
-    if (pt_levels == 3 && type == XEN_DOMCTL_PFINFO_L2TAB) {
+    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
+    {
         int hstart;
         uint64_t he;
 
         hstart = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
         he = ((const uint64_t *) spage)[hstart];
 
-        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 ) {
+        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
+        {
             /* hvirt starts with xen stuff... */
             xen_start = hstart;
-        } else if ( hvirt_start != 0xf5800000 ) {
+        }
+        else if ( hvirt_start != 0xf5800000 )
+        {
             /* old L2s from before hole was shrunk... */
             hstart = (0xf5800000 >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
             he = ((const uint64_t *) spage)[hstart];
-
-            if( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
+            if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
                 xen_start = hstart;
         }
     }
 
-    if (pt_levels == 4 && type == XEN_DOMCTL_PFINFO_L4TAB) {
+    if ( (pt_levels == 4) && (type == XEN_DOMCTL_PFINFO_L4TAB) )
+    {
         /*
         ** XXX SMH: should compute these from hvirt_start (which we have)
         ** and hvirt_end (which we don't)
@@ -535,27 +538,29 @@ static int canonicalize_pagetable(unsign
     }
 
     /* Now iterate through the page table, canonicalizing each PTE */
-    for (i = 0; i < pte_last; i++ ) {
-
+    for (i = 0; i < pte_last; i++ )
+    {
         unsigned long pfn, mfn;
 
-        if (pt_levels == 2)
+        if ( pt_levels == 2 )
             pte = ((const uint32_t*)spage)[i];
         else
             pte = ((const uint64_t*)spage)[i];
 
-        if (i >= xen_start && i < xen_end)
+        if ( (i >= xen_start) && (i < xen_end) )
             pte = 0;
 
-        if (pte & _PAGE_PRESENT) {
-
+        if ( pte & _PAGE_PRESENT )
+        {
             mfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
-            if (!MFN_IS_IN_PSEUDOPHYS_MAP(mfn)) {
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
+            {
                 /* This will happen if the type info is stale which
                    is quite feasible under live migration */
                 pfn  = 0;  /* zap it - we'll retransmit this page later */
                 race = 1;  /* inform the caller of race; fatal if !live */ 
-            } else
+            }
+            else
                 pfn = mfn_to_pfn(mfn);
 
             pte &= ~MADDR_MASK_X86;
@@ -566,25 +571,20 @@ static int canonicalize_pagetable(unsign
              * a 64bit hypervisor. We zap these here to avoid any
              * surprise at restore time...
              */
-            if ( pt_levels == 3 &&
-                 type == XEN_DOMCTL_PFINFO_L3TAB &&
-                 pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED) )
-            {
+            if ( (pt_levels == 3) &&
+                 (type == XEN_DOMCTL_PFINFO_L3TAB) &&
+                 (pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED)) )
                 pte &= ~(_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);
-            }
-        }
-
-        if (pt_levels == 2)
+        }
+
+        if ( pt_levels == 2 )
             ((uint32_t*)dpage)[i] = pte;
         else
             ((uint64_t*)dpage)[i] = pte;
-
-    }
-
-    return race; 
-}
-
-
+    }
+
+    return race;
+}
 
 static xen_pfn_t *xc_map_m2p(int xc_handle,
                                  unsigned long max_mfn,
@@ -601,37 +601,43 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
     m2p_chunks = M2P_CHUNKS(max_mfn);
 
     xmml.max_extents = m2p_chunks;
-    if (!(extent_start = malloc(m2p_chunks * sizeof(xen_pfn_t)))) {
+    if ( !(extent_start = malloc(m2p_chunks * sizeof(xen_pfn_t))) )
+    {
         ERROR("failed to allocate space for m2p mfns");
         return NULL;
     }
     set_xen_guest_handle(xmml.extent_start, extent_start);
 
-    if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
-        (xmml.nr_extents != m2p_chunks)) {
+    if ( xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
+         (xmml.nr_extents != m2p_chunks) )
+    {
         ERROR("xc_get_m2p_mfns");
         return NULL;
     }
 
-    if ((m2p = mmap(NULL, m2p_size, prot,
-                    MAP_SHARED, xc_handle, 0)) == MAP_FAILED) {
+    if ( (m2p = mmap(NULL, m2p_size, prot,
+                     MAP_SHARED, xc_handle, 0)) == MAP_FAILED )
+    {
         ERROR("failed to mmap m2p");
         return NULL;
     }
 
-    if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
+    if ( !(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t))) )
+    {
         ERROR("failed to allocate space for mmap entries");
         return NULL;
     }
 
-    for (i=0; i < m2p_chunks; i++) {
+    for ( i = 0; i < m2p_chunks; i++ )
+    {
         entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
         entries[i].mfn = extent_start[i];
         entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
     }
 
-    if ((rc = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
-        entries, m2p_chunks)) < 0) {
+    if ( (rc = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
+                                     entries, m2p_chunks)) < 0 )
+    {
         ERROR("xc_mmap_foreign_ranges failed (rc = %d)", rc);
         return NULL;
     }
@@ -643,8 +649,6 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
 
     return m2p;
 }
-
-
 
 int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
                   uint32_t max_factor, uint32_t flags, int (*suspend)(int))
@@ -699,33 +703,35 @@ int xc_linux_save(int xc_handle, int io_
     uint64_t vcpumap = 1ULL;
 
     /* If no explicit control parameters given, use defaults */
-    if(!max_iters)
-        max_iters = DEF_MAX_ITERS;
-    if(!max_factor)
-        max_factor = DEF_MAX_FACTOR;
+    max_iters  = max_iters  ? : DEF_MAX_ITERS;
+    max_factor = max_factor ? : DEF_MAX_FACTOR;
 
     initialize_mbit_rate();
 
-    if(!get_platform_info(xc_handle, dom,
-                          &max_mfn, &hvirt_start, &pt_levels)) {
+    if ( !get_platform_info(xc_handle, dom,
+                            &max_mfn, &hvirt_start, &pt_levels) )
+    {
         ERROR("Unable to get platform info.");
         return 1;
     }
 
-    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
+    if ( xc_domain_getinfo(xc_handle, dom, 1, &info) != 1 )
+    {
         ERROR("Could not get domain info");
         return 1;
     }
 
-    if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
+    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
+    {
         ERROR("Could not get vcpu context");
         goto out;
     }
     shared_info_frame = info.shared_info_frame;
 
     /* Map the shared info frame */
-    if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                                            PROT_READ, shared_info_frame))) {
+    if ( !(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+                                              PROT_READ, shared_info_frame)) )
+    {
         ERROR("Couldn't map live_shinfo");
         goto out;
     }
@@ -734,16 +740,15 @@ int xc_linux_save(int xc_handle, int io_
 
     live_p2m_frame_list_list = map_frame_list_list(xc_handle, dom,
                                                    live_shinfo);
-
-    if (!live_p2m_frame_list_list)
+    if ( !live_p2m_frame_list_list )
         goto out;
 
     live_p2m_frame_list =
         xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                              live_p2m_frame_list_list,
                              P2M_FLL_ENTRIES);
-
-    if (!live_p2m_frame_list) {
+    if ( !live_p2m_frame_list )
+    {
         ERROR("Couldn't map p2m_frame_list");
         goto out;
     }
@@ -756,69 +761,70 @@ int xc_linux_save(int xc_handle, int io_
     live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                                     live_p2m_frame_list,
                                     P2M_FL_ENTRIES);
-
-    if (!live_p2m) {
+    if ( !live_p2m )
+    {
         ERROR("Couldn't map p2m table");
         goto out;
     }
 
     /* Setup the mfn_to_pfn table mapping */
-    if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) {
+    if ( !(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ)) )
+    {
         ERROR("Failed to map live M2P table");
         goto out;
     }
 
 
     /* Get a local copy of the live_P2M_frame_list */
-    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
+    if ( !(p2m_frame_list = malloc(P2M_FL_SIZE)) )
+    {
         ERROR("Couldn't allocate p2m_frame_list array");
         goto out;
     }
     memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE);
 
     /* Canonicalise the pfn-to-mfn table frame-number list. */
-    for (i = 0; i < p2m_size; i += fpp) {
-        if (!translate_mfn_to_pfn(&p2m_frame_list[i/fpp])) {
+    for ( i = 0; i < p2m_size; i += fpp )
+    {
+        if ( !translate_mfn_to_pfn(&p2m_frame_list[i/fpp]) )
+        {
             ERROR("Frame# in pfn-to-mfn frame list is not in pseudophys");
             ERROR("entry %d: p2m_frame_list[%ld] is 0x%"PRIx64, i, i/fpp,
-                (uint64_t)p2m_frame_list[i/fpp]);
+                  (uint64_t)p2m_frame_list[i/fpp]);
             goto out;
         }
     }
 
     /* Domain is still running at this point */
-    if (live) {
-
-        if (xc_shadow_control(xc_handle, dom,
-                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
-                              NULL, 0, NULL, 0, NULL) < 0) {
+    if ( live )
+    {
+        /* Live suspend. Enable log-dirty mode. */
+        if ( xc_shadow_control(xc_handle, dom,
+                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
+                               NULL, 0, NULL, 0, NULL) < 0 )
+        {
             ERROR("Couldn't enable shadow mode");
             goto out;
         }
-
-        last_iter = 0;
-
-    } else {
-
-        /* This is a non-live suspend. Issue the call back to get the
-           domain suspended */
-
-        last_iter = 1;
-
-        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt)) {
+    }
+    else
+    {
+        /* This is a non-live suspend. Suspend the domain .*/
+        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt) )
+        {
             ERROR("Domain appears not to have suspended");
             goto out;
         }
-
-    }
+    }
+
+    last_iter = !live;
 
     /* pretend we sent all the pages last iteration */
     sent_last_iter = p2m_size;
 
-
     /* calculate the power of 2 order of p2m_size, e.g.
        15->4 16->4 17->5 */
-    for (i = p2m_size-1, order_nr = 0; i ; i >>= 1, order_nr++)
+    for ( i = p2m_size-1, order_nr = 0; i ; i >>= 1, order_nr++ )
         continue;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
@@ -826,20 +832,23 @@ int xc_linux_save(int xc_handle, int io_
     to_fix  = calloc(1, BITMAP_SIZE);
     to_skip = malloc(BITMAP_SIZE);
 
-    if (!to_send || !to_fix || !to_skip) {
+    if ( !to_send || !to_fix || !to_skip )
+    {
         ERROR("Couldn't allocate to_send array");
         goto out;
     }
 
     memset(to_send, 0xff, BITMAP_SIZE);
 
-    if (lock_pages(to_send, BITMAP_SIZE)) {
+    if ( lock_pages(to_send, BITMAP_SIZE) )
+    {
         ERROR("Unable to lock to_send");
         return 1;
     }
 
     /* (to fix is local only) */
-    if (lock_pages(to_skip, BITMAP_SIZE)) {
+    if ( lock_pages(to_skip, BITMAP_SIZE) )
+    {
         ERROR("Unable to lock to_skip");
         return 1;
     }
@@ -849,14 +858,15 @@ int xc_linux_save(int xc_handle, int io_
     /* We want zeroed memory so use calloc rather than malloc. */
     pfn_type   = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
     pfn_batch  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
-
-    if ((pfn_type == NULL) || (pfn_batch == NULL)) {
+    if ( (pfn_type == NULL) || (pfn_batch == NULL) )
+    {
         ERROR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
         errno = ENOMEM;
         goto out;
     }
 
-    if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
+    if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
+    {
         ERROR("Unable to lock");
         goto out;
     }
@@ -867,10 +877,11 @@ int xc_linux_save(int xc_handle, int io_
     {
         int err=0;
         unsigned long mfn;
-        for (i = 0; i < p2m_size; i++) {
-
+        for ( i = 0; i < p2m_size; i++ )
+        {
             mfn = live_p2m[i];
-            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) {
+            if ( (mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i) )
+            {
                 DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
                         mfn, mfn_to_pfn(mfn));
                 err++;
@@ -879,10 +890,9 @@ int xc_linux_save(int xc_handle, int io_
         DPRINTF("Had %d unexplained entries in p2m table\n", err);
     }
 
-
     /* Start writing out the saved-domain record. */
-
-    if (!write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
+    if ( !write_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
+    {
         ERROR("write: p2m_size");
         goto out;
     }
@@ -892,23 +902,26 @@ int xc_linux_save(int xc_handle, int io_
      * a PAE guest understands extended CR3 (PDPTs above 4GB). Turns off
      * slow paths in the restore code.
      */
-    if ((pt_levels == 3) &&
-        (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3))) {
+    if ( (pt_levels == 3) &&
+         (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
+    {
         unsigned long signature = ~0UL;
         uint32_t tot_sz   = sizeof(struct vcpu_guest_context) + 8;
         uint32_t chunk_sz = sizeof(struct vcpu_guest_context);
         char chunk_sig[]  = "vcpu";
-        if (!write_exact(io_fd, &signature, sizeof(signature)) ||
-            !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
-            !write_exact(io_fd, &chunk_sig, 4) ||
-            !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
-            !write_exact(io_fd, &ctxt,      sizeof(ctxt))) {
+        if ( !write_exact(io_fd, &signature, sizeof(signature)) ||
+             !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
+             !write_exact(io_fd, &chunk_sig, 4) ||
+             !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
+             !write_exact(io_fd, &ctxt,      sizeof(ctxt)) )
+        {
             ERROR("write: extended info");
             goto out;
         }
     }
 
-    if (!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
+    if ( !write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE) )
+    {
         ERROR("write: p2m_frame_list");
         goto out;
     }
@@ -916,57 +929,65 @@ int xc_linux_save(int xc_handle, int io_
     print_stats(xc_handle, dom, 0, &stats, 0);
 
     /* Now write out each data page, canonicalising page tables as we go... */
-
-    while(1) {
-
+    for ( ; ; )
+    {
         unsigned int prev_pc, sent_this_iter, N, batch;
 
         iter++;
         sent_this_iter = 0;
         skip_this_iter = 0;
         prev_pc = 0;
-        N=0;
+        N = 0;
 
         DPRINTF("Saving memory pages: iter %d   0%%", iter);
 
-        while( N < p2m_size ){
-
+        while ( N < p2m_size )
+        {
             unsigned int this_pc = (N * 100) / p2m_size;
-
-            if ((this_pc - prev_pc) >= 5) {
+            int rc;
+
+            if ( (this_pc - prev_pc) >= 5 )
+            {
                 DPRINTF("\b\b\b\b%3d%%", this_pc);
                 prev_pc = this_pc;
             }
 
-            /* slightly wasteful to peek the whole array evey time,
-               but this is fast enough for the moment. */
-            if (!last_iter && xc_shadow_control(
-                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
-                    to_skip, p2m_size, NULL, 0, NULL) != p2m_size) {
-                ERROR("Error peeking shadow bitmap");
-                goto out;
-            }
-
+            if ( !last_iter )
+            {
+                /* Slightly wasteful to peek the whole array every time,
+                   but this is fast enough for the moment. */
+                rc = xc_shadow_control(
+                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK, to_skip, 
+                    p2m_size, NULL, 0, NULL);
+                if ( rc != p2m_size )
+                {
+                    ERROR("Error peeking shadow bitmap");
+                    goto out;
+                }
+            }
 
             /* load pfn_type[] with the mfn of all the pages we're doing in
                this batch. */
-            for (batch = 0; batch < MAX_BATCH_SIZE && N < p2m_size ; N++) {
-
+            for  ( batch = 0;
+                   (batch < MAX_BATCH_SIZE) && (N < p2m_size);
+                   N++ )
+            {
                 int n = permute(N, p2m_size, order_nr);
 
-                if (debug) {
+                if ( debug )
                     DPRINTF("%d pfn= %08lx mfn= %08lx %d  [mfn]= %08lx\n",
                             iter, (unsigned long)n, live_p2m[n],
                             test_bit(n, to_send),
                             mfn_to_pfn(live_p2m[n]&0xFFFFF));
-                }
-
-                if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
+
+                if ( !last_iter &&
+                     test_bit(n, to_send) &&
+                     test_bit(n, to_skip) )
                     skip_this_iter++; /* stats keeping */
 
-                if (!((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
-                      (test_bit(n, to_send) && last_iter) ||
-                      (test_bit(n, to_fix)  && last_iter)))
+                if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
+                       (test_bit(n, to_send) && last_iter) ||
+                       (test_bit(n, to_fix)  && last_iter)) )
                     continue;
 
                 /*
@@ -979,20 +1000,22 @@ int xc_linux_save(int xc_handle, int io_
                 pfn_batch[batch] = n;
                 pfn_type[batch]  = live_p2m[n];
 
-                if(!is_mapped(pfn_type[batch])) {
-
+                if ( !is_mapped(pfn_type[batch]) )
+                {
                     /*
                     ** not currently in psuedo-physical map -- set bit
                     ** in to_fix since we must send this page in last_iter
                     ** unless its sent sooner anyhow, or it never enters
                     ** pseudo-physical map (e.g. for ballooned down domains)
                     */
-
                     set_bit(n, to_fix);
                     continue;
                 }
 
-                if(last_iter && test_bit(n, to_fix) && !test_bit(n, to_send)) {
+                if ( last_iter &&
+                     test_bit(n, to_fix) &&
+                     !test_bit(n, to_send) )
+                {
                     needed_to_fix++;
                     DPRINTF("Fix! iter %d, pfn %x. mfn %lx\n",
                             iter, n, pfn_type[batch]);
@@ -1003,11 +1026,13 @@ int xc_linux_save(int xc_handle, int io_
                 batch++;
             }
 
-            if (batch == 0)
+            if ( batch == 0 )
                 goto skip; /* vanishingly unlikely... */
 
-            if ((region_base = xc_map_foreign_batch(
-                     xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) {
+            region_base = xc_map_foreign_batch(
+                xc_handle, dom, PROT_READ, pfn_type, batch);
+            if ( region_base == NULL )
+            {
                 ERROR("map batch failed");
                 goto out;
             }
@@ -1033,7 +1058,7 @@ int xc_linux_save(int xc_handle, int io_
                     continue;
                 }
 
-                if (debug)
+                if ( debug )
                     DPRINTF("%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
                             " sum= %08lx\n",
                             iter,
@@ -1049,13 +1074,15 @@ int xc_linux_save(int xc_handle, int io_
                     pfn_batch[j];
             }
 
-            if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
+            if ( !write_exact(io_fd, &batch, sizeof(unsigned int)) )
+            {
                 ERROR("Error when writing to state file (2) (errno %d)",
                       errno);
                 goto out;
             }
 
-            if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) {
+            if ( !write_exact(io_fd, pfn_type, sizeof(unsigned long)*j) )
+            {
                 ERROR("Error when writing to state file (3) (errno %d)",
                       errno);
                 goto out;
@@ -1083,22 +1110,26 @@ int xc_linux_save(int xc_handle, int io_
                     race = 
                         canonicalize_pagetable(pagetype, pfn, spage, page); 
 
-                    if(race && !live) {
+                    if ( race && !live )
+                    {
                         ERROR("Fatal PT race (pfn %lx, type %08lx)", pfn,
                               pagetype);
                         goto out;
                     }
 
-                    if (ratewrite(io_fd, live, page, PAGE_SIZE) != PAGE_SIZE) {
+                    if ( ratewrite(io_fd, live, page, PAGE_SIZE) != PAGE_SIZE )
+                    {
                         ERROR("Error when writing to state file (4)"
                               " (errno %d)", errno);
                         goto out;
                     }
-
-                }  else {
-
+                }
+                else
+                {
                     /* We have a normal page: just write it directly. */
-                    if (ratewrite(io_fd, live, spage, PAGE_SIZE) != PAGE_SIZE) {
+                    if ( ratewrite(io_fd, live, spage, PAGE_SIZE) !=
+                         PAGE_SIZE )
+                    {
                         ERROR("Error when writing to state file (5)"
                               " (errno %d)", errno);
                         goto out;
@@ -1119,7 +1150,8 @@ int xc_linux_save(int xc_handle, int io_
         DPRINTF("\r %d: sent %d, skipped %d, ",
                 iter, sent_this_iter, skip_this_iter );
 
-        if (last_iter) {
+        if ( last_iter )
+        {
             print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
 
             DPRINTF("Total pages sent= %ld (%.2fx)\n",
@@ -1127,14 +1159,16 @@ int xc_linux_save(int xc_handle, int io_
             DPRINTF("(of which %ld were fixups)\n", needed_to_fix  );
         }
 
-        if (last_iter && debug) {
+        if ( last_iter && debug )
+        {
             int minusone = -1;
             memset(to_send, 0xff, BITMAP_SIZE);
             debug = 0;
             DPRINTF("Entering debug resend-all mode\n");
 
             /* send "-1" to put receiver into debug mode */
-            if(!write_exact(io_fd, &minusone, sizeof(int))) {
+            if ( !write_exact(io_fd, &minusone, sizeof(int)) )
+            {
                 ERROR("Error when writing to state file (6) (errno %d)",
                       errno);
                 goto out;
@@ -1143,19 +1177,22 @@ int xc_linux_save(int xc_handle, int io_
             continue;
         }
 
-        if (last_iter)
+        if ( last_iter )
             break;
 
-        if (live) {
-            if (((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
-                (iter >= max_iters) ||
-                (sent_this_iter+skip_this_iter < 50) ||
-                (total_sent > p2m_size*max_factor)) {
+        if ( live )
+        {
+            if ( ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
+                 (iter >= max_iters) ||
+                 (sent_this_iter+skip_this_iter < 50) ||
+                 (total_sent > p2m_size*max_factor) )
+            {
                 DPRINTF("Start last iteration\n");
                 last_iter = 1;
 
-                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
-                                      &ctxt)) {
+                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
+                                       &ctxt) )
+                {
                     ERROR("Domain appears not to have suspended");
                     goto out;
                 }
@@ -1166,9 +1203,10 @@ int xc_linux_save(int xc_handle, int io_
                         (unsigned long)ctxt.user_regs.edx);
             }
 
-            if (xc_shadow_control(xc_handle, dom, 
-                                  XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
-                                  p2m_size, NULL, 0, &stats) != p2m_size) {
+            if ( xc_shadow_control(xc_handle, dom, 
+                                   XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
+                                   p2m_size, NULL, 0, &stats) != p2m_size )
+            {
                 ERROR("Error flushing shadow PT");
                 goto out;
             }
@@ -1178,7 +1216,7 @@ int xc_linux_save(int xc_handle, int io_
             print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
 
         }
-    } /* end of while 1 */
+    } /* end of infinite for loop */
 
     DPRINTF("All memory is saved\n");
 
@@ -1189,20 +1227,23 @@ int xc_linux_save(int xc_handle, int io_
             uint64_t vcpumap;
         } chunk = { -2, info.max_vcpu_id };
 
-        if (info.max_vcpu_id >= 64) {
+        if ( info.max_vcpu_id >= 64 )
+        {
             ERROR("Too many VCPUS in guest!");
             goto out;
         }
 
-        for (i = 1; i <= info.max_vcpu_id; i++) {
+        for ( i = 1; i <= info.max_vcpu_id; i++ )
+        {
             xc_vcpuinfo_t vinfo;
-            if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) &&
-                vinfo.online)
+            if ( (xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) &&
+                 vinfo.online )
                 vcpumap |= 1ULL << i;
         }
 
         chunk.vcpumap = vcpumap;
-        if(!write_exact(io_fd, &chunk, sizeof(chunk))) {
+        if ( !write_exact(io_fd, &chunk, sizeof(chunk)) )
+        {
             ERROR("Error when writing to state file (errno %d)", errno);
             goto out;
         }
@@ -1210,7 +1251,8 @@ int xc_linux_save(int xc_handle, int io_
 
     /* Zero terminate */
     i = 0;
-    if (!write_exact(io_fd, &i, sizeof(int))) {
+    if ( !write_exact(io_fd, &i, sizeof(int)) )
+    {
         ERROR("Error when writing to state file (6') (errno %d)", errno);
         goto out;
     }
@@ -1220,24 +1262,28 @@ int xc_linux_save(int xc_handle, int io_
         unsigned int i,j;
         unsigned long pfntab[1024];
 
-        for (i = 0, j = 0; i < p2m_size; i++) {
-            if (!is_mapped(live_p2m[i]))
+        for ( i = 0, j = 0; i < p2m_size; i++ )
+        {
+            if ( !is_mapped(live_p2m[i]) )
                 j++;
         }
 
-        if(!write_exact(io_fd, &j, sizeof(unsigned int))) {
+        if ( !write_exact(io_fd, &j, sizeof(unsigned int)) )
+        {
             ERROR("Error when writing to state file (6a) (errno %d)", errno);
             goto out;
         }
 
-        for (i = 0, j = 0; i < p2m_size; ) {
-
-            if (!is_mapped(live_p2m[i]))
+        for ( i = 0, j = 0; i < p2m_size; )
+        {
+            if ( !is_mapped(live_p2m[i]) )
                 pfntab[j++] = i;
 
             i++;
-            if (j == 1024 || i == p2m_size) {
-                if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
+            if ( (j == 1024) || (i == p2m_size) )
+            {
+                if ( !write_exact(io_fd, &pfntab, sizeof(unsigned long)*j) )
+                {
                     ERROR("Error when writing to state file (6b) (errno %d)",
                           errno);
                     goto out;
@@ -1245,34 +1291,39 @@ int xc_linux_save(int xc_handle, int io_
                 j = 0;
             }
         }
-
     }
 
     /* Canonicalise the suspend-record frame number. */
-    if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) ){
+    if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) )
+    {
         ERROR("Suspend record is not in range of pseudophys map");
         goto out;
     }
 
-    for (i = 0; i <= info.max_vcpu_id; i++) {
-        if (!(vcpumap & (1ULL << i)))
+    for ( i = 0; i <= info.max_vcpu_id; i++ )
+    {
+        if ( !(vcpumap & (1ULL << i)) )
             continue;
 
-        if ((i != 0) && xc_vcpu_getcontext(xc_handle, dom, i, &ctxt)) {
+        if ( (i != 0) && xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
+        {
             ERROR("No context for VCPU%d", i);
             goto out;
         }
 
         /* Canonicalise each GDT frame number. */
-        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ ) {
-            if ( !translate_mfn_to_pfn(&ctxt.gdt_frames[j]) ) {
+        for ( j = 0; (512*j) < ctxt.gdt_ents; j++ )
+        {
+            if ( !translate_mfn_to_pfn(&ctxt.gdt_frames[j]) )
+            {
                 ERROR("GDT frame is not in range of pseudophys map");
                 goto out;
             }
         }
 
         /* Canonicalise the page table base pointer. */
-        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) ) {
+        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[3])) )
+        {
             ERROR("PT base is not in range of pseudophys map");
             goto out;
         }
@@ -1282,7 +1333,8 @@ int xc_linux_save(int xc_handle, int io_
         /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
         if ( (pt_levels == 4) && ctxt.ctrlreg[1] )
         {
-            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[1])) ) {
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[1])) )
+            {
                 ERROR("PT base is not in range of pseudophys map");
                 goto out;
             }
@@ -1291,7 +1343,8 @@ int xc_linux_save(int xc_handle, int io_
                 xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[1])));
         }
 
-        if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) {
+        if ( !write_exact(io_fd, &ctxt, sizeof(ctxt)) )
+        {
             ERROR("Error when writing to state file (1) (errno %d)", errno);
             goto out;
         }
@@ -1302,7 +1355,8 @@ int xc_linux_save(int xc_handle, int io_
      */
     memcpy(page, live_shinfo, PAGE_SIZE);
     ((shared_info_t *)page)->arch.pfn_to_mfn_frame_list_list = 0;
-    if (!write_exact(io_fd, page, PAGE_SIZE)) {
+    if ( !write_exact(io_fd, page, PAGE_SIZE) )
+    {
         ERROR("Error when writing to state file (1) (errno %d)", errno);
         goto out;
     }
@@ -1312,30 +1366,30 @@ int xc_linux_save(int xc_handle, int io_
 
  out:
 
-    if (live) {
-        if(xc_shadow_control(xc_handle, dom, 
-                             XEN_DOMCTL_SHADOW_OP_OFF,
-                             NULL, 0, NULL, 0, NULL) < 0) {
+    if ( live )
+    {
+        if ( xc_shadow_control(xc_handle, dom, 
+                               XEN_DOMCTL_SHADOW_OP_OFF,
+                               NULL, 0, NULL, 0, NULL) < 0 )
             DPRINTF("Warning - couldn't disable shadow mode");
-        }
-    }
-
-    // flush last write and discard cache for file
+    }
+
+    /* Flush last write and discard cache for file. */
     discard_file_cache(io_fd, 1 /* flush */);
 
-    if (live_shinfo)
+    if ( live_shinfo )
         munmap(live_shinfo, PAGE_SIZE);
 
-    if (live_p2m_frame_list_list)
+    if ( live_p2m_frame_list_list )
         munmap(live_p2m_frame_list_list, PAGE_SIZE);
 
-    if (live_p2m_frame_list)
+    if ( live_p2m_frame_list )
         munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
 
-    if (live_p2m)
+    if ( live_p2m )
         munmap(live_p2m, ROUNDUP(p2m_size * sizeof(xen_pfn_t), PAGE_SHIFT));
 
-    if (live_m2p)
+    if ( live_m2p )
         munmap(live_m2p, M2P_SIZE(max_mfn));
 
     free(pfn_type);
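
The reindented order_nr loop above ("15->4 16->4 17->5") computes the
smallest power-of-two order covering p2m_size, i.e. ceil(log2(p2m_size)),
which the batch loop then hands to permute() when walking the page range.
A minimal standalone sketch of the same calculation (order_of is an
illustrative name, not a libxc function):

    #include <stdio.h>

    /* Smallest n such that (1UL << n) >= size; mirrors the save loop
     * "for ( i = size-1, order_nr = 0; i; i >>= 1, order_nr++ )". */
    static unsigned int order_of(unsigned long size)
    {
        unsigned long i;
        unsigned int order = 0;

        for ( i = size - 1; i; i >>= 1 )
            order++;
        return order;
    }

    int main(void)
    {
        /* Reproduces the examples in the comment: 15->4, 16->4, 17->5. */
        printf("%u %u %u\n", order_of(15), order_of(16), order_of(17));
        return 0;
    }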
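
The batch-construction loop batches a page when its to_send bit is set and
its to_skip bit is clear, or, on the final iteration, when either its
to_send or its to_fix bit is set. The same predicate restated as a small
self-contained helper (test_bit here is a generic single-bit lookup standing
in for the one used by libxc, and should_send is a hypothetical name):

    #include <stdbool.h>
    #include <stdio.h>

    /* Generic single-bit lookup; stands in for the test_bit() used in
     * xc_linux_save.c, which may differ in detail. */
    static int test_bit(unsigned int n, const unsigned long *map)
    {
        unsigned int bits = 8 * sizeof(unsigned long);
        return (map[n / bits] >> (n % bits)) & 1;
    }

    /* Same shape as the condition in the batch loop. */
    static bool should_send(unsigned int n, bool last_iter,
                            const unsigned long *to_send,
                            const unsigned long *to_skip,
                            const unsigned long *to_fix)
    {
        return (test_bit(n, to_send) && !test_bit(n, to_skip)) ||
               (test_bit(n, to_send) && last_iter) ||
               (test_bit(n, to_fix)  && last_iter);
    }

    int main(void)
    {
        unsigned long to_send = 0x1, to_skip = 0x0, to_fix = 0x2;

        /* Page 0: dirty and not skipped, so sent now.  Page 1: only a
         * fixup, so it is sent on the final iteration alone. */
        printf("%d %d %d\n",
               should_send(0, false, &to_send, &to_skip, &to_fix),
               should_send(1, false, &to_send, &to_skip, &to_fix),
               should_send(1, true,  &to_send, &to_skip, &to_fix));
        return 0;
    }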
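
The VCPU chunk emitted after the memory pages packs the set of online VCPUs
into a single 64-bit mask, which is why guests reporting max_vcpu_id >= 64
are rejected above. A compact illustration of the packing (vcpu_is_online is
a hypothetical stand-in for the xc_vcpu_getinfo()/vinfo.online check, and
VCPU0 is assumed online):

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical stand-in for "xc_vcpu_getinfo() succeeded and
     * vinfo.online is set". */
    static int vcpu_is_online(unsigned int vcpu)
    {
        return (vcpu % 2) == 0;            /* example: even VCPUs online */
    }

    int main(void)
    {
        unsigned int i, max_vcpu_id = 3;
        uint64_t vcpumap = 1;              /* assume VCPU0 online */

        for ( i = 1; i <= max_vcpu_id; i++ )
            if ( vcpu_is_online(i) )
                vcpumap |= 1ULL << i;

        /* Prints 0x5: VCPUs 0 and 2 online in this example. */
        printf("vcpumap = 0x%" PRIx64 "\n", vcpumap);
        return 0;
    }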

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
