[Xen-changelog] [linux-2.6.18-xen] [IA64] Coding style fix

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [linux-2.6.18-xen] [IA64] Coding style fix
From: "Xen patchbot-linux-2.6.18-xen" <patchbot-linux-2.6.18-xen@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 23 Jan 2008 01:11:38 -0800
Delivery-date: Wed, 23 Jan 2008 01:18:13 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1200691259 25200
# Node ID 71a415f9179bbd2bf630520949fd5da24187c119
# Parent  77f831cbb91ddca3a7539fa9197d4abc2d2bfcf9
[IA64] Coding style fix

Mainly white spaces, // comments and * ops.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 arch/ia64/oprofile/xenoprof.c |   20 -
 arch/ia64/xen/hypercall.S     |   16 -
 arch/ia64/xen/hypervisor.c    |  448 +++++++++++++++++++++---------------------
 arch/ia64/xen/util.c          |   23 --
 arch/ia64/xen/xcom_privcmd.c  |    4 
 include/asm-ia64/hypervisor.h |   10 
 include/asm-ia64/maddr.h      |   10 
 include/asm-ia64/xenoprof.h   |   10 
 8 files changed, 272 insertions(+), 269 deletions(-)
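
The hunks below all apply the same handful of Linux kernel style rules: tabs for indentation, /* ... */ block comments instead of C++-style // comments, and the pointer '*' bound to the variable name rather than the type. As a quick orientation, here is a minimal before/after sketch of those rules. It is purely illustrative and not part of the changeset; the function and variable names (find_free_region, copy_bytes) are made up.

/* Old style, as removed by the patch: C++ comment, '*' attached to the type. */
// struct resource* find_free_region(unsigned long size);

/* New style, as applied by the patch: block comments only, '*' binds to the
 * variable name, tab-indented body. */
static void copy_bytes(char *dst, const char *src, unsigned long n)
{
	unsigned long i;

	for (i = 0; i < n; i++)		/* single-statement loop keeps no braces */
		dst[i] = src[i];
}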

diff -r 77f831cbb91d -r 71a415f9179b arch/ia64/oprofile/xenoprof.c
--- a/arch/ia64/oprofile/xenoprof.c     Fri Jan 18 16:52:25 2008 +0000
+++ b/arch/ia64/oprofile/xenoprof.c     Fri Jan 18 14:20:59 2008 -0700
@@ -51,9 +51,9 @@ void xenoprof_arch_stop(void)
 }
 
 /* XXX move them to an appropriate header file. */
-struct resource* xen_ia64_allocate_resource(unsigned long size); 
-void xen_ia64_release_resource(struct resource* res); 
-void xen_ia64_unmap_resource(struct resource* res); 
+struct resource* xen_ia64_allocate_resource(unsigned long size);
+void xen_ia64_release_resource(struct resource *res);
+void xen_ia64_unmap_resource(struct resource *res);
 
 struct resource*
 xenoprof_ia64_allocate_resource(int32_t max_samples)
@@ -73,7 +73,7 @@ xenoprof_ia64_allocate_resource(int32_t 
        return xen_ia64_allocate_resource(bufsize);
 }
 
-void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf)
+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
 {
        if (sbuf->buffer) {
                xen_ia64_unmap_resource(sbuf->arch.res);
@@ -82,11 +82,11 @@ void xenoprof_arch_unmap_shared_buffer(s
        }
 }
 
-int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
-                                    struct xenoprof_shared_buffer* sbuf)
+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
+                                    struct xenoprof_shared_buffer *sbuf)
 {
        int ret;
-       struct resource* res;
+       struct resource *res;
 
        sbuf->buffer = NULL;
        sbuf->arch.res = NULL;
@@ -112,11 +112,11 @@ int xenoprof_arch_map_shared_buffer(stru
        return ret;
 }
 
-int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
-                              struct xenoprof_shared_buffer* sbuf)
+int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
+                              struct xenoprof_shared_buffer *sbuf)
 {
        int ret;
-       struct resource* res;
+       struct resource *res;
 
        sbuf->buffer = NULL;
        sbuf->arch.res = NULL;
diff -r 77f831cbb91d -r 71a415f9179b arch/ia64/xen/hypercall.S
--- a/arch/ia64/xen/hypercall.S Fri Jan 18 16:52:25 2008 +0000
+++ b/arch/ia64/xen/hypercall.S Fri Jan 18 14:20:59 2008 -0700
@@ -17,7 +17,7 @@ GLOBAL_ENTRY(xen_get_psr)
 GLOBAL_ENTRY(xen_get_psr)
        XEN_HYPER_GET_PSR
        br.ret.sptk.many rp
-    ;;
+       ;;
 END(xen_get_psr)
 
 GLOBAL_ENTRY(xen_get_ivr)
@@ -124,13 +124,13 @@ END(xen_set_eflag)
 #endif /* ASM_SUPPORTED */
 
 GLOBAL_ENTRY(xen_send_ipi)
-        mov r14=r32
-        mov r15=r33
-        mov r2=0x400
-        break 0x1000
-        ;;
-        br.ret.sptk.many rp
-        ;;
+       mov r14=r32
+       mov r15=r33
+       mov r2=0x400
+       break 0x1000
+       ;;
+       br.ret.sptk.many rp
+       ;;
 END(xen_send_ipi)
 
 GLOBAL_ENTRY(__hypercall)
diff -r 77f831cbb91d -r 71a415f9179b arch/ia64/xen/hypervisor.c
--- a/arch/ia64/xen/hypervisor.c        Fri Jan 18 16:52:25 2008 +0000
+++ b/arch/ia64/xen/hypervisor.c        Fri Jan 18 14:20:59 2008 -0700
@@ -20,7 +20,6 @@
  *
  */
 
-//#include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/bootmem.h>
 #include <linux/module.h>
@@ -35,7 +34,8 @@
 #include <xen/xencons.h>
 #include <xen/balloon.h>
 
-shared_info_t *HYPERVISOR_shared_info __read_mostly = (shared_info_t *)XSI_BASE;
+shared_info_t *HYPERVISOR_shared_info __read_mostly =
+       (shared_info_t *)XSI_BASE;
 EXPORT_SYMBOL(HYPERVISOR_shared_info);
 
 start_info_t *xen_start_info;
@@ -60,7 +60,7 @@ xen_setup(char **cmdline_p)
 
        if (ia64_platform_is("xen"))
                dig_setup(cmdline_p);
-       
+
        if (!is_running_on_xen() || !is_initial_xendomain())
                return;
 
@@ -79,9 +79,11 @@ xen_cpu_init(void)
        xen_smp_intr_init();
 }
 
-//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
-// move those to lib/contiguous_bitmap?
-//XXX discontigmem/sparsemem
+/*
+ *XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
+ * move those to lib/contiguous_bitmap?
+ *XXX discontigmem/sparsemem
+ */
 
 /*
  * Bitmap is indexed by page number. If bit is set, the page is part of a
@@ -104,16 +106,16 @@ create_contiguous_bitmap(u64 start, u64 
        pte_t *pte;
 
        bitmap_start = (unsigned long)contiguous_bitmap +
-                      ((__pa(start) >> PAGE_SHIFT) >> 3);
+                      ((__pa(start) >> PAGE_SHIFT) >> 3);
        bitmap_end = (unsigned long)contiguous_bitmap +
-                    (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
+                    (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
 
        start_page = bitmap_start & PAGE_MASK;
        end_page = PAGE_ALIGN(bitmap_end);
        node = paddr_to_nid(__pa(start));
 
        bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
-                                         end_page - start_page);
+                                         end_page - start_page);
        BUG_ON(!bitmap);
        memset(bitmap, 0, end_page - start_page);
 
@@ -121,26 +123,26 @@ create_contiguous_bitmap(u64 start, u64 
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd,
-                                    alloc_bootmem_pages_node(NODE_DATA(node),
-                                                             PAGE_SIZE));
+                                    alloc_bootmem_pages_node(NODE_DATA(node),
+                                                             PAGE_SIZE));
                pud = pud_offset(pgd, address);
 
                if (pud_none(*pud))
                        pud_populate(&init_mm, pud,
-                                    alloc_bootmem_pages_node(NODE_DATA(node),
-                                                             PAGE_SIZE));
+                                    alloc_bootmem_pages_node(NODE_DATA(node),
+                                                             PAGE_SIZE));
                pmd = pmd_offset(pud, address);
 
                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd,
-                                           alloc_bootmem_pages_node
-                                           (NODE_DATA(node), PAGE_SIZE));
+                                           alloc_bootmem_pages_node
+                                           (NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);
 
                if (pte_none(*pte))
                        set_pte(pte,
-                               pfn_pte(__pa(bitmap + (address - start_page))
-                                       >> PAGE_SHIFT, PAGE_KERNEL));
+                               pfn_pte(__pa(bitmap + (address - start_page))
+                                       >> PAGE_SHIFT, PAGE_KERNEL));
        }
        return 0;
 }
@@ -225,9 +227,11 @@ static void contiguous_bitmap_clear(
        }
 }
 
-// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
-// are based on i386 xen_create_contiguous_region(),
-// xen_destroy_contiguous_region()
+/*
+ * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
+ * are based on i386 xen_create_contiguous_region(),
+ * xen_destroy_contiguous_region()
+ */
 
 /* Protected by balloon_lock. */
 #define MAX_CONTIG_ORDER 7
@@ -273,9 +277,8 @@ __xen_create_contiguous_region(unsigned 
        balloon_lock(flags);
 
        /* Get a new contiguous memory extent. */
-       for (i = 0; i < num_gpfn; i++) {
+       for (i = 0; i < num_gpfn; i++)
                in_frames[i] = start_gpfn + i;
-       }
        out_frame = start_gpfn;
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == num_gpfn);
@@ -357,7 +360,7 @@ __xen_destroy_contiguous_region(unsigned
                         .domid        = DOMID_SELF
                 },
                .nr_exchanged = 0
-        };
+       };
        
 
        if (!test_bit(start_gpfn, contiguous_bitmap))
@@ -375,17 +378,16 @@ __xen_destroy_contiguous_region(unsigned
 
        contiguous_bitmap_clear(start_gpfn, num_gpfn);
 
-        /* Do the exchange for non-contiguous MFNs. */
+       /* Do the exchange for non-contiguous MFNs. */
        in_frame = start_gpfn;
-       for (i = 0; i < num_gpfn; i++) {
+       for (i = 0; i < num_gpfn; i++)
                out_frames[i] = start_gpfn + i;
-       }
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == 1);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
        BUG_ON(success && (error != 0));
        if (unlikely(error == -ENOSYS)) {
-                /* Compatibility when XENMEM_exchange is unsupported. */
+               /* Compatibility when XENMEM_exchange is unsupported. */
                error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                             &exchange.in);
                BUG_ON(error != 1);
@@ -405,11 +407,10 @@ xen_limit_pages_to_max_mfn(struct page *
                                            order, address_bits);
 }
 
-
-///////////////////////////////////////////////////////////////////////////
-// grant table hack
-// cmd: GNTTABOP_xxx
-
+/****************************************************************************
+ * grant table hack
+ * cmd: GNTTABOP_xxx
+ */
 #include <linux/mm.h>
 #include <xen/interface/xen.h>
 #include <xen/gnttab.h>
@@ -428,16 +429,19 @@ gnttab_map_grant_ref_pre(struct gnttab_m
 
        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
-                       xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
+                       xprintd("GNTMAP_application_map is not supported yet:"
+                               " flags 0x%x\n", flags);
                        BUG();
                }
                if (flags & GNTMAP_contains_pte) {
-                       xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
+                       xprintd("GNTMAP_contains_pte is not supported yet"
+                               " flags 0x%x\n", flags);
                        BUG();
                }
        } else if (flags & GNTMAP_device_map) {
-               xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
-               BUG();//XXX not yet. actually this flag is not used.
+               xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
+                       flags);
+               BUG(); /* XXX not yet. actually this flag is not used. */
        } else {
                BUG();
        }
@@ -457,15 +461,17 @@ HYPERVISOR_grant_table_op(unsigned int c
 }
 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
 
-///////////////////////////////////////////////////////////////////////////
-// foreign mapping
+/**************************************************************************
+ * foreign mapping
+ */
 #include <linux/efi.h>
-#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
+#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
 
 static unsigned long privcmd_resource_min = 0;
-// Xen/ia64 currently can handle pseudo physical address bits up to
-// (PAGE_SHIFT * 3)
-static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
+/* Xen/ia64 currently can handle pseudo physical address bits up to
+ * (PAGE_SHIFT * 3) */
+static unsigned long privcmd_resource_max =
+       GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
 static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
 
 static unsigned long
@@ -500,18 +506,18 @@ xen_ia64_privcmd_init(void)
        efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;
 
-       // at first check the used highest address
+       /* at first check the used highest address */
        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               // nothing
+               /* nothing */;
        }
        md = p - efi_desc_size;
        privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
        if (xen_ia64_privcmd_check_size(privcmd_resource_min,
-                                       privcmd_resource_max)) {
+                                       privcmd_resource_max))
                goto out;
-       }
-
-       // the used highest address is too large. try to find the largest gap.
+
+       /* the used highest address is too large.
+        * try to find the largest gap. */
        tmp_min = privcmd_resource_max;
        tmp_max = 0;
        gap_size = 0;
@@ -525,23 +531,21 @@ xen_ia64_privcmd_init(void)
 
                md = p;
                end = md_end_addr(md);
-               if (end > privcmd_resource_max) {
+               if (end > privcmd_resource_max)
                        break;
-               }
                if (end < prev_end) {
-                       // work around. 
-                       // Xen may pass incompletely sorted memory
-                       // descriptors like
-                       // [x, x + length]
-                       // [x, x]
-                       // this order should be reversed.
+                       /* work around. 
+                        * Xen may pass incompletely sorted memory
+                        * descriptors like
+                        * [x, x + length]
+                        * [x, x]
+                        * this order should be reversed. */
                        continue;
                }
                next = p + efi_desc_size;
                next_start = next->phys_addr;
-               if (next_start > privcmd_resource_max) {
+               if (next_start > privcmd_resource_max)
                        next_start = privcmd_resource_max;
-               }
                if (end < next_start && gap_size < (next_start - end)) {
                        tmp_min = end;
                        tmp_max = next_start;
@@ -560,19 +564,21 @@ xen_ia64_privcmd_init(void)
        privcmd_resource_max = tmp_max;
        if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
                                         privcmd_resource_max)) {
-               // Any large enough gap isn't found.
-               // go ahead anyway with the warning hoping that large region
-               // won't be requested.
-               printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
+               /* Any large enough gap isn't found.
+                * go ahead anyway with the warning hoping that large region
+                * won't be requested. */
+               printk(KERN_WARNING "xen privcmd: "
+                      "large enough region for privcmd mmap is not found.\n");
        }
 
 out:
-       printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
+       printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
+              "[0x%lx, 0x%lx] (%ldMB)\n",
               privcmd_resource_min, privcmd_resource_max, 
               (privcmd_resource_max - privcmd_resource_min) >> 20);
        BUG_ON(privcmd_resource_min >= privcmd_resource_max);
 
-       // XXX this should be somewhere appropriate
+       /* XXX this should be somewhere appropriate */
        (void)p2m_expose_init();
 
        return 0;
@@ -587,12 +593,12 @@ struct xen_ia64_privcmd_entry {
 
 struct xen_ia64_privcmd_range {
        atomic_t                        ref_count;
-       unsigned long                   pgoff; // in PAGE_SIZE
-       struct resource*                res;
-
-       // for foreign domain p2m mapping
-       void*                           private;
-       void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
+       unsigned long                   pgoff; /* in PAGE_SIZE */
+       struct resource                 *res;
+
+       /* for foreign domain p2m mapping */
+       void                            *private;
+       void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
 
        unsigned long                   num_entries;
        struct xen_ia64_privcmd_entry   entries[0];
@@ -600,30 +606,30 @@ struct xen_ia64_privcmd_range {
 
 struct xen_ia64_privcmd_vma {
        int                             is_privcmd_mmapped;
-       struct xen_ia64_privcmd_range*  range;
+       struct xen_ia64_privcmd_range   *range;
 
        unsigned long                   num_entries;
-       struct xen_ia64_privcmd_entry*  entries;
+       struct xen_ia64_privcmd_entry   *entries;
 };
 
 static void
-xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
+xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
 {
        atomic_set(&entry->map_count, 0);
        entry->gpfn = INVALID_GPFN;
 }
 
 static int
-xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
+xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
                            unsigned long addr,
-                           struct xen_ia64_privcmd_range* privcmd_range,
+                           struct xen_ia64_privcmd_range *privcmd_range,
                            int i,
                            unsigned long gmfn,
                            pgprot_t prot,
                            domid_t domid)
 {
        int error = 0;
-       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        unsigned long gpfn;
        unsigned long flags;
 
@@ -639,21 +645,18 @@ xen_ia64_privcmd_entry_mmap(struct vm_ar
        gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
 
        flags = ASSIGN_writable;
-       if (pgprot_val(prot) == PROT_READ) {
+       if (pgprot_val(prot) == PROT_READ)
                flags = ASSIGN_readonly;
-       }
        error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
-       if (error != 0) {
+       if (error != 0)
                goto out;
-       }
 
        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
                error = HYPERVISOR_zap_physmap(gpfn, 0);
-               if (error) {
-                       BUG();//XXX
-               }
+               if (error)
+                       BUG(); /* XXX */
        } else {
                atomic_inc(&entry->map_count);
                entry->gpfn = gpfn;
@@ -664,47 +667,44 @@ out:
 }
 
 static void
-xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
+xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
                              int i)
 {
-       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        unsigned long gpfn = entry->gpfn;
-       //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
-       //      (vma->vm_pgoff - privcmd_range->pgoff);
+       /* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
+               (vma->vm_pgoff - privcmd_range->pgoff); */
        int error;
 
        error = HYPERVISOR_zap_physmap(gpfn, 0);
-       if (error) {
-               BUG();//XXX
-       }
+       if (error)
+               BUG(); /* XXX */
        entry->gpfn = INVALID_GPFN;
 }
 
 static void
-xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
+xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
                            int i)
 {
-       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
-       if (entry->gpfn != INVALID_GPFN) {
+       struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
+       if (entry->gpfn != INVALID_GPFN)
                atomic_inc(&entry->map_count);
-       } else {
+       else
                BUG_ON(atomic_read(&entry->map_count) != 0);
-       }
 }
 
 static void
-xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
+xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
                             int i)
 {
-       struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
+       struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
        if (entry->gpfn != INVALID_GPFN &&
-           atomic_dec_and_test(&entry->map_count)) {
+           atomic_dec_and_test(&entry->map_count))
                xen_ia64_privcmd_entry_munmap(privcmd_range, i);
-       }
-}
-
-static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
-static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
+}
+
+static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
+static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
 
 struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
        .open = &xen_ia64_privcmd_vma_open,
@@ -712,12 +712,13 @@ struct vm_operations_struct xen_ia64_pri
 };
 
 static void
-__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
-                           struct xen_ia64_privcmd_vma* privcmd_vma,
-                           struct xen_ia64_privcmd_range* privcmd_range)
+__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
+                           struct xen_ia64_privcmd_vma *privcmd_vma,
+                           struct xen_ia64_privcmd_range *privcmd_range)
 {
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
-       unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long num_entries =
+               (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;
 
        BUG_ON(entry_offset < 0);
@@ -727,36 +728,37 @@ __xen_ia64_privcmd_vma_open(struct vm_ar
        privcmd_vma->num_entries = num_entries;
        privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
-       for (i = 0; i < privcmd_vma->num_entries; i++) {
+       for (i = 0; i < privcmd_vma->num_entries; i++)
                xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
-       }
 
        vma->vm_private_data = privcmd_vma;
        vma->vm_ops = &xen_ia64_privcmd_vm_ops;
 }
 
 static void
-xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
-{
-       struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
+{
+       struct xen_ia64_privcmd_vma *old_privcmd_vma =
+               (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_vma *privcmd_vma =
+               (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
 
        atomic_inc(&privcmd_range->ref_count);
-       // vm_op->open() can't fail.
+       /* vm_op->open() can't fail. */
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
-       // copy original value if necessary
+       /* copy original value if necessary */
        privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
 
        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
 }
 
 static void
-xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
-{
-       struct xen_ia64_privcmd_vma* privcmd_vma =
+xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
+{
+       struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long i;
 
@@ -770,7 +772,7 @@ xen_ia64_privcmd_vma_close(struct vm_are
        if (atomic_dec_and_test(&privcmd_range->ref_count)) {
 #if 1
                for (i = 0; i < privcmd_range->num_entries; i++) {
-                       struct xen_ia64_privcmd_entry* entry =
+                       struct xen_ia64_privcmd_entry *entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->gpfn != INVALID_GPFN);
@@ -788,7 +790,7 @@ int
 int
 privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
-       struct xen_ia64_privcmd_vma* privcmd_vma =
+       struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
        return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
 }
@@ -799,9 +801,9 @@ privcmd_mmap(struct file * file, struct 
        int error;
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long num_entries = size >> PAGE_SHIFT;
-       struct xen_ia64_privcmd_range* privcmd_range = NULL;
-       struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
-       struct resource* res = NULL;
+       struct xen_ia64_privcmd_range *privcmd_range = NULL;
+       struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
+       struct resource *res = NULL;
        unsigned long i;
        BUG_ON(!is_running_on_xen());
 
@@ -811,26 +813,22 @@ privcmd_mmap(struct file * file, struct 
        privcmd_range =
                vmalloc(sizeof(*privcmd_range) +
                        sizeof(privcmd_range->entries[0]) * num_entries);
-       if (privcmd_range == NULL) {
+       if (privcmd_range == NULL)
                goto out_enomem0;
-       }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
-       if (privcmd_vma == NULL) {
+       if (privcmd_vma == NULL)
                goto out_enomem1;
-       }
        privcmd_vma->is_privcmd_mmapped = 0;
 
        res = kzalloc(sizeof(*res), GFP_KERNEL);
-       if (res == NULL) {
+       if (res == NULL)
                goto out_enomem1;
-       }
        res->name = "Xen privcmd mmap";
        error = allocate_resource(&iomem_resource, res, size,
                                  privcmd_resource_min, privcmd_resource_max,
                                  privcmd_resource_align, NULL, NULL);
-       if (error) {
+       if (error)
                goto out_enomem1;
-       }
        privcmd_range->res = res;
 
        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
@@ -841,9 +839,8 @@ privcmd_mmap(struct file * file, struct 
        privcmd_range->num_entries = num_entries;
        privcmd_range->private = NULL;
        privcmd_range->callback = NULL;
-       for (i = 0; i < privcmd_range->num_entries; i++) {
+       for (i = 0; i < privcmd_range->num_entries; i++)
                xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
-       }
 
        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
        return 0;
@@ -858,15 +855,15 @@ out_enomem0:
 
 int
 direct_remap_pfn_range(struct vm_area_struct *vma,
-                      unsigned long address,   // process virtual address
-                      unsigned long gmfn,      // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
+                      unsigned long address,   /* process virtual address */
+                      unsigned long gmfn,      /* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
                       unsigned long size,
                       pgprot_t prot,
-                      domid_t  domid)          // target domain
-{
-       struct xen_ia64_privcmd_vma* privcmd_vma =
+                      domid_t  domid)          /* target domain */
+{
+       struct xen_ia64_privcmd_vma *privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
+       struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
 
        unsigned long i;
@@ -875,28 +872,27 @@ direct_remap_pfn_range(struct vm_area_st
        BUG_ON(!is_running_on_xen());
 
 #if 0
-       if (prot != vm->vm_page_prot) {
+       if (prot != vm->vm_page_prot)
                return -EINVAL;
-       }
 #endif
 
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
-               if (error != 0) {
+               if (error != 0)
                        break;
-               }
 
                i++;
                gmfn++;
-        }
+       }
 
        return error;
 }
 
 
-///////////////////////////////////////////////////////////////////////////
-// expose p2m table
+/**************************************************************************
+ * expose p2m table
+ */
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
 #include <linux/cpu.h>
 #include <asm/uaccess.h>
@@ -914,9 +910,10 @@ static struct resource p2m_resource = {
 };
 static unsigned long p2m_assign_start_pfn __read_mostly;
 static unsigned long p2m_assign_end_pfn __read_mostly;
-static unsigned long p2m_expose_size;  // this is referenced only when resume.
-                                       // so __read_mostly doesn't make sense.
-volatile const pte_t* p2m_pte __read_mostly;
+static unsigned long p2m_expose_size;  /* this is referenced only when resume.
+                                        * so __read_mostly doesn't make sense.
+                                        */
+volatile const pte_t *p2m_pte __read_mostly;
 
 #define GRANULE_PFN    PTRS_PER_PTE
 static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
@@ -929,13 +926,13 @@ static int xen_ia64_p2m_expose __read_mo
 static int xen_ia64_p2m_expose __read_mostly = 1;
 module_param(xen_ia64_p2m_expose, int, 0);
 MODULE_PARM_DESC(xen_ia64_p2m_expose,
-                 "enable/disable xen/ia64 p2m exposure optimization\n");
+                "enable/disable xen/ia64 p2m exposure optimization\n");
 
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
 module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
 MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
-                 "use/unuse dtr to map exposed p2m table\n");
+                "use/unuse dtr to map exposed p2m table\n");
 
 static const int p2m_page_shifts[] = {
        _PAGE_SIZE_4K,
@@ -957,21 +954,21 @@ struct p2m_itr_arg {
 };
 static struct p2m_itr_arg p2m_itr_arg __read_mostly;
 
-// This should be in asm-ia64/kregs.h
+/* This should be in asm-ia64/kregs.h */
 #define IA64_TR_P2M_TABLE      3
 
 static void
-p2m_itr(void* info)
-{
-       struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
+p2m_itr(void *info)
+{
+       struct p2m_itr_arg *arg = (struct p2m_itr_arg*)info;
        ia64_itr(0x2, IA64_TR_P2M_TABLE,
-                arg->vaddr, arg->pteval, arg->log_page_size);
+                arg->vaddr, arg->pteval, arg->log_page_size);
        ia64_srlz_d();
 }
 
 static int
 p2m_expose_dtr_call(struct notifier_block *self,
-                    unsigned long event, void* ptr)
+                   unsigned long event, void *ptr)
 {
        unsigned int cpu = (unsigned int)(long)ptr;
        if (event != CPU_ONLINE)
@@ -1050,15 +1047,16 @@ p2m_expose_init(void)
                                continue;
 
                        granule_pfn = max(page_size >> PAGE_SHIFT,
-                                         p2m_granule_pfn);
+                                         p2m_granule_pfn);
                        p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-                                                       granule_pfn);
+                                                       granule_pfn);
                        p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
-                                                     granule_pfn);
+                                                     granule_pfn);
                        num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
                        p2m_expose_size = num_pfn << PAGE_SHIFT;
                        p2m_size = p2m_table_size(num_pfn);
-                       p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
+                       p2m_size = ROUNDUP(p2m_size,
+                                          granule_pfn << PAGE_SHIFT);
                        if (p2m_size == page_size)
                                break;
                }
@@ -1073,20 +1071,21 @@ p2m_expose_init(void)
        {
                BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
                p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
-                                               p2m_granule_pfn);
-               p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
+                                               p2m_granule_pfn);
+               p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
+                                             p2m_granule_pfn);
                num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
                p2m_expose_size = num_pfn << PAGE_SHIFT;
                p2m_size = p2m_table_size(num_pfn);
                p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
                align = max(privcmd_resource_align,
-                           p2m_granule_pfn << PAGE_SHIFT);
+                           p2m_granule_pfn << PAGE_SHIFT);
        }
        
-       // use privcmd region
+       /* use privcmd region */
        error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
-                                 privcmd_resource_min, privcmd_resource_max,
-                                 align, NULL, NULL);
+                                 privcmd_resource_min, privcmd_resource_max,
+                                 align, NULL, NULL);
        if (error) {
                printk(KERN_ERR P2M_PREFIX
                       "can't allocate region for p2m exposure "
@@ -1099,8 +1098,8 @@ p2m_expose_init(void)
        p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
        
        error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-                                     p2m_assign_start_pfn,
-                                     p2m_expose_size, p2m_granule_pfn);
+                                     p2m_assign_start_pfn,
+                                     p2m_expose_size, p2m_granule_pfn);
        if (error) {
                printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
                       error);
@@ -1115,9 +1114,9 @@ p2m_expose_init(void)
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
        if (xen_ia64_p2m_expose_use_dtr) {
                p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
-                                                       << PAGE_SHIFT);
+                                                       << PAGE_SHIFT);
                p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
-                                                    PAGE_KERNEL));
+                                                    PAGE_KERNEL));
                p2m_itr_arg.log_page_size = log_page_size;
                smp_mb();
                smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
@@ -1165,8 +1164,8 @@ p2m_expose_resume(void)
         * interrupts are masked when resume.
         */
        error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
-                                     p2m_assign_start_pfn,
-                                     p2m_expose_size, p2m_granule_pfn);
+                                     p2m_assign_start_pfn,
+                                     p2m_expose_size, p2m_granule_pfn);
        if (error) {
                printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
                       error);
@@ -1193,11 +1192,11 @@ p2m_expose_resume(void)
        }
 }
 
-//XXX inlinize?
+/* XXX inlinize? */
 unsigned long
 p2m_phystomach(unsigned long gpfn)
 {
-       volatile const pte_t* pte;
+       volatile const pte_t *pte;
        unsigned long mfn;
        unsigned long pteval;
        
@@ -1209,8 +1208,8 @@ p2m_phystomach(unsigned long gpfn)
 
        mfn = INVALID_MFN;
        if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
-                  pte_present(__pte(pteval)) &&
-                  pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
+                  pte_present(__pte(pteval)) &&
+                  pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
                mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
 
        return mfn;
@@ -1224,8 +1223,9 @@ EXPORT_SYMBOL_GPL(p2m_pte);
 EXPORT_SYMBOL_GPL(p2m_pte);
 EXPORT_SYMBOL_GPL(p2m_phystomach);
 
-///////////////////////////////////////////////////////////////////////////
-// foreign domain p2m mapping
+/**************************************************************************
+ * foreign domain p2m mapping
+ */
 #include <asm/xen/xencomm.h>
 #include <xen/public/privcmd.h>
 
@@ -1235,10 +1235,10 @@ struct foreign_p2m_private {
 };
 
 static void
-xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
-                        void* arg)
-{
-       struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
+xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
+                        void *arg)
+{
+       struct foreign_p2m_private *private = (struct foreign_p2m_private*)arg;
        int ret;
 
        privcmd_range->private = NULL;
@@ -1252,17 +1252,19 @@ xen_foreign_p2m_unexpose(struct xen_ia64
 }
 
 int
-xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
-{
-       // hypercall->
-       // arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
-       // arg1: va
-       // arg2: domid
-       // arg3: __user* memmap_info
-       // arg4: flags
+xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
+{
+       /*
+        * hypercall->
+        * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
+        * arg1: va
+        * arg2: domid
+        * arg3: __user* memmap_info
+        * arg4: flags
+        */
 
        int ret = 0;
-       struct mm_struct* mm = current->mm;
+       struct mm_struct *mm = current->mm;
 
        unsigned long vaddr = hypercall->arg[1];
        domid_t domid = hypercall->arg[2];
@@ -1271,19 +1273,19 @@ xen_foreign_p2m_expose(privcmd_hypercall
 
        struct xen_ia64_memmap_info memmap_info;
        size_t memmap_size;
-       struct xen_ia64_memmap_info* k_memmap_info = NULL;
+       struct xen_ia64_memmap_info *k_memmap_info = NULL;
        unsigned long max_gpfn;
        unsigned long p2m_size;
-       struct resource* res;
+       struct resource *res;
        unsigned long gpfn;
 
-       struct vm_area_struct* vma;
-       void* p;
+       struct vm_area_struct *vma;
+       void *p;
        unsigned long prev_src_gpfn_end;
 
-       struct xen_ia64_privcmd_vma* privcmd_vma;
-       struct xen_ia64_privcmd_range* privcmd_range;
-       struct foreign_p2m_private* private = NULL;
+       struct xen_ia64_privcmd_vma *privcmd_vma;
+       struct xen_ia64_privcmd_range *privcmd_range;
+       struct foreign_p2m_private *private = NULL;
 
        BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
 
@@ -1338,12 +1340,14 @@ xen_foreign_p2m_expose(privcmd_hypercall
        }
        
        gpfn = res->start >> PAGE_SHIFT;
-       // arg0: dest_gpfn
-       // arg1: domid
-       // arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
-       // arg3: flags
-       // The hypercall checks its intergirty/simplfies it and 
-       // copy it back for us.
+       /*
+        * arg0: dest_gpfn
+        * arg1: domid
+        * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
+        * arg3: flags
+        * The hypercall checks its intergirty/simplfies it and 
+        * copy it back for us.
+        */
        ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
              xencomm_map_no_alloc(k_memmap_info, memmap_size),
              hypercall->arg[4]);
@@ -1385,7 +1389,7 @@ xen_foreign_p2m_expose(privcmd_hypercall
                                      vma->vm_page_prot);
                if (ret) {
                        for (i = 0; i < gpfn + gpfn_offset; i++) {
-                               struct xen_ia64_privcmd_entry* entry =
+                               struct xen_ia64_privcmd_entry *entry =
                                        &privcmd_range->entries[i];
                                BUG_ON(atomic_read(&entry->map_count) != 1 &&
                                       atomic_read(&entry->map_count) != 0);
@@ -1399,7 +1403,7 @@ xen_foreign_p2m_expose(privcmd_hypercall
                for (i = gpfn_offset;
                     i < gpfn_offset + (size >> PAGE_SHIFT);
                     i++) {
-                       struct xen_ia64_privcmd_entry* entry =
+                       struct xen_ia64_privcmd_entry *entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->gpfn != INVALID_GPFN);
@@ -1424,13 +1428,13 @@ kfree_out:
 }
 #endif
 
-///////////////////////////////////////////////////////////////////////////
-// for xenoprof
-
+/**************************************************************************
+ * for xenoprof
+ */
 struct resource*
 xen_ia64_allocate_resource(unsigned long size)
 {
-       struct resource* res;
+       struct resource *res;
        int error;
        
        res = kzalloc(sizeof(*res), GFP_KERNEL);
@@ -1440,8 +1444,8 @@ xen_ia64_allocate_resource(unsigned long
        res->name = "Xen";
        res->flags = IORESOURCE_MEM;
        error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
-                                 privcmd_resource_min, privcmd_resource_max,
-                                 IA64_GRANULE_SIZE, NULL, NULL);
+                                 privcmd_resource_min, privcmd_resource_max,
+                                 IA64_GRANULE_SIZE, NULL, NULL);
        if (error) {
                kfree(res);
                return ERR_PTR(error);
@@ -1451,7 +1455,7 @@ EXPORT_SYMBOL_GPL(xen_ia64_allocate_reso
 EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
 
 void
-xen_ia64_release_resource(struct resource* res)
+xen_ia64_release_resource(struct resource *res)
 {
        release_resource(res);
        kfree(res);
@@ -1459,7 +1463,7 @@ EXPORT_SYMBOL_GPL(xen_ia64_release_resou
 EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
 
 void
-xen_ia64_unmap_resource(struct resource* res)
+xen_ia64_unmap_resource(struct resource *res)
 {
        unsigned long gpfn = res->start >> PAGE_SHIFT;
        unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
@@ -1476,8 +1480,9 @@ xen_ia64_unmap_resource(struct resource*
 }
 EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
 
-///////////////////////////////////////////////////////////////////////////
-// opt feature
+/**************************************************************************
+ * opt feature
+ */
 void
 xen_ia64_enable_opt_feature(void)
 {
@@ -1491,8 +1496,9 @@ xen_ia64_enable_opt_feature(void)
        HYPERVISOR_opt_feature(&optf);
 }
 
-///////////////////////////////////////////////////////////////////////////
-// suspend/resume
+/**************************************************************************
+ * suspend/resume
+ */
 void
 xen_post_suspend(int suspend_cancelled)
 {
diff -r 77f831cbb91d -r 71a415f9179b arch/ia64/xen/util.c
--- a/arch/ia64/xen/util.c      Fri Jan 18 16:52:25 2008 +0000
+++ b/arch/ia64/xen/util.c      Fri Jan 18 14:20:59 2008 -0700
@@ -35,25 +35,23 @@ struct vm_struct *alloc_vm_area(unsigned
        int order;
        unsigned long virt;
        unsigned long nr_pages;
-       struct vm_struct* area;
-       
+       struct vm_struct *area;
+
        order = get_order(size);
        virt = __get_free_pages(GFP_KERNEL, order);
-       if (virt == 0) {
+       if (virt == 0)
                goto err0;
-       }
        nr_pages = 1 << order;
        scrub_pages(virt, nr_pages);
-       
+
        area = kmalloc(sizeof(*area), GFP_KERNEL);
-       if (area == NULL) {
+       if (area == NULL)
                goto err1;
-       }
-       
-        area->flags = VM_IOREMAP;//XXX
+
+        area->flags = VM_IOREMAP; /* XXX */
         area->addr = (void*)virt;
         area->size = size;
-        area->pages = NULL; //XXX
+        area->pages = NULL; /* XXX */
         area->nr_pages = nr_pages;
         area->phys_addr = 0;   /* xenbus_map_ring_valloc uses this field!  */
 
@@ -63,7 +61,6 @@ err1:
        free_pages(virt, order);
 err0:
        return NULL;
-       
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
 
@@ -73,8 +70,8 @@ void free_vm_area(struct vm_struct *area
        unsigned long i;
        unsigned long phys_addr = __pa(area->addr);
 
-       // This area is used for foreign page mappping.
-       // So underlying machine page may not be assigned.
+       /* This area is used for foreign page mappping.
+        * So underlying machine page may not be assigned. */
        for (i = 0; i < (1 << order); i++) {
                unsigned long ret;
                unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
diff -r 77f831cbb91d -r 71a415f9179b arch/ia64/xen/xcom_privcmd.c
--- a/arch/ia64/xen/xcom_privcmd.c      Fri Jan 18 16:52:25 2008 +0000
+++ b/arch/ia64/xen/xcom_privcmd.c      Fri Jan 18 14:20:59 2008 -0700
@@ -120,8 +120,8 @@ xencomm_privcmd_sysctl(privcmd_hypercall
                        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
                        .u.perfc_op = {
                                .cmd = XEN_SYSCTL_PERFCOP_query,
-                               // .desc.p = NULL,
-                               // .val.p = NULL,
+                               /* .desc.p = NULL, */
+                               /* .val.p = NULL, */
                        },
                };
 
diff -r 77f831cbb91d -r 71a415f9179b include/asm-ia64/hypervisor.h
--- a/include/asm-ia64/hypervisor.h     Fri Jan 18 16:52:25 2008 +0000
+++ b/include/asm-ia64/hypervisor.h     Fri Jan 18 14:20:59 2008 -0700
@@ -117,7 +117,7 @@ HYPERVISOR_poll(
 }
 
 #ifndef CONFIG_VMX_GUEST
-// for drivers/xen/privcmd/privcmd.c
+/* for drivers/xen/privcmd/privcmd.c */
 #define machine_to_phys_mapping 0
 struct vm_area_struct;
 int direct_remap_pfn_range(struct vm_area_struct *vma,
@@ -131,7 +131,7 @@ int privcmd_mmap(struct file * file, str
 int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
 #define HAVE_ARCH_PRIVCMD_MMAP
 
-// for drivers/xen/balloon/balloon.c
+/* for drivers/xen/balloon/balloon.c */
 #ifdef CONFIG_XEN_SCRUB_PAGES
 #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
 #else
@@ -178,8 +178,8 @@ void xen_ia64_enable_opt_feature(void);
 #define __pte_ma(_x)   ((pte_t) {(_x)})        /* unmodified use */
 #define pfn_pte_ma(_x,_y)      __pte_ma(0)     /* unmodified use */
 
-// for netfront.c, netback.c
-#define MULTI_UVMFLAGS_INDEX 0 //XXX any value
+/* for netfront.c, netback.c */
+#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
 
 static inline void
 MULTI_update_va_mapping(
@@ -216,7 +216,7 @@ MULTI_grant_table_op(multicall_entry_t *
                (-ENOSYS);                                              \
        })
 
-// for debug
+/* for debug */
 asmlinkage int xprintk(const char *fmt, ...);
 #define xprintd(fmt, ...)      xprintk("%s:%d " fmt, __func__, __LINE__, \
                                        ##__VA_ARGS__)
diff -r 77f831cbb91d -r 71a415f9179b include/asm-ia64/maddr.h
--- a/include/asm-ia64/maddr.h  Fri Jan 18 16:52:25 2008 +0000
+++ b/include/asm-ia64/maddr.h  Fri Jan 18 14:20:59 2008 -0700
@@ -31,8 +31,8 @@ pfn_to_mfn_for_dma(unsigned long pfn)
        if (p2m_initialized)
                return p2m_phystomach(pfn);
        mfn = HYPERVISOR_phystomach(pfn);
-       BUG_ON(mfn == 0); // XXX
-       BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
+       BUG_ON(mfn == 0); /* XXX */
+       BUG_ON(mfn == INVALID_P2M_ENTRY); /* XXX */
        BUG_ON(mfn == INVALID_MFN);
        return mfn;
 }
@@ -52,7 +52,7 @@ mfn_to_pfn_for_dma(unsigned long mfn)
        unsigned long pfn;
        pfn = HYPERVISOR_machtophys(mfn);
        BUG_ON(pfn == 0);
-       //BUG_ON(pfn == INVALID_M2P_ENTRY);
+       /* BUG_ON(pfn == INVALID_M2P_ENTRY); */
        return pfn;
 }
 
@@ -98,11 +98,11 @@ mfn_to_local_pfn(unsigned long mfn)
 
 #define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
 #define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
-#define virt_to_machine(virt) __pa(virt) // for tpmfront.c
+#define virt_to_machine(virt) __pa(virt) /* for tpmfront.c */
 
 #define set_phys_to_machine(pfn, mfn) do { } while (0)
 
-typedef unsigned long maddr_t; // to compile netback, netfront
+typedef unsigned long maddr_t; /* to compile netback, netfront */
 #ifndef _ASM_IA64_SN_TYPES_H /* paddr_t is defined in asm-ia64/sn/types.h */
 typedef unsigned long paddr_t;
 #endif
diff -r 77f831cbb91d -r 71a415f9179b include/asm-ia64/xenoprof.h
--- a/include/asm-ia64/xenoprof.h       Fri Jan 18 16:52:25 2008 +0000
+++ b/include/asm-ia64/xenoprof.h       Fri Jan 18 14:20:59 2008 -0700
@@ -36,13 +36,13 @@ struct xenoprof_arch_shared_buffer {
 };
 
 struct xenoprof_shared_buffer;
-void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
 struct xenoprof_get_buffer;
-int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
-                                    struct xenoprof_shared_buffer* sbuf);
+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
+                                    struct xenoprof_shared_buffer *sbuf);
 struct xenoprof_passive;
-int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
-                              struct xenoprof_shared_buffer* sbuf);
+int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
+                              struct xenoprof_shared_buffer *sbuf);
 
 #endif /* CONFIG_XEN */
 #endif /* __ASM_XENOPROF_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
