WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

xen-changelog

[Xen-changelog] [xen-unstable] Eliminate unnecessary casts from __trace_var() invocations

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Eliminate unnecessary casts from __trace_var() invocations
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 09 Oct 2010 14:55:41 -0700
Delivery-date: Sat, 09 Oct 2010 15:01:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1285005168 -3600
# Node ID 62a44418d8a07275f78389b173222ffd6dbebb30
# Parent  a1d2d8222d0105d04870c2756a293df71b1f68ae
Eliminate unnecessary casts from __trace_var() invocations

This is possible now that its last parameter's type is 'const void *'.
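
For illustration, a minimal stand-alone sketch (trace_var_sketch, the
event number, and the record layout below are invented for this example;
they are not the real Xen definitions). C converts any object pointer to
'const void *' implicitly, so once the parameter type changes, the
'(unsigned char *)' casts at every call site become redundant:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for __trace_var() with the new 'const void *' last
     * parameter. */
    static void trace_var_sketch(unsigned event, int cycles,
                                 size_t extra, const void *extra_data)
    {
        /* Byte view of the record, as a trace-buffer copy would use. */
        const unsigned char *p = extra_data;
        printf("event=%#x cycles=%d len=%zu first_byte=%02x\n",
               event, cycles, extra, extra ? p[0] : 0u);
    }

    int main(void)
    {
        struct { unsigned long va; unsigned flags; } d = { 0x1000UL, 3 };

        /* Old style: the cast was required while the parameter was
         * 'unsigned char *'; it still compiles, but is now noise. */
        trace_var_sketch(0x101, 1, sizeof(d), (unsigned char *)&d);

        /* New style, as in this changeset: &d converts implicitly. */
        trace_var_sketch(0x101, 1, sizeof(d), &d);
        return 0;
    }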

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c      |    2 -
 xen/arch/x86/hvm/vmx/vmx.c      |    2 -
 xen/arch/x86/mm/p2m.c           |   10 ++++-----
 xen/arch/x86/mm/shadow/common.c |    7 ++----
 xen/arch/x86/mm/shadow/multi.c  |   10 ++++-----
 xen/arch/x86/trace.c            |   41 ++++++++++++++++++----------------------
 xen/common/memory.c             |    2 -
 xen/common/schedule.c           |    5 +---
 xen/include/asm-x86/hvm/trace.h |    2 -
 9 files changed, 38 insertions(+), 43 deletions(-)

diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Sep 20 18:52:48 2010 +0100
@@ -932,7 +932,7 @@ static void svm_do_nested_pgfault(paddr_
         _d.qualification = 0;
         _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( hvm_hap_nested_page_fault(gfn) )
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Sep 20 18:52:48 2010 +0100
@@ -2082,7 +2082,7 @@ static void ept_handle_violation(unsigne
         _d.qualification = qualification;
         _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( (qualification & EPT_GLA_VALID) &&
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Mon Sep 20 18:52:48 2010 +0100
@@ -889,7 +889,7 @@ p2m_pod_zero_check_superpage(struct p2m_
         t.d = d->domain_id;
         t.order = 9;
 
-        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
     }
 
     /* Finally!  We've passed all the checks, and can add the mfn superpage
@@ -1004,7 +1004,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
                 t.d = d->domain_id;
                 t.order = 0;
         
-                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
             }
 
             /* Add to cache, and account for the new p2m PoD entry */
@@ -1178,7 +1178,7 @@ p2m_pod_demand_populate(struct p2m_domai
         t.d = d->domain_id;
         t.order = order;
         
-        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1209,7 +1209,7 @@ remap_and_retry:
         t.gfn = gfn;
         t.d = d->domain_id;
         
-        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1276,7 +1276,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
         t.d = p2m->domain->domain_id;
         t.order = page_order;
 
-        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
 #if CONFIG_PAGING_LEVELS >= 4
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Sep 20 18:52:48 2010 +0100
@@ -715,7 +715,7 @@ static inline void trace_resync(int even
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(event, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(event, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -1314,8 +1314,7 @@ static inline void trace_shadow_prealloc
         unsigned long gfn;
         ASSERT(mfn_valid(smfn));
         gfn = mfn_to_gfn(d, backpointer(mfn_to_page(smfn)));
-        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
-                    sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -2200,7 +2199,7 @@ static inline void trace_shadow_wrmap_bf
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Sep 20 18:52:48 2010 +0100
@@ -2883,7 +2883,7 @@ static inline void trace_shadow_gen(u32 
     if ( tb_init_done )
     {
         event |= (GUEST_PAGING_LEVELS-2)<<8;
-        __trace_var(event, 0/*!tsc*/, sizeof(va), (unsigned char*)&va);
+        __trace_var(event, 0/*!tsc*/, sizeof(va), &va);
     }
 }
 
@@ -2907,7 +2907,7 @@ static inline void trace_shadow_fixup(gu
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2931,7 +2931,7 @@ static inline void trace_not_shadow_faul
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2957,7 +2957,7 @@ static inline void trace_shadow_emulate_
         d.gfn=gfn_x(gfn);
         d.va = va;
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
@@ -2990,7 +2990,7 @@ static inline void trace_shadow_emulate(
 #endif
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c      Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/arch/x86/trace.c      Mon Sep 20 18:52:48 2010 +0100
@@ -25,8 +25,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(TRC_PV_HYPERCALL, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_HYPERCALL, 1, sizeof(d), &d);
     }
     else
 #endif
@@ -42,7 +41,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
     }
 }
 
@@ -64,8 +63,7 @@ void __trace_pv_trap(int trapnr, unsigne
         d.error_code = error_code;
         d.use_error_code=!!use_error_code;
                 
-        __trace_var(TRC_PV_TRAP, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -85,7 +83,7 @@ void __trace_pv_trap(int trapnr, unsigne
                 
         event = TRC_PV_TRAP;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -104,7 +102,7 @@ void __trace_pv_page_fault(unsigned long
         d.addr = addr;
         d.error_code = error_code;
                 
-        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -120,7 +118,7 @@ void __trace_pv_page_fault(unsigned long
         d.error_code = error_code;
         event = TRC_PV_PAGE_FAULT;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -130,13 +128,13 @@ void __trace_trap_one_addr(unsigned even
     if ( is_pv_32on64_vcpu(current) )
     {
         u32 d = va;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
-    }
-    else
-#endif        
-    {
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(va), (unsigned char *)&va);
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+    else
+#endif        
+    {
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(va), &va);
     }
 }
 
@@ -151,7 +149,7 @@ void __trace_trap_two_addr(unsigned even
         } __attribute__((packed)) d;
         d.va1=va1;
         d.va2=va2;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -162,7 +160,7 @@ void __trace_trap_two_addr(unsigned even
         d.va1=va1;
         d.va2=va2;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -189,8 +187,7 @@ void __trace_ptwr_emulation(unsigned lon
         d.eip = eip;
         d.pte = npte;
 
-        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -208,6 +205,6 @@ void __trace_ptwr_emulation(unsigned lon
         event = ((CONFIG_PAGING_LEVELS == 3) ?
                  TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
         event |= TRC_64_FLAG;
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
-    }
-}
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
+    }
+}
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/common/memory.c
--- a/xen/common/memory.c       Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/common/memory.c       Mon Sep 20 18:52:48 2010 +0100
@@ -234,7 +234,7 @@ static void decrease_reservation(struct 
             t.d = a->domain->domain_id;
             t.order = a->extent_order;
         
-            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
+            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
         }
 
         /* See if populate-on-demand wants to handle this */
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/common/schedule.c
--- a/xen/common/schedule.c     Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/common/schedule.c     Mon Sep 20 18:52:48 2010 +0100
@@ -93,7 +93,7 @@ static inline void trace_runstate_change
     event |= ( v->runstate.state & 0x3 ) << 8;
     event |= ( new_state & 0x3 ) << 4;
 
-    __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
+    __trace_var(event, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void trace_continue_running(struct vcpu *v)
@@ -106,8 +106,7 @@ static inline void trace_continue_runnin
     d.vcpu = v->vcpu_id;
     d.domain = v->domain->domain_id;
 
-    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d),
-                (unsigned char *)&d);
+    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void vcpu_urgent_count_update(struct vcpu *v)
diff -r a1d2d8222d01 -r 62a44418d8a0 xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h   Mon Sep 20 18:51:19 2010 +0100
+++ b/xen/include/asm-x86/hvm/trace.h   Mon Sep 20 18:52:48 2010 +0100
@@ -72,7 +72,7 @@
             _d.d[4]=(d5);                                               \
             _d.d[5]=(d6);                                               \
             __trace_var(TRC_HVM_ ## evt, cycles,                        \
-                        sizeof(u32)*count+1, (unsigned char *)&_d);     \
+                        sizeof(u32)*count+1, &_d);                      \
         }                                                               \
     } while(0)
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
