WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH] eliminate unnecessary casts from __trace_var() invocations

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] eliminate unnecessary casts from __trace_var() invocations
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Mon, 20 Sep 2010 14:36:30 +0100
Delivery-date: Mon, 20 Sep 2010 06:37:16 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
This is cleanup made possible by converting the function's last parameter's
type to 'const void *'.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2010-09-20.orig/xen/arch/x86/hvm/svm/svm.c  2010-09-15 17:59:07.000000000 
+0200
+++ 2010-09-20/xen/arch/x86/hvm/svm/svm.c       2010-09-20 10:00:18.000000000 
+0200
@@ -958,7 +958,7 @@ static void svm_do_nested_pgfault(paddr_
         _d.qualification = 0;
         _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( hvm_hap_nested_page_fault(gfn) )
--- 2010-09-20.orig/xen/arch/x86/hvm/vmx/vmx.c  2010-09-15 17:59:07.000000000 
+0200
+++ 2010-09-20/xen/arch/x86/hvm/vmx/vmx.c       2010-09-20 10:00:19.000000000 
+0200
@@ -2101,7 +2101,7 @@ static void ept_handle_violation(unsigne
         _d.qualification = qualification;
         _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
         
-        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
     if ( (qualification & EPT_GLA_VALID) &&
--- 2010-09-20.orig/xen/arch/x86/mm/p2m.c       2010-09-20 10:00:07.000000000 
+0200
+++ 2010-09-20/xen/arch/x86/mm/p2m.c    2010-09-20 10:00:19.000000000 +0200
@@ -889,7 +889,7 @@ p2m_pod_zero_check_superpage(struct p2m_
         t.d = d->domain_id;
         t.order = 9;
 
-        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char 
*)&t);
+        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
     }
 
     /* Finally!  We've passed all the checks, and can add the mfn superpage
@@ -1004,7 +1004,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
                 t.d = d->domain_id;
                 t.order = 0;
         
-                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned 
char *)&t);
+                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), &t);
             }
 
             /* Add to cache, and account for the new p2m PoD entry */
@@ -1178,7 +1178,7 @@ p2m_pod_demand_populate(struct p2m_domai
         t.d = d->domain_id;
         t.order = order;
         
-        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1209,7 +1209,7 @@ remap_and_retry:
         t.gfn = gfn;
         t.d = d->domain_id;
         
-        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned 
char *)&t);
+        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), &t);
     }
 
     return 0;
@@ -1276,7 +1276,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
         t.d = p2m->domain->domain_id;
         t.order = page_order;
 
-        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
 #if CONFIG_PAGING_LEVELS >= 4
--- 2010-09-20.orig/xen/arch/x86/mm/shadow/common.c     2010-09-06 
08:21:15.000000000 +0200
+++ 2010-09-20/xen/arch/x86/mm/shadow/common.c  2010-09-20 10:00:19.000000000 
+0200
@@ -715,7 +715,7 @@ static inline void trace_resync(int even
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(event, 0/*!tsc*/, sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(event, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -1314,8 +1314,7 @@ static inline void trace_shadow_prealloc
         unsigned long gfn;
         ASSERT(mfn_valid(smfn));
         gfn = mfn_to_gfn(d, backpointer(mfn_to_page(smfn)));
-        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
-                    sizeof(gfn), (unsigned char*)&gfn);
+        __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
@@ -2200,7 +2199,7 @@ static inline void trace_shadow_wrmap_bf
     {
         /* Convert gmfn to gfn */
         unsigned long gfn = mfn_to_gfn(current->domain, gmfn);
-        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), (unsigned 
char*)&gfn);
+        __trace_var(TRC_SHADOW_WRMAP_BF, 0/*!tsc*/, sizeof(gfn), &gfn);
     }
 }
 
--- 2010-09-20.orig/xen/arch/x86/mm/shadow/multi.c      2010-09-20 
10:00:07.000000000 +0200
+++ 2010-09-20/xen/arch/x86/mm/shadow/multi.c   2010-09-20 10:00:19.000000000 
+0200
@@ -2883,7 +2883,7 @@ static inline void trace_shadow_gen(u32 
     if ( tb_init_done )
     {
         event |= (GUEST_PAGING_LEVELS-2)<<8;
-        __trace_var(event, 0/*!tsc*/, sizeof(va), (unsigned char*)&va);
+        __trace_var(event, 0/*!tsc*/, sizeof(va), &va);
     }
 }
 
@@ -2907,7 +2907,7 @@ static inline void trace_shadow_fixup(gu
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2931,7 +2931,7 @@ static inline void trace_not_shadow_faul
         d.va = va;
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
                                           
@@ -2957,7 +2957,7 @@ static inline void trace_shadow_emulate_
         d.gfn=gfn_x(gfn);
         d.va = va;
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
@@ -2990,7 +2990,7 @@ static inline void trace_shadow_emulate(
 #endif
         d.flags = this_cpu(trace_shadow_path_flags);
 
-        __trace_var(event, 0/*!tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
 
--- 2010-09-20.orig/xen/arch/x86/trace.c        2008-06-10 18:00:41.000000000 
+0200
+++ 2010-09-20/xen/arch/x86/trace.c     2010-09-20 10:00:19.000000000 +0200
@@ -25,8 +25,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(TRC_PV_HYPERCALL, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_HYPERCALL, 1, sizeof(d), &d);
     }
     else
 #endif
@@ -42,7 +41,7 @@ asmlinkage void trace_hypercall(void)
         d.eip = regs->eip;
         d.eax = regs->eax;
 
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char*)&d);
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
     }
 }
 
@@ -64,8 +63,7 @@ void __trace_pv_trap(int trapnr, unsigne
         d.error_code = error_code;
         d.use_error_code=!!use_error_code;
                 
-        __trace_var(TRC_PV_TRAP, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -85,7 +83,7 @@ void __trace_pv_trap(int trapnr, unsigne
                 
         event = TRC_PV_TRAP;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -104,7 +102,7 @@ void __trace_pv_page_fault(unsigned long
         d.addr = addr;
         d.error_code = error_code;
                 
-        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -120,7 +118,7 @@ void __trace_pv_page_fault(unsigned long
         d.error_code = error_code;
         event = TRC_PV_PAGE_FAULT;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -130,13 +128,13 @@ void __trace_trap_one_addr(unsigned even
     if ( is_pv_32on64_vcpu(current) )
     {
         u32 d = va;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
     else
 #endif        
     {
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(va), (unsigned char *)&va);
+        __trace_var(event, 1, sizeof(va), &va);
     }
 }
 
@@ -151,7 +149,7 @@ void __trace_trap_two_addr(unsigned even
         } __attribute__((packed)) d;
         d.va1=va1;
         d.va2=va2;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -162,7 +160,7 @@ void __trace_trap_two_addr(unsigned even
         d.va1=va1;
         d.va2=va2;
         event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1, sizeof(d), &d);
     }
 }
 
@@ -189,8 +187,7 @@ void __trace_ptwr_emulation(unsigned lon
         d.eip = eip;
         d.pte = npte;
 
-        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1,
-                    sizeof(d), (unsigned char *)&d);
+        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
     }
     else
 #endif        
@@ -208,6 +205,6 @@ void __trace_ptwr_emulation(unsigned lon
         event = ((CONFIG_PAGING_LEVELS == 3) ?
                  TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
         event |= TRC_64_FLAG;
-        __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
     }
 }
--- 2010-09-20.orig/xen/common/memory.c 2010-08-12 08:17:22.000000000 +0200
+++ 2010-09-20/xen/common/memory.c      2010-09-20 10:00:19.000000000 +0200
@@ -234,7 +234,7 @@ static void decrease_reservation(struct 
             t.d = a->domain->domain_id;
             t.order = a->extent_order;
         
-            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned 
char *)&t);
+            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
         }
 
         /* See if populate-on-demand wants to handle this */
--- 2010-09-20.orig/xen/common/schedule.c       2010-08-12 08:17:22.000000000 
+0200
+++ 2010-09-20/xen/common/schedule.c    2010-09-20 10:00:19.000000000 +0200
@@ -93,7 +93,7 @@ static inline void trace_runstate_change
     event |= ( v->runstate.state & 0x3 ) << 8;
     event |= ( new_state & 0x3 ) << 4;
 
-    __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
+    __trace_var(event, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void trace_continue_running(struct vcpu *v)
@@ -106,8 +106,7 @@ static inline void trace_continue_runnin
     d.vcpu = v->vcpu_id;
     d.domain = v->domain->domain_id;
 
-    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d),
-                (unsigned char *)&d);
+    __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d);
 }
 
 static inline void vcpu_urgent_count_update(struct vcpu *v)
--- 2010-09-20.orig/xen/include/asm-x86/hvm/trace.h     2010-07-12 
10:27:12.000000000 +0200
+++ 2010-09-20/xen/include/asm-x86/hvm/trace.h  2010-09-20 10:00:19.000000000 
+0200
@@ -72,7 +72,7 @@
             _d.d[4]=(d5);                                               \
             _d.d[5]=(d6);                                               \
             __trace_var(TRC_HVM_ ## evt, cycles,                        \
-                        sizeof(u32)*count+1, (unsigned char *)&_d);     \
+                        sizeof(u32)*count+1, &_d);                      \
         }                                                               \
     } while(0)
 


Attachment: trace_var-no-cast.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-devel] [PATCH] eliminate unnecessary casts from __trace_var() invocations, Jan Beulich <=