# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1175010167 -3600
# Node ID ea0b50ca4999cbc78fa7f1356003f0dffa1a30c9
# Parent 96f167771979734080bb0f45086f1d4f699fa50d
xen: Remove legacy references to explicitly per-cpu perf counters.
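
All performance counters are now implicitly per-cpu, so the explicitly
per-cpu variants are legacy: every perfc_incrc() call site becomes
perfc_incr(), and every PERFCOUNTER_CPU() definition becomes
PERFCOUNTER(). No counter is added or removed; the rename is purely
mechanical.

As a minimal before/after sketch (the counter name "example_event" is
invented for illustration and is not one of the counters touched below):

    /* definition, in the architecture's perfc_defn.h */
    -PERFCOUNTER_CPU(example_event, "example event")
    +PERFCOUNTER(example_event, "example event")

    /* increment, at the event site */
    -    perfc_incrc(example_event);
    +    perfc_incr(example_event);
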
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/ia64/linux-xen/irq_ia64.c |   2
 xen/arch/ia64/linux-xen/smp.c      |   2
 xen/arch/ia64/vmx/pal_emul.c       |   2
 xen/arch/ia64/vmx/vmx_process.c    |   2
 xen/arch/ia64/vmx/vmx_virt.c       |  78 ++++++-------
 xen/arch/ia64/xen/dom0_ops.c       |   4
 xen/arch/ia64/xen/domain.c         |   8 -
 xen/arch/ia64/xen/faults.c         |   2
 xen/arch/ia64/xen/hypercall.c      |   4
 xen/arch/ia64/xen/mm.c             |  26 ++--
 xen/arch/ia64/xen/privop.c         |  30 ++---
 xen/arch/ia64/xen/tlb_track.c      |  42 +++----
 xen/arch/ia64/xen/vcpu.c           |  10 -
 xen/arch/ia64/xen/vhpt.c           |  22 +--
 xen/arch/powerpc/mm.c              |   2
 xen/arch/x86/apic.c                |   2
 xen/arch/x86/extable.c             |   2
 xen/arch/x86/irq.c                 |   2
 xen/arch/x86/mm.c                  |   6 -
 xen/arch/x86/mm/shadow/common.c    |  40 +++----
 xen/arch/x86/mm/shadow/multi.c     |  56 ++++-----
 xen/arch/x86/smp.c                 |   6 -
 xen/arch/x86/traps.c               |   4
 xen/arch/x86/x86_32/domain_page.c  |   6 -
 xen/arch/x86/x86_32/seg_fixup.c    |   2
 xen/common/page_alloc.c            |   2
 xen/common/schedule.c              |   6 -
 xen/include/asm-ia64/perfc_defn.h  | 210 ++++++++++++++++++-------------------
 xen/include/asm-ia64/tlb_track.h   |   4
 xen/include/asm-x86/perfc_defn.h   | 118 ++++++++++----------
 xen/include/xen/perfc.h            |  18 +--
 xen/include/xen/perfc_defn.h       |  12 +-
 32 files changed, 363 insertions(+), 369 deletions(-)
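
As background, the perfc_defn.h headers changed below use the X-macro
idiom -- each is included several times with a different definition of
PERFCOUNTER() -- which is why they carry the comment "This file is
legitimately included multiple times". A simplified sketch of one such
expansion pass (not the actual xen/include/xen/perfc.h contents, which
also handle PERFCOUNTER_ARRAY and PERFSTATUS along the same lines):

    /* Build an enum of counter indices from the definition list. */
    #define PERFCOUNTER(var, name) PERFC_##var,
    enum perfcounter {
    #include <xen/perfc_defn.h>
        NUM_PERFCOUNTERS
    };
    #undef PERFCOUNTER

    /* perfc_incr(x) can then index a per-cpu counter array by PERFC_x. */
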
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c Tue Mar 27 16:42:47 2007 +0100
@@ -113,7 +113,7 @@ ia64_handle_irq (ia64_vector vector, str
unsigned long saved_tpr;
#ifdef XEN
- perfc_incrc(irqs);
+ perfc_incr(irqs);
#endif
#if IRQ_DEBUG
#ifdef XEN
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c Tue Mar 27 16:42:47 2007 +0100
@@ -148,7 +148,7 @@ handle_IPI (int irq, void *dev_id, struc
unsigned long ops;
#ifdef XEN
- perfc_incrc(ipis);
+ perfc_incr(ipis);
#endif
mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) {
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/vmx/pal_emul.c Tue Mar 27 16:42:47 2007 +0100
@@ -37,7 +37,7 @@ pal_emul(struct vcpu *vcpu)
vcpu_get_gr_nat(vcpu, 30, &gr30);
vcpu_get_gr_nat(vcpu, 31, &gr31);
- perfc_incrc(vmx_pal_emul);
+ perfc_incr(vmx_pal_emul);
result = xen_pal_emulator(gr28, gr29, gr30, gr31);
vcpu_set_gr(vcpu, 8, result.status, 0);
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_process.c Tue Mar 27 16:42:47 2007 +0100
@@ -151,7 +151,7 @@ vmx_ia64_handle_break (unsigned long ifa
struct domain *d = current->domain;
struct vcpu *v = current;
- perfc_incrc(vmx_ia64_handle_break);
+ perfc_incr(vmx_ia64_handle_break);
#ifdef CRASH_DEBUG
if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
IS_VMM_ADDRESS(regs->cr_iip)) {
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/vmx/vmx_virt.c Tue Mar 27 16:42:47 2007 +0100
@@ -1401,159 +1401,159 @@ if ( (cause == 0xff && opcode == 0x1e000
switch(cause) {
case EVENT_RSM:
- perfc_incrc(vmx_rsm);
+ perfc_incr(vmx_rsm);
status=vmx_emul_rsm(vcpu, inst);
break;
case EVENT_SSM:
- perfc_incrc(vmx_ssm);
+ perfc_incr(vmx_ssm);
status=vmx_emul_ssm(vcpu, inst);
break;
case EVENT_MOV_TO_PSR:
- perfc_incrc(vmx_mov_to_psr);
+ perfc_incr(vmx_mov_to_psr);
status=vmx_emul_mov_to_psr(vcpu, inst);
break;
case EVENT_MOV_FROM_PSR:
- perfc_incrc(vmx_mov_from_psr);
+ perfc_incr(vmx_mov_from_psr);
status=vmx_emul_mov_from_psr(vcpu, inst);
break;
case EVENT_MOV_FROM_CR:
- perfc_incrc(vmx_mov_from_cr);
+ perfc_incr(vmx_mov_from_cr);
status=vmx_emul_mov_from_cr(vcpu, inst);
break;
case EVENT_MOV_TO_CR:
- perfc_incrc(vmx_mov_to_cr);
+ perfc_incr(vmx_mov_to_cr);
status=vmx_emul_mov_to_cr(vcpu, inst);
break;
case EVENT_BSW_0:
- perfc_incrc(vmx_bsw0);
+ perfc_incr(vmx_bsw0);
status=vmx_emul_bsw0(vcpu, inst);
break;
case EVENT_BSW_1:
- perfc_incrc(vmx_bsw1);
+ perfc_incr(vmx_bsw1);
status=vmx_emul_bsw1(vcpu, inst);
break;
case EVENT_COVER:
- perfc_incrc(vmx_cover);
+ perfc_incr(vmx_cover);
status=vmx_emul_cover(vcpu, inst);
break;
case EVENT_RFI:
- perfc_incrc(vmx_rfi);
+ perfc_incr(vmx_rfi);
status=vmx_emul_rfi(vcpu, inst);
break;
case EVENT_ITR_D:
- perfc_incrc(vmx_itr_d);
+ perfc_incr(vmx_itr_d);
status=vmx_emul_itr_d(vcpu, inst);
break;
case EVENT_ITR_I:
- perfc_incrc(vmx_itr_i);
+ perfc_incr(vmx_itr_i);
status=vmx_emul_itr_i(vcpu, inst);
break;
case EVENT_PTR_D:
- perfc_incrc(vmx_ptr_d);
+ perfc_incr(vmx_ptr_d);
status=vmx_emul_ptr_d(vcpu, inst);
break;
case EVENT_PTR_I:
- perfc_incrc(vmx_ptr_i);
+ perfc_incr(vmx_ptr_i);
status=vmx_emul_ptr_i(vcpu, inst);
break;
case EVENT_ITC_D:
- perfc_incrc(vmx_itc_d);
+ perfc_incr(vmx_itc_d);
status=vmx_emul_itc_d(vcpu, inst);
break;
case EVENT_ITC_I:
- perfc_incrc(vmx_itc_i);
+ perfc_incr(vmx_itc_i);
status=vmx_emul_itc_i(vcpu, inst);
break;
case EVENT_PTC_L:
- perfc_incrc(vmx_ptc_l);
+ perfc_incr(vmx_ptc_l);
status=vmx_emul_ptc_l(vcpu, inst);
break;
case EVENT_PTC_G:
- perfc_incrc(vmx_ptc_g);
+ perfc_incr(vmx_ptc_g);
status=vmx_emul_ptc_g(vcpu, inst);
break;
case EVENT_PTC_GA:
- perfc_incrc(vmx_ptc_ga);
+ perfc_incr(vmx_ptc_ga);
status=vmx_emul_ptc_ga(vcpu, inst);
break;
case EVENT_PTC_E:
- perfc_incrc(vmx_ptc_e);
+ perfc_incr(vmx_ptc_e);
status=vmx_emul_ptc_e(vcpu, inst);
break;
case EVENT_MOV_TO_RR:
- perfc_incrc(vmx_mov_to_rr);
+ perfc_incr(vmx_mov_to_rr);
status=vmx_emul_mov_to_rr(vcpu, inst);
break;
case EVENT_MOV_FROM_RR:
- perfc_incrc(vmx_mov_from_rr);
+ perfc_incr(vmx_mov_from_rr);
status=vmx_emul_mov_from_rr(vcpu, inst);
break;
case EVENT_THASH:
- perfc_incrc(vmx_thash);
+ perfc_incr(vmx_thash);
status=vmx_emul_thash(vcpu, inst);
break;
case EVENT_TTAG:
- perfc_incrc(vmx_ttag);
+ perfc_incr(vmx_ttag);
status=vmx_emul_ttag(vcpu, inst);
break;
case EVENT_TPA:
- perfc_incrc(vmx_tpa);
+ perfc_incr(vmx_tpa);
status=vmx_emul_tpa(vcpu, inst);
break;
case EVENT_TAK:
- perfc_incrc(vmx_tak);
+ perfc_incr(vmx_tak);
status=vmx_emul_tak(vcpu, inst);
break;
case EVENT_MOV_TO_AR_IMM:
- perfc_incrc(vmx_mov_to_ar_imm);
+ perfc_incr(vmx_mov_to_ar_imm);
status=vmx_emul_mov_to_ar_imm(vcpu, inst);
break;
case EVENT_MOV_TO_AR:
- perfc_incrc(vmx_mov_to_ar_reg);
+ perfc_incr(vmx_mov_to_ar_reg);
status=vmx_emul_mov_to_ar_reg(vcpu, inst);
break;
case EVENT_MOV_FROM_AR:
- perfc_incrc(vmx_mov_from_ar_reg);
+ perfc_incr(vmx_mov_from_ar_reg);
status=vmx_emul_mov_from_ar_reg(vcpu, inst);
break;
case EVENT_MOV_TO_DBR:
- perfc_incrc(vmx_mov_to_dbr);
+ perfc_incr(vmx_mov_to_dbr);
status=vmx_emul_mov_to_dbr(vcpu, inst);
break;
case EVENT_MOV_TO_IBR:
- perfc_incrc(vmx_mov_to_ibr);
+ perfc_incr(vmx_mov_to_ibr);
status=vmx_emul_mov_to_ibr(vcpu, inst);
break;
case EVENT_MOV_TO_PMC:
- perfc_incrc(vmx_mov_to_pmc);
+ perfc_incr(vmx_mov_to_pmc);
status=vmx_emul_mov_to_pmc(vcpu, inst);
break;
case EVENT_MOV_TO_PMD:
- perfc_incrc(vmx_mov_to_pmd);
+ perfc_incr(vmx_mov_to_pmd);
status=vmx_emul_mov_to_pmd(vcpu, inst);
break;
case EVENT_MOV_TO_PKR:
- perfc_incrc(vmx_mov_to_pkr);
+ perfc_incr(vmx_mov_to_pkr);
status=vmx_emul_mov_to_pkr(vcpu, inst);
break;
case EVENT_MOV_FROM_DBR:
- perfc_incrc(vmx_mov_from_dbr);
+ perfc_incr(vmx_mov_from_dbr);
status=vmx_emul_mov_from_dbr(vcpu, inst);
break;
case EVENT_MOV_FROM_IBR:
- perfc_incrc(vmx_mov_from_ibr);
+ perfc_incr(vmx_mov_from_ibr);
status=vmx_emul_mov_from_ibr(vcpu, inst);
break;
case EVENT_MOV_FROM_PMC:
- perfc_incrc(vmx_mov_from_pmc);
+ perfc_incr(vmx_mov_from_pmc);
status=vmx_emul_mov_from_pmc(vcpu, inst);
break;
case EVENT_MOV_FROM_PKR:
- perfc_incrc(vmx_mov_from_pkr);
+ perfc_incr(vmx_mov_from_pkr);
status=vmx_emul_mov_from_pkr(vcpu, inst);
break;
case EVENT_MOV_FROM_CPUID:
- perfc_incrc(vmx_mov_from_cpuid);
+ perfc_incr(vmx_mov_from_cpuid);
status=vmx_emul_mov_from_cpuid(vcpu, inst);
break;
case EVENT_VMSW:
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue Mar 27 16:42:47 2007 +0100
@@ -372,7 +372,7 @@ do_dom0vp_op(unsigned long cmd,
} else {
ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
}
- perfc_incrc(dom0vp_phystomach);
+ perfc_incr(dom0vp_phystomach);
break;
case IA64_DOM0VP_machtophys:
if (!mfn_valid(arg0)) {
@@ -380,7 +380,7 @@ do_dom0vp_op(unsigned long cmd,
break;
}
ret = get_gpfn_from_mfn(arg0);
- perfc_incrc(dom0vp_machtophys);
+ perfc_incr(dom0vp_machtophys);
break;
case IA64_DOM0VP_zap_physmap:
ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/domain.c Tue Mar 27 16:42:47 2007 +0100
@@ -131,11 +131,11 @@ static void flush_vtlb_for_context_switc
if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
last_tlbflush_timestamp)) {
local_flush_tlb_all();
- perfc_incrc(tlbflush_clock_cswitch_purge);
+ perfc_incr(tlbflush_clock_cswitch_purge);
} else {
- perfc_incrc(tlbflush_clock_cswitch_skip);
- }
- perfc_incrc(flush_vtlb_for_context_switch);
+ perfc_incr(tlbflush_clock_cswitch_skip);
+ }
+ perfc_incr(flush_vtlb_for_context_switch);
}
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/faults.c Tue Mar 27 16:42:47 2007 +0100
@@ -187,7 +187,7 @@ static int handle_lazy_cover(struct vcpu
if (!PSCB(v, interrupt_collection_enabled)) {
PSCB(v, ifs) = regs->cr_ifs;
regs->cr_ifs = 0;
- perfc_incrc(lazy_cover);
+ perfc_incr(lazy_cover);
return 1; // retry same instruction with cr.ifs off
}
return 0;
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/hypercall.c Tue Mar 27 16:42:47 2007 +0100
@@ -161,7 +161,7 @@ ia64_hypercall(struct pt_regs *regs)
if (regs->r28 == PAL_HALT_LIGHT) {
if (vcpu_deliverable_interrupts(v) ||
event_pending(v)) {
- perfc_incrc(idle_when_pending);
+ perfc_incr(idle_when_pending);
vcpu_pend_unspecified_interrupt(v);
//printk("idle w/int#%d pending!\n",pi);
//this shouldn't happen, but it apparently does quite a bit! so don't
@@ -170,7 +170,7 @@ ia64_hypercall(struct pt_regs *regs)
//as deliver_pending_interrupt is called on the way out and will deliver it
}
else {
- perfc_incrc(pal_halt_light);
+ perfc_incr(pal_halt_light);
migrate_timer(&v->arch.hlt_timer,
v->processor);
set_timer(&v->arch.hlt_timer,
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/mm.c Tue Mar 27 16:42:47 2007 +0100
@@ -1139,7 +1139,7 @@ assign_domain_page_replace(struct domain
domain_put_page(d, mpaddr, pte, old_pte, 1);
}
}
- perfc_incrc(assign_domain_page_replace);
+ perfc_incr(assign_domain_page_replace);
}
// caller must get_page(new_page) before
@@ -1202,7 +1202,7 @@ assign_domain_page_cmpxchg_rel(struct do
set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
- perfc_incrc(assign_domain_pge_cmpxchg_rel);
+ perfc_incr(assign_domain_pge_cmpxchg_rel);
return 0;
}
@@ -1264,7 +1264,7 @@ zap_domain_page_one(struct domain *d, un
// guest_physmap_remove_page()
// zap_domain_page_one()
domain_put_page(d, mpaddr, pte, old_pte, (page_get_owner(page) != NULL));
- perfc_incrc(zap_dcomain_page_one);
+ perfc_incr(zap_dcomain_page_one);
}
unsigned long
@@ -1277,7 +1277,7 @@ dom0vp_zap_physmap(struct domain *d, uns
}
zap_domain_page_one(d, gpfn << PAGE_SHIFT, INVALID_MFN);
- perfc_incrc(dom0vp_zap_physmap);
+ perfc_incr(dom0vp_zap_physmap);
return 0;
}
@@ -1331,7 +1331,7 @@ __dom0vp_add_physmap(struct domain* d, u
get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
//don't update p2m table because this page belongs to rd, not d.
- perfc_incrc(dom0vp_add_physmap);
+ perfc_incr(dom0vp_add_physmap);
out1:
put_domain(rd);
return error;
@@ -1501,7 +1501,7 @@ create_grant_host_mapping(unsigned long
#endif
((flags & GNTMAP_readonly) ?
ASSIGN_readonly : ASSIGN_writable));
- perfc_incrc(create_grant_host_mapping);
+ perfc_incr(create_grant_host_mapping);
return GNTST_okay;
}
@@ -1565,7 +1565,7 @@ destroy_grant_host_mapping(unsigned long
get_gpfn_from_mfn(mfn) == gpfn);
domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
- perfc_incrc(destroy_grant_host_mapping);
+ perfc_incr(destroy_grant_host_mapping);
return GNTST_okay;
}
@@ -1629,7 +1629,7 @@ steal_page(struct domain *d, struct page
free_domheap_page(new);
return -1;
}
- perfc_incrc(steal_page_refcount);
+ perfc_incr(steal_page_refcount);
}
spin_lock(&d->page_alloc_lock);
@@ -1703,7 +1703,7 @@ steal_page(struct domain *d, struct page
list_del(&page->list);
spin_unlock(&d->page_alloc_lock);
- perfc_incrc(steal_page);
+ perfc_incr(steal_page);
return 0;
}
@@ -1723,7 +1723,7 @@ guest_physmap_add_page(struct domain *d,
//BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >>
PAGE_SHIFT));
- perfc_incrc(guest_physmap_add_page);
+ perfc_incr(guest_physmap_add_page);
}
void
@@ -1732,7 +1732,7 @@ guest_physmap_remove_page(struct domain
{
BUG_ON(mfn == 0);//XXX
zap_domain_page_one(d, gpfn << PAGE_SHIFT, mfn);
- perfc_incrc(guest_physmap_remove_page);
+ perfc_incr(guest_physmap_remove_page);
}
static void
@@ -1812,7 +1812,7 @@ domain_page_flush_and_put(struct domain*
break;
}
#endif
- perfc_incrc(domain_page_flush_and_put);
+ perfc_incr(domain_page_flush_and_put);
}
int
@@ -2009,7 +2009,7 @@ int get_page_type(struct page_info *page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/privop.c Tue Mar 27 16:42:47 2007 +0100
@@ -641,15 +641,15 @@ static IA64FAULT priv_handle_op(VCPU * v
if (inst.M29.x3 != 0)
break;
if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
- perfc_incrc(mov_to_ar_imm);
+ perfc_incr(mov_to_ar_imm);
return priv_mov_to_ar_imm(vcpu, inst);
}
if (inst.M44.x4 == 6) {
- perfc_incrc(ssm);
+ perfc_incr(ssm);
return priv_ssm(vcpu, inst);
}
if (inst.M44.x4 == 7) {
- perfc_incrc(rsm);
+ perfc_incr(rsm);
return priv_rsm(vcpu, inst);
}
break;
@@ -658,9 +658,9 @@ static IA64FAULT priv_handle_op(VCPU * v
x6 = inst.M29.x6;
if (x6 == 0x2a) {
if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
- perfc_incrc(mov_from_ar); // privified mov from kr
+ perfc_incr(mov_from_ar); // privified mov from kr
else
- perfc_incrc(mov_to_ar_reg);
+ perfc_incr(mov_to_ar_reg);
return priv_mov_to_ar_reg(vcpu, inst);
}
if (inst.M29.x3 != 0)
@@ -676,9 +676,9 @@ static IA64FAULT priv_handle_op(VCPU * v
}
}
if (privify_en && x6 == 52 && inst.M28.r3 > 63)
- perfc_incrc(fc);
+ perfc_incr(fc);
else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
- perfc_incrc(cpuid);
+ perfc_incr(cpuid);
else
perfc_incra(misc_privop, x6);
return (*pfunc) (vcpu, inst);
@@ -688,23 +688,23 @@ static IA64FAULT priv_handle_op(VCPU * v
break;
if (inst.B8.x6 == 0x08) {
IA64FAULT fault;
- perfc_incrc(rfi);
+ perfc_incr(rfi);
fault = priv_rfi(vcpu, inst);
if (fault == IA64_NO_FAULT)
fault = IA64_RFI_IN_PROGRESS;
return fault;
}
if (inst.B8.x6 == 0x0c) {
- perfc_incrc(bsw0);
+ perfc_incr(bsw0);
return priv_bsw0(vcpu, inst);
}
if (inst.B8.x6 == 0x0d) {
- perfc_incrc(bsw1);
+ perfc_incr(bsw1);
return priv_bsw1(vcpu, inst);
}
if (inst.B8.x6 == 0x0) {
// break instr for privified cover
- perfc_incrc(cover);
+ perfc_incr(cover);
return priv_cover(vcpu, inst);
}
break;
@@ -713,7 +713,7 @@ static IA64FAULT priv_handle_op(VCPU * v
break;
#if 0
if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
- perfc_incrc(cover);
+ perfc_incr(cover);
return priv_cover(vcpu, inst);
}
#endif
@@ -721,13 +721,13 @@ static IA64FAULT priv_handle_op(VCPU * v
break; // I26.x3 == I27.x3
if (inst.I26.x6 == 0x2a) {
if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
- perfc_incrc(mov_from_ar); // privified mov from kr
+ perfc_incr(mov_from_ar); // privified mov from kr
else
- perfc_incrc(mov_to_ar_reg);
+ perfc_incr(mov_to_ar_reg);
return priv_mov_to_ar_reg(vcpu, inst);
}
if (inst.I27.x6 == 0x0a) {
- perfc_incrc(mov_to_ar_imm);
+ perfc_incr(mov_to_ar_imm);
return priv_mov_to_ar_imm(vcpu, inst);
}
break;
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/tlb_track.c Tue Mar 27 16:42:47 2007 +0100
@@ -216,14 +216,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND;
#if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */
- perfc_incrc(tlb_track_iod);
+ perfc_incr(tlb_track_iod);
if (!pte_tlb_tracking(old_pte)) {
- perfc_incrc(tlb_track_iod_not_tracked);
+ perfc_incr(tlb_track_iod_not_tracked);
return TLB_TRACK_NOT_TRACKED;
}
#endif
if (pte_tlb_inserted_many(old_pte)) {
- perfc_incrc(tlb_track_iod_tracked_many);
+ perfc_incr(tlb_track_iod_tracked_many);
return TLB_TRACK_MANY;
}
@@ -260,7 +260,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (entry->vaddr == vaddr && entry->rid == rid) {
// tlb_track_printd("TLB_TRACK_FOUND\n");
ret = TLB_TRACK_FOUND;
- perfc_incrc(tlb_track_iod_found);
+ perfc_incr(tlb_track_iod_found);
#ifdef CONFIG_TLB_TRACK_CNT
entry->cnt++;
if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) {
@@ -276,7 +276,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
*/
// tlb_track_entry_printf(entry);
// tlb_track_printd("cnt = %ld\n", entry->cnt);
- perfc_incrc(tlb_track_iod_force_many);
+ perfc_incr(tlb_track_iod_force_many);
goto force_many;
}
#endif
@@ -294,14 +294,14 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (pte_val(ret_pte) != pte_val(old_pte)) {
// tlb_track_printd("TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
} else {
// tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n",
// entry);
ret = TLB_TRACK_MANY;
list_del(&entry->list);
// tlb_track_entry_printf(entry);
- perfc_incrc(tlb_track_iod_tracked_many_del);
+ perfc_incr(tlb_track_iod_tracked_many_del);
}
goto out;
}
@@ -314,7 +314,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
*/
// tlb_track_printd("TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
@@ -323,7 +323,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
/* Other thread else removed the tlb_track_entry after we got old_pte
before we got spin lock. */
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) {
@@ -334,10 +334,10 @@ tlb_track_insert_or_dirty(struct tlb_tra
/* entry can't be allocated.
fall down into full flush mode. */
bit_to_be_set |= _PAGE_TLB_INSERTED_MANY;
- perfc_incrc(tlb_track_iod_new_failed);
+ perfc_incr(tlb_track_iod_new_failed);
}
// tlb_track_printd("new_entry 0x%p\n", new_entry);
- perfc_incrc(tlb_track_iod_new_entry);
+ perfc_incr(tlb_track_iod_new_entry);
goto again;
}
@@ -348,7 +348,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
if (tlb_track_pte_zapped(old_pte, ret_pte)) {
// tlb_track_printd("zapped TLB_TRACK_AGAIN\n");
ret = TLB_TRACK_AGAIN;
- perfc_incrc(tlb_track_iod_again);
+ perfc_incr(tlb_track_iod_again);
goto out;
}
@@ -359,7 +359,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
// tlb_track_printd("iserted TLB_TRACK_MANY\n");
BUG_ON(!pte_tlb_inserted(ret_pte));
ret = TLB_TRACK_MANY;
- perfc_incrc(tlb_track_iod_new_many);
+ perfc_incr(tlb_track_iod_new_many);
goto out;
}
BUG_ON(pte_tlb_inserted(ret_pte));
@@ -381,7 +381,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
#ifdef CONFIG_TLB_TRACK_CNT
entry->cnt = 0;
#endif
- perfc_incrc(tlb_track_iod_insert);
+ perfc_incr(tlb_track_iod_insert);
// tlb_track_entry_printf(entry);
} else {
goto out;
@@ -392,7 +392,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
cpu_set(v->processor, entry->pcpu_dirty_mask);
BUG_ON(v->vcpu_id >= NR_CPUS);
vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
- perfc_incrc(tlb_track_iod_dirtied);
+ perfc_incr(tlb_track_iod_dirtied);
out:
spin_unlock(&tlb_track->hash_lock);
@@ -432,19 +432,19 @@ tlb_track_search_and_remove(struct tlb_t
struct list_head* head = tlb_track_hash_head(tlb_track, ptep);
struct tlb_track_entry* entry;
- perfc_incrc(tlb_track_sar);
+ perfc_incr(tlb_track_sar);
if (!pte_tlb_tracking(old_pte)) {
- perfc_incrc(tlb_track_sar_not_tracked);
+ perfc_incr(tlb_track_sar_not_tracked);
return TLB_TRACK_NOT_TRACKED;
}
if (!pte_tlb_inserted(old_pte)) {
BUG_ON(pte_tlb_inserted_many(old_pte));
- perfc_incrc(tlb_track_sar_not_found);
+ perfc_incr(tlb_track_sar_not_found);
return TLB_TRACK_NOT_FOUND;
}
if (pte_tlb_inserted_many(old_pte)) {
BUG_ON(!pte_tlb_inserted(old_pte));
- perfc_incrc(tlb_track_sar_many);
+ perfc_incr(tlb_track_sar_many);
return TLB_TRACK_MANY;
}
@@ -475,14 +475,14 @@ tlb_track_search_and_remove(struct tlb_t
pte_tlb_inserted(current_pte))) {
BUG_ON(pte_tlb_inserted_many(current_pte));
spin_unlock(&tlb_track->hash_lock);
- perfc_incrc(tlb_track_sar_many);
+ perfc_incr(tlb_track_sar_many);
return TLB_TRACK_MANY;
}
list_del(&entry->list);
spin_unlock(&tlb_track->hash_lock);
*entryp = entry;
- perfc_incrc(tlb_track_sar_found);
+ perfc_incr(tlb_track_sar_found);
// tlb_track_entry_printf(entry);
#ifdef CONFIG_TLB_TRACK_CNT
// tlb_track_printd("cnt = %ld\n", entry->cnt);
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/vcpu.c Tue Mar 27 16:42:47 2007 +0100
@@ -1616,7 +1616,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
*pteval = (address & _PAGE_PPN_MASK) |
__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
*itir = PAGE_SHIFT << 2;
- perfc_incrc(phys_translate);
+ perfc_incr(phys_translate);
return IA64_NO_FAULT;
}
} else if (!region && warn_region0_address) {
@@ -1637,7 +1637,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
if (trp != NULL) {
*pteval = trp->pte.val;
*itir = trp->itir;
- perfc_incrc(tr_translate);
+ perfc_incr(tr_translate);
return IA64_NO_FAULT;
}
}
@@ -1647,7 +1647,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
if (trp != NULL) {
*pteval = trp->pte.val;
*itir = trp->itir;
- perfc_incrc(tr_translate);
+ perfc_incr(tr_translate);
return IA64_NO_FAULT;
}
}
@@ -1660,7 +1660,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
&& vcpu_match_tr_entry_no_p(trp, address, rid)) {
*pteval = pte.val;
*itir = trp->itir;
- perfc_incrc(dtlb_translate);
+ perfc_incr(dtlb_translate);
return IA64_USE_TLB;
}
@@ -1709,7 +1709,7 @@ out:
out:
*itir = rr & RR_PS_MASK;
*pteval = pte.val;
- perfc_incrc(vhpt_translate);
+ perfc_incr(vhpt_translate);
return IA64_NO_FAULT;
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/ia64/xen/vhpt.c Tue Mar 27 16:42:47 2007 +0100
@@ -48,14 +48,14 @@ local_vhpt_flush(void)
/* this must be after flush */
tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
flush_time);
- perfc_incrc(local_vhpt_flush);
+ perfc_incr(local_vhpt_flush);
}
void
vcpu_vhpt_flush(struct vcpu* v)
{
__vhpt_flush(vcpu_vhpt_maddr(v));
- perfc_incrc(vcpu_vhpt_flush);
+ perfc_incr(vcpu_vhpt_flush);
}
static void
@@ -248,7 +248,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v)
not running on this processor. There is currently no easy way to
check this. */
- perfc_incrc(vcpu_flush_vtlb_all);
+ perfc_incr(vcpu_flush_vtlb_all);
}
static void __vcpu_flush_vtlb_all(void *vcpu)
@@ -280,7 +280,7 @@ void domain_flush_vtlb_all(struct domain
__vcpu_flush_vtlb_all,
v, 1, 1);
}
- perfc_incrc(domain_flush_vtlb_all);
+ perfc_incr(domain_flush_vtlb_all);
}
// Callers may need to call smp_mb() before/after calling this.
@@ -322,7 +322,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr
vadr, 1UL << log_range);
ia64_ptcl(vadr, log_range << 2);
ia64_srlz_i();
- perfc_incrc(vcpu_flush_tlb_vhpt_range);
+ perfc_incr(vcpu_flush_tlb_vhpt_range);
}
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
@@ -361,7 +361,7 @@ void domain_flush_vtlb_range (struct dom
/* ptc.ga */
platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
- perfc_incrc(domain_flush_vtlb_range);
+ perfc_incr(domain_flush_vtlb_range);
}
#ifdef CONFIG_XEN_IA64_TLB_TRACK
@@ -391,11 +391,11 @@ __domain_flush_vtlb_track_entry(struct d
*/
vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid);
if (likely(rr7_rid == entry->rid)) {
- perfc_incrc(tlb_track_use_rr7);
+ perfc_incr(tlb_track_use_rr7);
} else {
swap_rr0 = 1;
vaddr = (vaddr << 3) >> 3;// force vrn0
- perfc_incrc(tlb_track_swap_rr0);
+ perfc_incr(tlb_track_swap_rr0);
}
// tlb_track_entry_printf(entry);
@@ -435,18 +435,18 @@ __domain_flush_vtlb_track_entry(struct d
/* ptc.ga */
if (local_purge) {
ia64_ptcl(vaddr, PAGE_SHIFT << 2);
- perfc_incrc(domain_flush_vtlb_local);
+ perfc_incr(domain_flush_vtlb_local);
} else {
/* ptc.ga has release semantics. */
platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
PAGE_SHIFT);
- perfc_incrc(domain_flush_vtlb_global);
+ perfc_incr(domain_flush_vtlb_global);
}
if (swap_rr0) {
vcpu_set_rr(current, 0, old_rid);
}
- perfc_incrc(domain_flush_vtlb_track_entry);
+ perfc_incr(domain_flush_vtlb_track_entry);
}
void
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/powerpc/mm.c Tue Mar 27 16:42:47 2007 +0100
@@ -261,7 +261,7 @@ int get_page_type(struct page_info *page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/apic.c Tue Mar 27 16:42:47 2007 +0100
@@ -1076,7 +1076,7 @@ fastcall void smp_apic_timer_interrupt(s
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
ack_APIC_irq();
- perfc_incrc(apic_timer);
+ perfc_incr(apic_timer);
raise_softirq(TIMER_SOFTIRQ);
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/extable.c
--- a/xen/arch/x86/extable.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/extable.c Tue Mar 27 16:42:47 2007 +0100
@@ -72,7 +72,7 @@ search_pre_exception_table(struct cpu_us
if ( fixup )
{
dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup));
- perfc_incrc(exception_fixed);
+ perfc_incr(exception_fixed);
}
return fixup;
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/irq.c Tue Mar 27 16:42:47 2007 +0100
@@ -56,7 +56,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
irq_desc_t *desc = &irq_desc[vector];
struct irqaction *action;
- perfc_incrc(irqs);
+ perfc_incr(irqs);
spin_lock(&desc->lock);
desc->handler->ack(vector);
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/mm.c Tue Mar 27 16:42:47 2007 +0100
@@ -1726,7 +1726,7 @@ int get_page_type(struct page_info *page
(!shadow_mode_enabled(page_get_owner(page)) ||
((nx & PGT_type_mask) == PGT_writable_page)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
@@ -2729,7 +2729,7 @@ int do_update_va_mapping(unsigned long v
cpumask_t pmask;
int rc = 0;
- perfc_incrc(calls_to_update_va);
+ perfc_incr(calls_to_update_va);
if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
return -EINVAL;
@@ -3386,7 +3386,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
goto bail;
UNLOCK_BIGLOCK(d);
- perfc_incrc(ptwr_emulations);
+ perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
bail:
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Tue Mar 27 16:42:47 2007 +0100
@@ -276,7 +276,7 @@ hvm_emulate_write(enum x86_segment seg,
/* How many emulations could we save if we unshadowed on stack writes? */
if ( seg == x86_seg_ss )
- perfc_incrc(shadow_fault_emulate_stack);
+ perfc_incr(shadow_fault_emulate_stack);
rc = hvm_translate_linear_addr(
seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
@@ -804,7 +804,7 @@ void shadow_prealloc(struct domain *d, u
ASSERT(v != NULL); /* Shouldn't have enabled shadows if we've no vcpus */
/* Stage one: walk the list of pinned pages, unpinning them */
- perfc_incrc(shadow_prealloc_1);
+ perfc_incr(shadow_prealloc_1);
list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
{
sp = list_entry(l, struct shadow_page_info, list);
@@ -820,7 +820,7 @@ void shadow_prealloc(struct domain *d, u
/* Stage two: all shadow pages are in use in hierarchies that are
* loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen
* mappings. */
- perfc_incrc(shadow_prealloc_2);
+ perfc_incr(shadow_prealloc_2);
for_each_vcpu(d, v2)
for ( i = 0 ; i < 4 ; i++ )
@@ -929,7 +929,7 @@ mfn_t shadow_alloc(struct domain *d,
ASSERT(shadow_locked_by_me(d));
ASSERT(order <= SHADOW_MAX_ORDER);
ASSERT(shadow_type != SH_type_none);
- perfc_incrc(shadow_alloc);
+ perfc_incr(shadow_alloc);
/* Find smallest order which can satisfy the request. */
for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
@@ -967,7 +967,7 @@ mfn_t shadow_alloc(struct domain *d,
tlbflush_filter(mask, sp[i].tlbflush_timestamp);
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(shadow_alloc_tlbflush);
+ perfc_incr(shadow_alloc_tlbflush);
flush_tlb_mask(mask);
}
/* Now safe to clear the page for reuse */
@@ -997,7 +997,7 @@ void shadow_free(struct domain *d, mfn_t
int i;
ASSERT(shadow_locked_by_me(d));
- perfc_incrc(shadow_free);
+ perfc_incr(shadow_free);
shadow_type = sp->type;
ASSERT(shadow_type != SH_type_none);
@@ -1406,7 +1406,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_lookups);
+ perfc_incr(shadow_hash_lookups);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1434,7 +1434,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
}
else
{
- perfc_incrc(shadow_hash_lookup_head);
+ perfc_incr(shadow_hash_lookup_head);
}
return shadow_page_to_mfn(sp);
}
@@ -1442,7 +1442,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
sp = sp->next_shadow;
}
- perfc_incrc(shadow_hash_lookup_miss);
+ perfc_incr(shadow_hash_lookup_miss);
return _mfn(INVALID_MFN);
}
@@ -1460,7 +1460,7 @@ void shadow_hash_insert(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_inserts);
+ perfc_incr(shadow_hash_inserts);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1486,7 +1486,7 @@ void shadow_hash_delete(struct vcpu *v,
sh_hash_audit(d);
- perfc_incrc(shadow_hash_deletes);
+ perfc_incr(shadow_hash_deletes);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
@@ -1713,7 +1713,7 @@ int sh_remove_write_access(struct vcpu *
|| (pg->u.inuse.type_info & PGT_count_mask) == 0 )
return 0;
- perfc_incrc(shadow_writeable);
+ perfc_incr(shadow_writeable);
/* If this isn't a "normal" writeable page, the domain is trying to
* put pagetables in special memory of some kind. We can't allow that. */
@@ -1735,7 +1735,7 @@ int sh_remove_write_access(struct vcpu *
#define GUESS(_a, _h) do { \
if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
- perfc_incrc(shadow_writeable_h_ ## _h); \
+ perfc_incr(shadow_writeable_h_ ## _h); \
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \
return 1; \
} while (0)
@@ -1808,7 +1808,7 @@ int sh_remove_write_access(struct vcpu *
callbacks[shtype](v, last_smfn, gmfn);
if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
- perfc_incrc(shadow_writeable_h_5);
+ perfc_incr(shadow_writeable_h_5);
}
if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )
@@ -1817,7 +1817,7 @@ int sh_remove_write_access(struct vcpu *
#endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
/* Brute-force search of all the shadows, by walking the hash */
- perfc_incrc(shadow_writeable_bf);
+ perfc_incr(shadow_writeable_bf);
hash_foreach(v, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, something is very wrong */
@@ -1888,7 +1888,7 @@ int sh_remove_all_mappings(struct vcpu *
| 1 << SH_type_fl1_64_shadow
;
- perfc_incrc(shadow_mappings);
+ perfc_incr(shadow_mappings);
if ( (page->count_info & PGC_count_mask) == 0 )
return 0;
@@ -1903,7 +1903,7 @@ int sh_remove_all_mappings(struct vcpu *
* Heuristics for finding the (probably) single mapping of this gmfn */
/* Brute-force search of all the shadows, by walking the hash */
- perfc_incrc(shadow_mappings_bf);
+ perfc_incr(shadow_mappings_bf);
hash_foreach(v, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, something is very wrong */
@@ -1992,9 +1992,9 @@ static int sh_remove_shadow_via_pointer(
sh_unmap_domain_page(vaddr);
if ( rc )
- perfc_incrc(shadow_up_pointer);
+ perfc_incr(shadow_up_pointer);
else
- perfc_incrc(shadow_unshadow_bf);
+ perfc_incr(shadow_unshadow_bf);
return rc;
}
@@ -2093,7 +2093,7 @@ void sh_remove_shadows(struct vcpu *v, m
}
/* Search for this shadow in all appropriate shadows */
- perfc_incrc(shadow_unshadow);
+ perfc_incr(shadow_unshadow);
sh_flags = pg->shadow_flags;
/* Lower-level shadows need to be excised from upper-level shadows.
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Tue Mar 27 16:42:47 2007 +0100
@@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t
/* Look for shadows in the hash table */
{
mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
- perfc_incrc(shadow_get_shadow_status);
+ perfc_incr(shadow_get_shadow_status);
return smfn;
}
@@ -209,7 +209,7 @@ guest_walk_tables(struct vcpu *v, unsign
{
ASSERT(!guest_op || shadow_locked_by_me(v->domain));
- perfc_incrc(shadow_guest_walk);
+ perfc_incr(shadow_guest_walk);
memset(gw, 0, sizeof(*gw));
gw->va = va;
@@ -448,14 +448,14 @@ static u32 guest_set_ad_bits(struct vcpu
== (_PAGE_DIRTY | _PAGE_ACCESSED) )
return flags; /* Guest already has A and D bits set */
flags |= _PAGE_DIRTY | _PAGE_ACCESSED;
- perfc_incrc(shadow_ad_update);
+ perfc_incr(shadow_ad_update);
}
else
{
if ( flags & _PAGE_ACCESSED )
return flags; /* Guest already has A bit set */
flags |= _PAGE_ACCESSED;
- perfc_incrc(shadow_a_update);
+ perfc_incr(shadow_a_update);
}
/* Set the bit(s) */
@@ -863,7 +863,7 @@ shadow_write_entries(void *d, void *s, i
* using map_domain_page() to get a writeable mapping if we need to. */
if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
{
- perfc_incrc(shadow_linear_map_failed);
+ perfc_incr(shadow_linear_map_failed);
map = sh_map_domain_page(mfn);
ASSERT(map != NULL);
dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
@@ -925,7 +925,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
if ( unlikely(!res) )
{
- perfc_incrc(shadow_get_page_fail);
+ perfc_incr(shadow_get_page_fail);
SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
}
@@ -2198,7 +2198,7 @@ static int validate_gl4e(struct vcpu *v,
mfn_t sl3mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl4e_calls);
+ perfc_incr(shadow_validate_gl4e_calls);
if ( guest_l4e_get_flags(*new_gl4e) & _PAGE_PRESENT )
{
@@ -2250,7 +2250,7 @@ static int validate_gl3e(struct vcpu *v,
mfn_t sl2mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl3e_calls);
+ perfc_incr(shadow_validate_gl3e_calls);
if ( guest_l3e_get_flags(*new_gl3e) & _PAGE_PRESENT )
{
@@ -2277,7 +2277,7 @@ static int validate_gl2e(struct vcpu *v,
mfn_t sl1mfn = _mfn(INVALID_MFN);
int result = 0;
- perfc_incrc(shadow_validate_gl2e_calls);
+ perfc_incr(shadow_validate_gl2e_calls);
if ( guest_l2e_get_flags(*new_gl2e) & _PAGE_PRESENT )
{
@@ -2363,7 +2363,7 @@ static int validate_gl1e(struct vcpu *v,
mfn_t gmfn;
int result = 0, mmio;
- perfc_incrc(shadow_validate_gl1e_calls);
+ perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(*new_gl1e);
gmfn = vcpu_gfn_to_mfn(v, gfn);
@@ -2523,7 +2523,7 @@ static inline void check_for_early_unsha
u32 flags = mfn_to_page(gmfn)->shadow_flags;
if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
{
- perfc_incrc(shadow_early_unshadow);
+ perfc_incr(shadow_early_unshadow);
sh_remove_shadows(v, gmfn, 0, 0 /* Slow, can fail to unshadow */ );
}
}
@@ -2642,7 +2642,7 @@ static int sh_page_fault(struct vcpu *v,
SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n",
v->domain->domain_id, v->vcpu_id, va, regs->error_code);
- perfc_incrc(shadow_fault);
+ perfc_incr(shadow_fault);
//
// XXX: Need to think about eventually mapping superpages directly in the
// shadow (when possible), as opposed to splintering them into a
@@ -2670,7 +2670,7 @@ static int sh_page_fault(struct vcpu *v,
ASSERT(regs->error_code & PFEC_page_present);
regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
reset_early_unshadow(v);
- perfc_incrc(shadow_fault_fast_gnp);
+ perfc_incr(shadow_fault_fast_gnp);
SHADOW_PRINTK("fast path not-present\n");
return 0;
}
@@ -2688,7 +2688,7 @@ static int sh_page_fault(struct vcpu *v,
<< PAGE_SHIFT)
| (va & ~PAGE_MASK);
}
- perfc_incrc(shadow_fault_fast_mmio);
+ perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
handle_mmio(gpa);
@@ -2699,7 +2699,7 @@ static int sh_page_fault(struct vcpu *v,
/* This should be exceptionally rare: another vcpu has fixed
* the tables between the fault and our reading the l1e.
* Retry and let the hardware give us the right fault next time. */
- perfc_incrc(shadow_fault_fast_fail);
+ perfc_incr(shadow_fault_fast_fail);
SHADOW_PRINTK("fast path false alarm!\n");
return EXCRET_fault_fixed;
}
@@ -2746,7 +2746,7 @@ static int sh_page_fault(struct vcpu *v,
goto mmio;
}
- perfc_incrc(shadow_fault_bail_not_present);
+ perfc_incr(shadow_fault_bail_not_present);
goto not_a_shadow_fault;
}
@@ -2761,7 +2761,7 @@ static int sh_page_fault(struct vcpu *v,
!(accumulated_gflags & _PAGE_USER) )
{
/* illegal user-mode access to supervisor-only page */
- perfc_incrc(shadow_fault_bail_user_supervisor);
+ perfc_incr(shadow_fault_bail_user_supervisor);
goto not_a_shadow_fault;
}
@@ -2772,7 +2772,7 @@ static int sh_page_fault(struct vcpu *v,
{
if ( unlikely(!(accumulated_gflags & _PAGE_RW)) )
{
- perfc_incrc(shadow_fault_bail_ro_mapping);
+ perfc_incr(shadow_fault_bail_ro_mapping);
goto not_a_shadow_fault;
}
}
@@ -2787,7 +2787,7 @@ static int sh_page_fault(struct vcpu *v,
if ( accumulated_gflags & _PAGE_NX_BIT )
{
/* NX prevented this code fetch */
- perfc_incrc(shadow_fault_bail_nx);
+ perfc_incr(shadow_fault_bail_nx);
goto not_a_shadow_fault;
}
}
@@ -2802,7 +2802,7 @@ static int sh_page_fault(struct vcpu *v,
if ( !mmio && !mfn_valid(gmfn) )
{
- perfc_incrc(shadow_fault_bail_bad_gfn);
+ perfc_incr(shadow_fault_bail_bad_gfn);
SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
gfn_x(gfn), mfn_x(gmfn));
goto not_a_shadow_fault;
@@ -2844,12 +2844,12 @@ static int sh_page_fault(struct vcpu *v,
{
if ( ft == ft_demand_write )
{
- perfc_incrc(shadow_fault_emulate_write);
+ perfc_incr(shadow_fault_emulate_write);
goto emulate;
}
else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read )
{
- perfc_incrc(shadow_fault_emulate_read);
+ perfc_incr(shadow_fault_emulate_read);
goto emulate;
}
}
@@ -2860,7 +2860,7 @@ static int sh_page_fault(struct vcpu *v,
goto mmio;
}
- perfc_incrc(shadow_fault_fixed);
+ perfc_incr(shadow_fault_fixed);
d->arch.paging.shadow.fault_count++;
reset_early_unshadow(v);
@@ -2920,7 +2920,7 @@ static int sh_page_fault(struct vcpu *v,
{
SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
mfn_x(gmfn));
- perfc_incrc(shadow_fault_emulate_failed);
+ perfc_incr(shadow_fault_emulate_failed);
/* If this is actually a page table, then we have a bug, and need
* to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
@@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v,
mmio:
if ( !guest_mode(regs) )
goto not_a_shadow_fault;
- perfc_incrc(shadow_fault_mmio);
+ perfc_incr(shadow_fault_mmio);
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
@@ -2964,7 +2964,7 @@ sh_invlpg(struct vcpu *v, unsigned long
{
shadow_l2e_t sl2e;
- perfc_incrc(shadow_invlpg);
+ perfc_incr(shadow_invlpg);
/* First check that we can safely read the shadow l2e. SMP/PAE linux can
* run as high as 6% of invlpg calls where we haven't shadowed the l2
@@ -2983,7 +2983,7 @@ sh_invlpg(struct vcpu *v, unsigned long
+ shadow_l3_linear_offset(va)),
sizeof (sl3e)) != 0 )
{
- perfc_incrc(shadow_invlpg_fault);
+ perfc_incr(shadow_invlpg_fault);
return 0;
}
if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
@@ -3002,7 +3002,7 @@ sh_invlpg(struct vcpu *v, unsigned long
sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
sizeof (sl2e)) != 0 )
{
- perfc_incrc(shadow_invlpg_fault);
+ perfc_incr(shadow_invlpg_fault);
return 0;
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/smp.c Tue Mar 27 16:42:47 2007 +0100
@@ -169,7 +169,7 @@ fastcall void smp_invalidate_interrupt(v
fastcall void smp_invalidate_interrupt(void)
{
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
irq_enter();
if ( !__sync_lazy_execstate() )
{
@@ -329,7 +329,7 @@ fastcall void smp_event_check_interrupt(
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
}
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
@@ -338,7 +338,7 @@ fastcall void smp_call_function_interrup
void *info = call_data->info;
ack_APIC_irq();
- perfc_incrc(ipis);
+ perfc_incr(ipis);
if ( !cpu_isset(smp_processor_id(), call_data->selected) )
return;
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/traps.c Tue Mar 27 16:42:47 2007 +0100
@@ -956,7 +956,7 @@ asmlinkage int do_page_fault(struct cpu_
DEBUGGER_trap_entry(TRAP_page_fault, regs);
- perfc_incrc(page_faults);
+ perfc_incr(page_faults);
if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
return rc;
@@ -968,7 +968,7 @@ asmlinkage int do_page_fault(struct cpu_
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- perfc_incrc(copy_user_faults);
+ perfc_incr(copy_user_faults);
regs->eip = fixup;
return 0;
}
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Mar 27 16:42:47 2007 +0100
@@ -50,7 +50,7 @@ void *map_domain_page(unsigned long mfn)
ASSERT(!in_irq());
- perfc_incrc(map_domain_page_count);
+ perfc_incr(map_domain_page_count);
v = mapcache_current_vcpu();
@@ -76,7 +76,7 @@ void *map_domain_page(unsigned long mfn)
cache->shadow_epoch[vcpu] = cache->epoch;
if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) )
{
- perfc_incrc(domain_page_tlb_flush);
+ perfc_incr(domain_page_tlb_flush);
local_flush_tlb();
}
}
@@ -92,7 +92,7 @@ void *map_domain_page(unsigned long mfn)
}
/* /Second/, flush TLBs. */
- perfc_incrc(domain_page_tlb_flush);
+ perfc_incr(domain_page_tlb_flush);
local_flush_tlb();
cache->shadow_epoch[vcpu] = ++cache->epoch;
cache->tlbflush_timestamp = tlbflush_current_time();
diff -r 96f167771979 -r ea0b50ca4999 xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/arch/x86/x86_32/seg_fixup.c Tue Mar 27 16:42:47 2007 +0100
@@ -434,7 +434,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
goto fail;
/* Success! */
- perfc_incrc(seg_fixups);
+ perfc_incr(seg_fixups);
/* If requested, give a callback on otherwise unused vector 15. */
if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
diff -r 96f167771979 -r ea0b50ca4999 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/common/page_alloc.c Tue Mar 27 16:42:47 2007 +0100
@@ -423,7 +423,7 @@ static struct page_info *alloc_heap_page
if ( unlikely(!cpus_empty(mask)) )
{
- perfc_incrc(need_flush_tlb_flush);
+ perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(mask);
}
diff -r 96f167771979 -r ea0b50ca4999 xen/common/schedule.c
--- a/xen/common/schedule.c Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/common/schedule.c Tue Mar 27 16:42:47 2007 +0100
@@ -606,7 +606,7 @@ static void schedule(void)
ASSERT(!in_irq());
ASSERT(this_cpu(mc_state).flags == 0);
- perfc_incrc(sched_run);
+ perfc_incr(sched_run);
sd = &this_cpu(schedule_data);
@@ -654,7 +654,7 @@ static void schedule(void)
spin_unlock_irq(&sd->schedule_lock);
- perfc_incrc(sched_ctx);
+ perfc_incr(sched_ctx);
stop_timer(&prev->periodic_timer);
@@ -681,7 +681,7 @@ static void s_timer_fn(void *unused)
static void s_timer_fn(void *unused)
{
raise_softirq(SCHEDULE_SOFTIRQ);
- perfc_incrc(sched_irq);
+ perfc_incr(sched_irq);
}
/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
diff -r 96f167771979 -r ea0b50ca4999 xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-ia64/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -1,34 +1,34 @@
/* This file is legitimately included multiple times. */
-PERFCOUNTER_CPU(dtlb_translate, "dtlb hit")
+PERFCOUNTER(dtlb_translate, "dtlb hit")
-PERFCOUNTER_CPU(tr_translate, "TR hit")
+PERFCOUNTER(tr_translate, "TR hit")
-PERFCOUNTER_CPU(vhpt_translate, "virtual vhpt translation")
-PERFCOUNTER_CPU(fast_vhpt_translate, "virtual vhpt fast translation")
+PERFCOUNTER(vhpt_translate, "virtual vhpt translation")
+PERFCOUNTER(fast_vhpt_translate, "virtual vhpt fast translation")
PERFCOUNTER(recover_to_page_fault, "recoveries to page fault")
PERFCOUNTER(recover_to_break_fault, "recoveries to break fault")
-PERFCOUNTER_CPU(phys_translate, "metaphysical translation")
+PERFCOUNTER(phys_translate, "metaphysical translation")
-PERFCOUNTER_CPU(idle_when_pending, "vcpu idle at event")
+PERFCOUNTER(idle_when_pending, "vcpu idle at event")
-PERFCOUNTER_CPU(pal_halt_light, "calls to pal_halt_light")
+PERFCOUNTER(pal_halt_light, "calls to pal_halt_light")
-PERFCOUNTER_CPU(lazy_cover, "lazy cover")
+PERFCOUNTER(lazy_cover, "lazy cover")
-PERFCOUNTER_CPU(mov_to_ar_imm, "privop mov_to_ar_imm")
-PERFCOUNTER_CPU(mov_to_ar_reg, "privop mov_to_ar_reg")
-PERFCOUNTER_CPU(mov_from_ar, "privop privified-mov_from_ar")
-PERFCOUNTER_CPU(ssm, "privop ssm")
-PERFCOUNTER_CPU(rsm, "privop rsm")
-PERFCOUNTER_CPU(rfi, "privop rfi")
-PERFCOUNTER_CPU(bsw0, "privop bsw0")
-PERFCOUNTER_CPU(bsw1, "privop bsw1")
-PERFCOUNTER_CPU(cover, "privop cover")
-PERFCOUNTER_CPU(fc, "privop privified-fc")
-PERFCOUNTER_CPU(cpuid, "privop privified-cpuid")
+PERFCOUNTER(mov_to_ar_imm, "privop mov_to_ar_imm")
+PERFCOUNTER(mov_to_ar_reg, "privop mov_to_ar_reg")
+PERFCOUNTER(mov_from_ar, "privop privified-mov_from_ar")
+PERFCOUNTER(ssm, "privop ssm")
+PERFCOUNTER(rsm, "privop rsm")
+PERFCOUNTER(rfi, "privop rfi")
+PERFCOUNTER(bsw0, "privop bsw0")
+PERFCOUNTER(bsw1, "privop bsw1")
+PERFCOUNTER(cover, "privop cover")
+PERFCOUNTER(fc, "privop privified-fc")
+PERFCOUNTER(cpuid, "privop privified-cpuid")
PERFCOUNTER_ARRAY(mov_to_cr, "privop mov to cr", 128)
PERFCOUNTER_ARRAY(mov_from_cr, "privop mov from cr", 128)
@@ -36,45 +36,45 @@ PERFCOUNTER_ARRAY(misc_privop, "p
PERFCOUNTER_ARRAY(misc_privop, "privop misc", 64)
// privileged instructions to fall into vmx_entry
-PERFCOUNTER_CPU(vmx_rsm, "vmx privop rsm")
-PERFCOUNTER_CPU(vmx_ssm, "vmx privop ssm")
-PERFCOUNTER_CPU(vmx_mov_to_psr, "vmx privop mov_to_psr")
-PERFCOUNTER_CPU(vmx_mov_from_psr, "vmx privop mov_from_psr")
-PERFCOUNTER_CPU(vmx_mov_from_cr, "vmx privop mov_from_cr")
-PERFCOUNTER_CPU(vmx_mov_to_cr, "vmx privop mov_to_cr")
-PERFCOUNTER_CPU(vmx_bsw0, "vmx privop bsw0")
-PERFCOUNTER_CPU(vmx_bsw1, "vmx privop bsw1")
-PERFCOUNTER_CPU(vmx_cover, "vmx privop cover")
-PERFCOUNTER_CPU(vmx_rfi, "vmx privop rfi")
-PERFCOUNTER_CPU(vmx_itr_d, "vmx privop itr_d")
-PERFCOUNTER_CPU(vmx_itr_i, "vmx privop itr_i")
-PERFCOUNTER_CPU(vmx_ptr_d, "vmx privop ptr_d")
-PERFCOUNTER_CPU(vmx_ptr_i, "vmx privop ptr_i")
-PERFCOUNTER_CPU(vmx_itc_d, "vmx privop itc_d")
-PERFCOUNTER_CPU(vmx_itc_i, "vmx privop itc_i")
-PERFCOUNTER_CPU(vmx_ptc_l, "vmx privop ptc_l")
-PERFCOUNTER_CPU(vmx_ptc_g, "vmx privop ptc_g")
-PERFCOUNTER_CPU(vmx_ptc_ga, "vmx privop ptc_ga")
-PERFCOUNTER_CPU(vmx_ptc_e, "vmx privop ptc_e")
-PERFCOUNTER_CPU(vmx_mov_to_rr, "vmx privop mov_to_rr")
-PERFCOUNTER_CPU(vmx_mov_from_rr, "vmx privop mov_from_rr")
-PERFCOUNTER_CPU(vmx_thash, "vmx privop thash")
-PERFCOUNTER_CPU(vmx_ttag, "vmx privop ttag")
-PERFCOUNTER_CPU(vmx_tpa, "vmx privop tpa")
-PERFCOUNTER_CPU(vmx_tak, "vmx privop tak")
-PERFCOUNTER_CPU(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
-PERFCOUNTER_CPU(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
-PERFCOUNTER_CPU(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
-PERFCOUNTER_CPU(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
-PERFCOUNTER_CPU(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
-PERFCOUNTER_CPU(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
-PERFCOUNTER_CPU(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
-PERFCOUNTER_CPU(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
-PERFCOUNTER_CPU(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
+PERFCOUNTER(vmx_rsm, "vmx privop rsm")
+PERFCOUNTER(vmx_ssm, "vmx privop ssm")
+PERFCOUNTER(vmx_mov_to_psr, "vmx privop mov_to_psr")
+PERFCOUNTER(vmx_mov_from_psr, "vmx privop mov_from_psr")
+PERFCOUNTER(vmx_mov_from_cr, "vmx privop mov_from_cr")
+PERFCOUNTER(vmx_mov_to_cr, "vmx privop mov_to_cr")
+PERFCOUNTER(vmx_bsw0, "vmx privop bsw0")
+PERFCOUNTER(vmx_bsw1, "vmx privop bsw1")
+PERFCOUNTER(vmx_cover, "vmx privop cover")
+PERFCOUNTER(vmx_rfi, "vmx privop rfi")
+PERFCOUNTER(vmx_itr_d, "vmx privop itr_d")
+PERFCOUNTER(vmx_itr_i, "vmx privop itr_i")
+PERFCOUNTER(vmx_ptr_d, "vmx privop ptr_d")
+PERFCOUNTER(vmx_ptr_i, "vmx privop ptr_i")
+PERFCOUNTER(vmx_itc_d, "vmx privop itc_d")
+PERFCOUNTER(vmx_itc_i, "vmx privop itc_i")
+PERFCOUNTER(vmx_ptc_l, "vmx privop ptc_l")
+PERFCOUNTER(vmx_ptc_g, "vmx privop ptc_g")
+PERFCOUNTER(vmx_ptc_ga, "vmx privop ptc_ga")
+PERFCOUNTER(vmx_ptc_e, "vmx privop ptc_e")
+PERFCOUNTER(vmx_mov_to_rr, "vmx privop mov_to_rr")
+PERFCOUNTER(vmx_mov_from_rr, "vmx privop mov_from_rr")
+PERFCOUNTER(vmx_thash, "vmx privop thash")
+PERFCOUNTER(vmx_ttag, "vmx privop ttag")
+PERFCOUNTER(vmx_tpa, "vmx privop tpa")
+PERFCOUNTER(vmx_tak, "vmx privop tak")
+PERFCOUNTER(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
+PERFCOUNTER(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
+PERFCOUNTER(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
+PERFCOUNTER(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
+PERFCOUNTER(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
+PERFCOUNTER(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
+PERFCOUNTER(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
+PERFCOUNTER(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
+PERFCOUNTER(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
+PERFCOUNTER(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
+PERFCOUNTER(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
+PERFCOUNTER(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
+PERFCOUNTER(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
PERFCOUNTER_ARRAY(slow_hyperprivop, "slow hyperprivops", HYPERPRIVOP_MAX + 1)
@@ -87,9 +87,9 @@ PERFSTATUS(vhpt_valid_entries, "n
PERFSTATUS(vhpt_valid_entries, "nbr of valid entries in VHPT")
PERFCOUNTER_ARRAY(vmx_mmio_access, "vmx_mmio_access", 8)
-PERFCOUNTER_CPU(vmx_pal_emul, "vmx_pal_emul")
+PERFCOUNTER(vmx_pal_emul, "vmx_pal_emul")
PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
-PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
+PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break")
PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
"vmx_inject_guest_interruption", 0x80)
PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20)
@@ -111,66 +111,66 @@ PERFPRIVOPADDR(thash)
#endif
// vhpt.c
-PERFCOUNTER_CPU(local_vhpt_flush, "local_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_vhpt_flush, "vcpu_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
-PERFCOUNTER_CPU(domain_flush_vtlb_all, "domain_flush_vtlb_all")
-PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
-PERFCOUNTER_CPU(domain_flush_vtlb_track_entry,
- "domain_flush_vtlb_track_entry")
-PERFCOUNTER_CPU(domain_flush_vtlb_local, "domain_flush_vtlb_local")
-PERFCOUNTER_CPU(domain_flush_vtlb_global, "domain_flush_vtlb_global")
-PERFCOUNTER_CPU(domain_flush_vtlb_range, "domain_flush_vtlb_range")
+PERFCOUNTER(local_vhpt_flush, "local_vhpt_flush")
+PERFCOUNTER(vcpu_vhpt_flush, "vcpu_vhpt_flush")
+PERFCOUNTER(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
+PERFCOUNTER(domain_flush_vtlb_all, "domain_flush_vtlb_all")
+PERFCOUNTER(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
+PERFCOUNTER(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
+PERFCOUNTER(domain_flush_vtlb_local, "domain_flush_vtlb_local")
+PERFCOUNTER(domain_flush_vtlb_global, "domain_flush_vtlb_global")
+PERFCOUNTER(domain_flush_vtlb_range, "domain_flush_vtlb_range")
// domain.c
-PERFCOUNTER_CPU(flush_vtlb_for_context_switch,
- "flush_vtlb_for_context_switch")
+PERFCOUNTER(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
// mm.c
-PERFCOUNTER_CPU(assign_domain_page_replace, "assign_domain_page_replace")
-PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel,
- "assign_domain_pge_cmpxchg_rel")
-PERFCOUNTER_CPU(zap_dcomain_page_one, "zap_dcomain_page_one")
-PERFCOUNTER_CPU(dom0vp_zap_physmap, "dom0vp_zap_physmap")
-PERFCOUNTER_CPU(dom0vp_add_physmap, "dom0vp_add_physmap")
-PERFCOUNTER_CPU(create_grant_host_mapping, "create_grant_host_mapping")
-PERFCOUNTER_CPU(destroy_grant_host_mapping, "destroy_grant_host_mapping")
-PERFCOUNTER_CPU(steal_page_refcount, "steal_page_refcount")
-PERFCOUNTER_CPU(steal_page, "steal_page")
-PERFCOUNTER_CPU(guest_physmap_add_page, "guest_physmap_add_page")
-PERFCOUNTER_CPU(guest_physmap_remove_page, "guest_physmap_remove_page")
-PERFCOUNTER_CPU(domain_page_flush_and_put, "domain_page_flush_and_put")
+PERFCOUNTER(assign_domain_page_replace, "assign_domain_page_replace")
+PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
+PERFCOUNTER(zap_dcomain_page_one, "zap_dcomain_page_one")
+PERFCOUNTER(dom0vp_zap_physmap, "dom0vp_zap_physmap")
+PERFCOUNTER(dom0vp_add_physmap, "dom0vp_add_physmap")
+PERFCOUNTER(create_grant_host_mapping, "create_grant_host_mapping")
+PERFCOUNTER(destroy_grant_host_mapping, "destroy_grant_host_mapping")
+PERFCOUNTER(steal_page_refcount, "steal_page_refcount")
+PERFCOUNTER(steal_page, "steal_page")
+PERFCOUNTER(guest_physmap_add_page, "guest_physmap_add_page")
+PERFCOUNTER(guest_physmap_remove_page, "guest_physmap_remove_page")
+PERFCOUNTER(domain_page_flush_and_put, "domain_page_flush_and_put")
// dom0vp
-PERFCOUNTER_CPU(dom0vp_phystomach, "dom0vp_phystomach")
-PERFCOUNTER_CPU(dom0vp_machtophys, "dom0vp_machtophys")
+PERFCOUNTER(dom0vp_phystomach, "dom0vp_phystomach")
+PERFCOUNTER(dom0vp_machtophys, "dom0vp_machtophys")
#ifdef CONFIG_XEN_IA64_TLB_TRACK
// insert or dirty
-PERFCOUNTER_CPU(tlb_track_iod, "tlb_track_iod")
-PERFCOUNTER_CPU(tlb_track_iod_again, "tlb_track_iod_again")
-PERFCOUNTER_CPU(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
-PERFCOUNTER_CPU(tlb_track_iod_force_many, "tlb_track_iod_force_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many_del,
- "tlb_track_iod_tracked_many_del")
-PERFCOUNTER_CPU(tlb_track_iod_found, "tlb_track_iod_found")
-PERFCOUNTER_CPU(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
-PERFCOUNTER_CPU(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
-PERFCOUNTER_CPU(tlb_track_iod_new_many, "tlb_track_iod_new_many")
-PERFCOUNTER_CPU(tlb_track_iod_insert, "tlb_track_iod_insert")
-PERFCOUNTER_CPU(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
+PERFCOUNTER(tlb_track_iod, "tlb_track_iod")
+PERFCOUNTER(tlb_track_iod_again, "tlb_track_iod_again")
+PERFCOUNTER(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
+PERFCOUNTER(tlb_track_iod_force_many, "tlb_track_iod_force_many")
+PERFCOUNTER(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
+PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
+PERFCOUNTER(tlb_track_iod_found, "tlb_track_iod_found")
+PERFCOUNTER(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
+PERFCOUNTER(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
+PERFCOUNTER(tlb_track_iod_new_many, "tlb_track_iod_new_many")
+PERFCOUNTER(tlb_track_iod_insert, "tlb_track_iod_insert")
+PERFCOUNTER(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
// search and remove
-PERFCOUNTER_CPU(tlb_track_sar, "tlb_track_sar")
-PERFCOUNTER_CPU(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
-PERFCOUNTER_CPU(tlb_track_sar_not_found, "tlb_track_sar_not_found")
-PERFCOUNTER_CPU(tlb_track_sar_found, "tlb_track_sar_found")
-PERFCOUNTER_CPU(tlb_track_sar_many, "tlb_track_sar_many")
+PERFCOUNTER(tlb_track_sar, "tlb_track_sar")
+PERFCOUNTER(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
+PERFCOUNTER(tlb_track_sar_not_found, "tlb_track_sar_not_found")
+PERFCOUNTER(tlb_track_sar_found, "tlb_track_sar_found")
+PERFCOUNTER(tlb_track_sar_many, "tlb_track_sar_many")
// flush
-PERFCOUNTER_CPU(tlb_track_use_rr7, "tlb_track_use_rr7")
-PERFCOUNTER_CPU(tlb_track_swap_rr0, "tlb_track_swap_rr0")
+PERFCOUNTER(tlb_track_use_rr7, "tlb_track_use_rr7")
+PERFCOUNTER(tlb_track_swap_rr0, "tlb_track_swap_rr0")
#endif
// tlb flush clock
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
+PERFCOUNTER(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
+PERFCOUNTER(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
#endif
diff -r 96f167771979 -r ea0b50ca4999 xen/include/asm-ia64/tlb_track.h
--- a/xen/include/asm-ia64/tlb_track.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-ia64/tlb_track.h Tue Mar 27 16:42:47 2007 +0100
@@ -97,9 +97,9 @@ vcpu_tlb_track_insert_or_dirty(struct vc
 {
     /* optimization.
        non-tracking pte is most common. */
-    perfc_incrc(tlb_track_iod);
+    perfc_incr(tlb_track_iod);
     if (!pte_tlb_tracking(entry->used)) {
-        perfc_incrc(tlb_track_iod_not_tracked);
+        perfc_incr(tlb_track_iod_not_tracked);
         return;
     }
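
With the PERFCOUNTER_CPU/perfc_incrc compatibility aliases removed, formerly
per-CPU counters are declared and bumped exactly like plain ones. A minimal
sketch of the resulting pattern, assuming a hypothetical call site (the
declaration line is the real one from the ia64 list above):

    /* perfc_defn.h: one declaration form for every counter */
    PERFCOUNTER(tlb_track_iod, "tlb_track_iod")

    /* hypothetical call site: perfc_incr() is now the only increment
     * primitive; perfc_incrc() is gone */
    static inline void example_site(void)
    {
        perfc_incr(tlb_track_iod);
    }
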
diff -r 96f167771979 -r ea0b50ca4999 xen/include/asm-x86/perfc_defn.h
--- a/xen/include/asm-x86/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-x86/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -12,83 +12,83 @@ PERFCOUNTER_ARRAY(cause_vector,
#define SVM_PERF_EXIT_REASON_SIZE (1+136)
PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE)
-PERFCOUNTER_CPU(seg_fixups, "segmentation fixups")
+PERFCOUNTER(seg_fixups, "segmentation fixups")
-PERFCOUNTER_CPU(apic_timer, "apic timer interrupts")
+PERFCOUNTER(apic_timer, "apic timer interrupts")
-PERFCOUNTER_CPU(domain_page_tlb_flush, "domain page tlb flushes")
+PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes")
PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op")
PERFCOUNTER(num_mmuext_ops, "mmuext ops")
PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update")
PERFCOUNTER(num_page_updates, "page updates")
PERFCOUNTER(calls_to_update_va, "calls to update_va_map")
-PERFCOUNTER_CPU(page_faults, "page faults")
-PERFCOUNTER_CPU(copy_user_faults, "copy_user faults")
+PERFCOUNTER(page_faults, "page faults")
+PERFCOUNTER(copy_user_faults, "copy_user faults")
-PERFCOUNTER_CPU(map_domain_page_count, "map_domain_page count")
-PERFCOUNTER_CPU(ptwr_emulations, "writable pt emulations")
+PERFCOUNTER(map_domain_page_count, "map_domain_page count")
+PERFCOUNTER(ptwr_emulations, "writable pt emulations")
-PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
+PERFCOUNTER(exception_fixed, "pre-exception fixed")
/* Shadow counters */
-PERFCOUNTER_CPU(shadow_alloc, "calls to shadow_alloc")
-PERFCOUNTER_CPU(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
+PERFCOUNTER(shadow_alloc, "calls to shadow_alloc")
+PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
/* STATUS counters do not reset when 'P' is hit */
PERFSTATUS(shadow_alloc_count, "number of shadow pages in use")
-PERFCOUNTER_CPU(shadow_free, "calls to shadow_free")
-PERFCOUNTER_CPU(shadow_prealloc_1, "shadow recycles old shadows")
-PERFCOUNTER_CPU(shadow_prealloc_2, "shadow recycles in-use shadows")
-PERFCOUNTER_CPU(shadow_linear_map_failed, "shadow hit read-only linear map")
-PERFCOUNTER_CPU(shadow_a_update, "shadow A bit update")
-PERFCOUNTER_CPU(shadow_ad_update, "shadow A&D bit update")
-PERFCOUNTER_CPU(shadow_fault, "calls to shadow_fault")
-PERFCOUNTER_CPU(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
-PERFCOUNTER_CPU(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
-PERFCOUNTER_CPU(shadow_fault_fast_fail, "shadow_fault fast path error")
-PERFCOUNTER_CPU(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
-PERFCOUNTER_CPU(shadow_fault_bail_not_present,
+PERFCOUNTER(shadow_free, "calls to shadow_free")
+PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows")
+PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows")
+PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map")
+PERFCOUNTER(shadow_a_update, "shadow A bit update")
+PERFCOUNTER(shadow_ad_update, "shadow A&D bit update")
+PERFCOUNTER(shadow_fault, "calls to shadow_fault")
+PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
+PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
+PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error")
+PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
+PERFCOUNTER(shadow_fault_bail_not_present,
"shadow_fault guest not-present")
-PERFCOUNTER_CPU(shadow_fault_bail_nx, "shadow_fault guest NX fault")
-PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
-PERFCOUNTER_CPU(shadow_fault_bail_user_supervisor,
+PERFCOUNTER(shadow_fault_bail_nx, "shadow_fault guest NX fault")
+PERFCOUNTER(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
+PERFCOUNTER(shadow_fault_bail_user_supervisor,
"shadow_fault guest U/S fault")
-PERFCOUNTER_CPU(shadow_fault_emulate_read, "shadow_fault emulates a read")
-PERFCOUNTER_CPU(shadow_fault_emulate_write, "shadow_fault emulates a write")
-PERFCOUNTER_CPU(shadow_fault_emulate_failed, "shadow_fault emulator fails")
-PERFCOUNTER_CPU(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
-PERFCOUNTER_CPU(shadow_fault_mmio, "shadow_fault handled as mmio")
-PERFCOUNTER_CPU(shadow_fault_fixed, "shadow_fault fixed fault")
-PERFCOUNTER_CPU(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
-PERFCOUNTER_CPU(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
-PERFCOUNTER_CPU(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
-PERFCOUNTER_CPU(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
-PERFCOUNTER_CPU(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
-PERFCOUNTER_CPU(shadow_hash_lookups, "calls to shadow_hash_lookup")
-PERFCOUNTER_CPU(shadow_hash_lookup_head, "shadow hash hit in bucket head")
-PERFCOUNTER_CPU(shadow_hash_lookup_miss, "shadow hash misses")
-PERFCOUNTER_CPU(shadow_get_shadow_status, "calls to get_shadow_status")
-PERFCOUNTER_CPU(shadow_hash_inserts, "calls to shadow_hash_insert")
-PERFCOUNTER_CPU(shadow_hash_deletes, "calls to shadow_hash_delete")
-PERFCOUNTER_CPU(shadow_writeable, "shadow removes write access")
-PERFCOUNTER_CPU(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_4, "shadow writeable: 32b linux low")
-PERFCOUNTER_CPU(shadow_writeable_h_5, "shadow writeable: 32b linux high")
-PERFCOUNTER_CPU(shadow_writeable_bf, "shadow writeable brute-force")
-PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings")
-PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force")
-PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_unshadow, "shadow unshadows a page")
-PERFCOUNTER_CPU(shadow_up_pointer, "shadow unshadow by up-pointer")
-PERFCOUNTER_CPU(shadow_unshadow_bf, "shadow unshadow brute-force")
-PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
-PERFCOUNTER_CPU(shadow_guest_walk, "shadow walks guest tables")
-PERFCOUNTER_CPU(shadow_invlpg, "shadow emulates invlpg")
-PERFCOUNTER_CPU(shadow_invlpg_fault, "shadow invlpg faults")
+PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read")
+PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write")
+PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails")
+PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
+PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio")
+PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault")
+PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
+PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
+PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
+PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
+PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
+PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup")
+PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head")
+PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses")
+PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status")
+PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert")
+PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete")
+PERFCOUNTER(shadow_writeable, "shadow removes write access")
+PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
+PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
+PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
+PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: 32b linux low")
+PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: 32b linux high")
+PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force")
+PERFCOUNTER(shadow_mappings, "shadow removes all mappings")
+PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force")
+PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit")
+PERFCOUNTER(shadow_unshadow, "shadow unshadows a page")
+PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer")
+PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force")
+PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
+PERFCOUNTER(shadow_guest_walk, "shadow walks guest tables")
+PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg")
+PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults")
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
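
The arrays at the top of this file (cause_vector, svmexits) are indexed
counters rather than scalars. A short sketch of how one slot is bumped,
assuming the perfc_incra() array-increment helper that perfc.h pairs with
PERFCOUNTER_ARRAY (the surrounding function is hypothetical, not code from
the tree):

    /* hypothetical interrupt path: count one hit in slot 'vector' of the
     * cause_vector array declared with PERFCOUNTER_ARRAY above */
    static void count_vector(unsigned int vector)
    {
        perfc_incra(cause_vector, vector);
    }
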
diff -r 96f167771979 -r ea0b50ca4999 xen/include/xen/perfc.h
--- a/xen/include/xen/perfc.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/xen/perfc.h Tue Mar 27 16:42:47 2007 +0100
@@ -1,4 +1,3 @@
-
#ifndef __XEN_PERFC_H__
#define __XEN_PERFC_H__
@@ -8,13 +7,14 @@
#include <xen/smp.h>
#include <xen/percpu.h>
-/*
+/*
* NOTE: new counters must be defined in perfc_defn.h
*
+ * Counter declarations:
* PERFCOUNTER (counter, string) define a new performance counter
* PERFCOUNTER_ARRAY (counter, string, size) define an array of counters
*
- * unlike "COUNTERS", "STATUS" variables DO NOT RESET
+ * Unlike counters, status variables do not reset:
 * PERFSTATUS (counter, string) define a new performance status
* PERFSTATUS_ARRAY (counter, string, size) define an array of status vars
*
@@ -31,16 +31,13 @@
*/
 #define PERFCOUNTER( name, descr ) \
-    PERFC_ ## name,
+    PERFC_##name,
 #define PERFCOUNTER_ARRAY( name, descr, size ) \
-    PERFC_ ## name, \
-    PERFC_LAST_ ## name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
+    PERFC_##name, \
+    PERFC_LAST_##name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
 #define PERFSTATUS PERFCOUNTER
 #define PERFSTATUS_ARRAY PERFCOUNTER_ARRAY
-
-/* Compatibility: This should go away once all users got converted. */
-#define PERFCOUNTER_CPU PERFCOUNTER
enum perfcounter {
#include <xen/perfc_defn.h>
@@ -115,7 +112,4 @@ int perfc_control(struct xen_sysctl_perf
#endif /* PERF_COUNTERS */
-/* Compatibility: This should go away once all users got converted. */
-#define perfc_incrc perfc_incr
-
#endif /* __XEN_PERFC_H__ */
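
For reference, the PERFCOUNTER_ARRAY expansion above reserves one enum slot
per array element and hides a compile-time size check. A worked expansion
with a hypothetical array name makes it concrete:

    /* Hypothetical input, not part of the patch:
     *     PERFCOUNTER_ARRAY(foo, "foo", 4)
     * expands inside 'enum perfcounter' to
     *     PERFC_foo,
     *     PERFC_LAST_foo = PERFC_foo + (4) - sizeof(char[2 * !!(4) - 1]),
     * i.e. PERFC_LAST_foo = PERFC_foo + 3, since sizeof(char[1]) == 1 for
     * any non-zero size.  With size == 0 the array type would be char[-1],
     * a compile-time error, so the sizeof() term doubles as a static
     * assertion that counter arrays are non-empty. */
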
diff -r 96f167771979 -r ea0b50ca4999 xen/include/xen/perfc_defn.h
--- a/xen/include/xen/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/xen/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -9,13 +9,13 @@ PERFCOUNTER(calls_to_multicall,
PERFCOUNTER(calls_to_multicall, "calls to multicall")
PERFCOUNTER(calls_from_multicall, "calls from multicall")
-PERFCOUNTER_CPU(irqs, "#interrupts")
-PERFCOUNTER_CPU(ipis, "#IPIs")
+PERFCOUNTER(irqs, "#interrupts")
+PERFCOUNTER(ipis, "#IPIs")
-PERFCOUNTER_CPU(sched_irq, "sched: timer")
-PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler")
-PERFCOUNTER_CPU(sched_ctx, "sched: context switches")
+PERFCOUNTER(sched_irq, "sched: timer")
+PERFCOUNTER(sched_run, "sched: runs through scheduler")
+PERFCOUNTER(sched_ctx, "sched: context switches")
-PERFCOUNTER_CPU(need_flush_tlb_flush, "PG_need_flush tlb flushes")
+PERFCOUNTER(need_flush_tlb_flush, "PG_need_flush tlb flushes")
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog