diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/Makefile xen-unstable.hg-20050823/xen/arch/x86/Makefile --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/Makefile 2005-08-24 07:44:52.000000000 -0500 +++ xen-unstable.hg-20050823/xen/arch/x86/Makefile 2005-08-23 07:05:17.000000000 -0500 @@ -33,7 +33,10 @@ ifneq ($(crash_debug),y) OBJS := $(patsubst cdb%.o,,$(OBJS)) endif +OBJS += oprofile/oprofile.o + default: $(TARGET) + make -C oprofile $(TARGET): $(TARGET)-syms boot/mkelf32 ./boot/mkelf32 $(TARGET)-syms $(TARGET) 0x100000 @@ -60,6 +63,9 @@ asm-offsets.s: $(TARGET_SUBARCH)/asm-off boot/mkelf32: boot/mkelf32.c $(HOSTCC) $(HOSTCFLAGS) -o $@ $< +oprofile/oprofile.o: + $(MAKE) -C oprofile + clean: rm -f *.o *.s *~ core boot/*.o boot/*~ boot/core boot/mkelf32 rm -f x86_32/*.o x86_32/*~ x86_32/core @@ -68,5 +74,6 @@ clean: rm -f acpi/*.o acpi/*~ acpi/core rm -f genapic/*.o genapic/*~ genapic/core rm -f cpu/*.o cpu/*~ cpu/core + rm -f oprofile/*.o .PHONY: default clean diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/nmi.c xen-unstable.hg-20050823/xen/arch/x86/nmi.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/nmi.c 2005-08-24 07:44:52.000000000 -0500 +++ xen-unstable.hg-20050823/xen/arch/x86/nmi.c 2005-08-23 07:05:17.000000000 -0500 @@ -5,6 +5,10 @@ * * Started by Ingo Molnar * + * Modified by Aravind Menon for supporting oprofile + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. + * * Fixes: * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog. * Mikael Pettersson : Power Management for local APIC NMI watchdog. @@ -35,6 +39,28 @@ static unsigned int nmi_p4_cccr_val; static struct ac_timer nmi_timer[NR_CPUS]; static unsigned int nmi_timer_ticks[NR_CPUS]; +/* + * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: + * - it may be reserved by some other driver, or not + * - when not reserved by some other driver, it may be used for + * the NMI watchdog, or not + * + * This is maintained separately from nmi_active because the NMI + * watchdog may also be driven from the I/O APIC timer. + */ +static spinlock_t lapic_nmi_owner_lock = SPIN_LOCK_UNLOCKED; +static unsigned int lapic_nmi_owner; +#define LAPIC_NMI_WATCHDOG (1<<0) +#define LAPIC_NMI_RESERVED (1<<1) + +/* nmi_active: + * +1: the lapic NMI watchdog is active, but can be disabled + * 0: the lapic NMI watchdog has not been set up, and cannot + * be enabled + * -1: the lapic NMI watchdog is disabled, but can be enabled + */ +int nmi_active; + #define K7_EVNTSEL_ENABLE (1 << 22) #define K7_EVNTSEL_INT (1 << 20) #define K7_EVNTSEL_OS (1 << 17) @@ -66,8 +92,6 @@ static unsigned int nmi_timer_ticks[NR_C * max threshold. [IA32-Vol3, Section 14.9.9] */ #define MSR_P4_IQ_COUNTER0 0x30C -#define MSR_P4_IQ_CCCR0 0x36C -#define MSR_P4_CRU_ESCR0 0x3B8 /* ESCR no. 4 */ #define P4_NMI_CRU_ESCR0 P4_ESCR_EVENT_SELECT(0x3F) #define P4_NMI_IQ_CCCR0 \ (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ @@ -124,6 +148,70 @@ static inline void nmi_pm_init(void) { } * Original code written by Keith Owens. 
*/ +static void disable_lapic_nmi_watchdog(void) +{ + if (nmi_active <= 0) + return; + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + wrmsr(MSR_K7_EVNTSEL0, 0, 0); + break; + case X86_VENDOR_INTEL: + switch (boot_cpu_data.x86) { + case 6: + wrmsr(MSR_P6_EVNTSEL0, 0, 0); + break; + case 15: + if ( (smp_num_siblings <= 1) || + ( (smp_processor_id() % smp_num_siblings) == 0) ) + { + wrmsr(MSR_P4_IQ_CCCR0, 0, 0); + wrmsr(MSR_P4_CRU_ESCR0, 0, 0); + } else { + wrmsr(MSR_P4_IQ_CCCR1, 0, 0); + } + break; + } + break; + } + nmi_active = -1; + /* tell do_nmi() and others that we're not active any more */ + nmi_watchdog = 0; +} + +static void enable_lapic_nmi_watchdog(void) +{ + if (nmi_active < 0) { + nmi_watchdog = NMI_LOCAL_APIC; + setup_apic_nmi_watchdog(); + } +} + +int reserve_lapic_nmi(void) +{ + unsigned int old_owner; + spin_lock(&lapic_nmi_owner_lock); + old_owner = lapic_nmi_owner; + lapic_nmi_owner |= LAPIC_NMI_RESERVED; + spin_unlock(&lapic_nmi_owner_lock); + if (old_owner & LAPIC_NMI_RESERVED) + return -EBUSY; + if (old_owner & LAPIC_NMI_WATCHDOG) + disable_lapic_nmi_watchdog(); + return 0; +} + +void release_lapic_nmi(void) +{ + unsigned int new_owner; + spin_lock(&lapic_nmi_owner_lock); + new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED; + lapic_nmi_owner = new_owner; + spin_unlock(&lapic_nmi_owner_lock); + if (new_owner & LAPIC_NMI_WATCHDOG) + enable_lapic_nmi_watchdog(); +} + static void __pminit clear_msr_range(unsigned int base, unsigned int n) { unsigned int i; @@ -241,6 +329,9 @@ void __pminit setup_apic_nmi_watchdog(vo init_ac_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu); + lapic_nmi_owner = LAPIC_NMI_WATCHDOG; + nmi_active = 1; + nmi_pm_init(); } @@ -337,3 +428,7 @@ void nmi_watchdog_tick(struct cpu_user_r wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1); } } + +EXPORT_SYMBOL(reserve_lapic_nmi); +EXPORT_SYMBOL(release_lapic_nmi); + diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/Makefile xen-unstable.hg-20050823/xen/arch/x86/oprofile/Makefile --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/Makefile 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/Makefile 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,9 @@ + +include $(BASEDIR)/Rules.mk + +default: $(OBJS) + $(LD) $(LDFLAGS) -r -o oprofile.o $(OBJS) + +%.o: %.c $(HDRS) Makefile + $(CC) $(CFLAGS) -c $< -o $@ + diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/nmi_int.c xen-unstable.hg-20050823/xen/arch/x86/oprofile/nmi_int.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/nmi_int.c 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/nmi_int.c 2005-08-24 05:20:33.000000000 -0500 @@ -0,0 +1,453 @@ +/** + * @file nmi_int.c + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + * + * Modified by Aravind Menon for Xen + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "op_counter.h" +#include "op_x86_model.h" + +static struct op_x86_model_spec const * model; +static struct op_msrs cpu_msrs[NR_CPUS]; +static unsigned long saved_lvtpc[NR_CPUS]; + +#define VIRQ_BITMASK_SIZE (MAX_OPROF_DOMAINS/32 + 1) + +extern int active_domains[MAX_OPROF_DOMAINS]; +extern unsigned int adomains; + +extern struct domain * primary_profiler; +extern struct domain * adomain_ptrs[MAX_OPROF_DOMAINS]; +extern unsigned long virq_ovf_pending[VIRQ_BITMASK_SIZE]; + +extern int is_active(struct domain *d); +extern int active_id(struct domain *d); +extern int is_passive(struct domain *d); +extern int is_profiled(struct domain *d); + + +int nmi_profiling_started = 0; + +int active_virq_count = 0; +int passive_virq_count = 0; +int other_virq_count = 0; +int other_id = -1; +int xen_count = 0; +int dom_count = 0; +int ovf = 0; + +int nmi_callback(struct cpu_user_regs * regs, int cpu) +{ + int xen_mode = 0; + + ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs); + xen_mode = RING_0(regs); + if (ovf) { + if (xen_mode) + xen_count++; + else + dom_count++; + + if (is_active(current->domain)) { + /* This is slightly incorrect. If we do not deliver + OVF virtual interrupts in a synchronous + manner, a process switch may happen in the domain + between the point the sample was collected and + the point at which a VIRQ was delivered. However, + it is not safe to call send_guest_virq from this + NMI context, it may lead to a deadlock since NMIs are + unmaskable. One optimization that we can do is + that if the sample occurs while domain code is + runnng, we know that it is safe to call + send_guest_virq, since we know no Xen code + is running at that time. 
+ However, this may distort the sample distribution, + because we may lose more Xen mode samples.*/ + active_virq_count++; + if (!xen_mode) { + send_guest_virq(current, VIRQ_PMC_OVF); + clear_bit(active_id(current->domain), &virq_ovf_pending[0]); + } else + set_bit(active_id(current->domain), &virq_ovf_pending[0]); + primary_profiler->shared_info->active_samples++; + } + else if (is_passive(current->domain)) { + set_bit(active_id(primary_profiler), &virq_ovf_pending[0]); + passive_virq_count++; + primary_profiler->shared_info->passive_samples++; + } + else { + other_virq_count++; + other_id = current->domain->domain_id; + primary_profiler->shared_info->other_samples++; + } + } + return 1; +} + +static void free_msrs(void) +{ + int i; + for (i = 0; i < NR_CPUS; ++i) { + xfree(cpu_msrs[i].counters); + cpu_msrs[i].counters = NULL; + xfree(cpu_msrs[i].controls); + cpu_msrs[i].controls = NULL; + } +} + +static int allocate_msrs(void) +{ + int success = 1; + size_t controls_size = sizeof(struct op_msr) * model->num_controls; + size_t counters_size = sizeof(struct op_msr) * model->num_counters; + + int i; + for (i = 0; i < NR_CPUS; ++i) { + //if (!cpu_online(i)) + if (!test_bit(i, &cpu_online_map)) + continue; + + cpu_msrs[i].counters = xmalloc_bytes(counters_size); + if (!cpu_msrs[i].counters) { + success = 0; + break; + } + cpu_msrs[i].controls = xmalloc_bytes(controls_size); + if (!cpu_msrs[i].controls) { + success = 0; + break; + } + } + if (!success) + free_msrs(); + + return success; +} + +static void nmi_cpu_save_registers(struct op_msrs * msrs) +{ + unsigned int const nr_ctrs = model->num_counters; + unsigned int const nr_ctrls = model->num_controls; + struct op_msr * counters = msrs->counters; + struct op_msr * controls = msrs->controls; + unsigned int i; + + for (i = 0; i < nr_ctrs; ++i) { + rdmsr(counters[i].addr, + counters[i].saved.low, + counters[i].saved.high); + } + + for (i = 0; i < nr_ctrls; ++i) { + rdmsr(controls[i].addr, + controls[i].saved.low, + controls[i].saved.high); + } +} + +static void nmi_save_registers(void * dummy) +{ + int cpu = smp_processor_id(); + struct op_msrs * msrs = &cpu_msrs[cpu]; + model->fill_in_addresses(msrs); + nmi_cpu_save_registers(msrs); +} + +int nmi_reserve_counters(void) +{ + if (!allocate_msrs()) + return -ENOMEM; + + /* We walk a thin line between law and rape here. + * We need to be careful to install our NMI handler + * without actually triggering any NMIs as this will + * break the core code horrifically. 
+ */ + /* Don't we need to do this on all CPUs?*/ + if (reserve_lapic_nmi() < 0) { + free_msrs(); + return -EBUSY; + } + /* We need to serialize save and setup for HT because the subset + * of msrs are distinct for save and setup operations + */ + on_each_cpu(nmi_save_registers, NULL, 0, 1); + return 0; +} + +static void nmi_cpu_setup(void * dummy) +{ + int cpu = smp_processor_id(); + struct op_msrs * msrs = &cpu_msrs[cpu]; + model->setup_ctrs(msrs); +} + +int nmi_setup_events(void) +{ + on_each_cpu(nmi_cpu_setup, NULL, 0, 1); + return 0; +} + +int nmi_enable_virq() +{ + set_nmi_callback(nmi_callback); + return 0; +} + +static void nmi_cpu_start(void * dummy) +{ + int cpu = smp_processor_id(); + struct op_msrs const * msrs = &cpu_msrs[cpu]; + saved_lvtpc[cpu] = apic_read(APIC_LVTPC); + apic_write(APIC_LVTPC, APIC_DM_NMI); + model->start(msrs); +} + +int nmi_start(void) +{ + on_each_cpu(nmi_cpu_start, NULL, 0, 1); + nmi_profiling_started = 1; + return 0; +} + +static void nmi_cpu_stop(void * dummy) +{ + unsigned int v; + int cpu = smp_processor_id(); + struct op_msrs const * msrs = &cpu_msrs[cpu]; + model->stop(msrs); + + /* restoring APIC_LVTPC can trigger an apic error because the delivery + * mode and vector nr combination can be illegal. That's by design: on + * power on apic lvt contain a zero vector nr which are legal only for + * NMI delivery mode. So inhibit apic err before restoring lvtpc + */ + if (!(apic_read(APIC_LVTPC) & APIC_DM_NMI) + || (apic_read(APIC_LVTPC) & APIC_LVT_MASKED)) { + printk("nmi_stop: APIC not good %ul\n", apic_read(APIC_LVTPC)); + mdelay(5000); + } + v = apic_read(APIC_LVTERR); + apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); + apic_write(APIC_LVTPC, saved_lvtpc[cpu]); + apic_write(APIC_LVTERR, v); +} + +void nmi_stop(void) +{ + nmi_profiling_started = 0; + on_each_cpu(nmi_cpu_stop, NULL, 0, 1); + active_virq_count = 0; + passive_virq_count = 0; + other_virq_count = 0; + xen_count = 0; + dom_count = 0; +} + +extern unsigned int read_ctr(struct op_msrs const * const msrs, int ctr); + +void nmi_sanity_check(struct cpu_user_regs *regs, int cpu) +{ + int i; + int masked = 0; + + /* We may have missed some NMI interrupts if we were already + in an NMI context at that time. If this happens, then + the counters are not reset and in the case of P4, the + APIC LVT disable mask is set. In both cases we end up + losing samples. On P4, this condition can be detected + by checking the APIC LVT mask. But in P6, we need to + examine the counters for overflow. 
So, every timer + interrupt, we check that everything is OK */ + + if (apic_read(APIC_LVTPC) & APIC_LVT_MASKED) + masked = 1; + + nmi_callback(regs, cpu); + + if (ovf && masked) { + if (is_active(current->domain)) + current->domain->shared_info->nmi_restarts++; + else if (is_passive(current->domain)) + primary_profiler->shared_info->nmi_restarts++; + } + + /*if (jiffies %1000 == 0) { + printk("cpu %d: sample count %d %d %d at %u\n", cpu, active_virq_count, passive_virq_count, other_virq_count, jiffies); + printk("other task id %d\n", other_id); + printk("%d in xen, %d in domain\n", xen_count, dom_count); + printk("counters %p %p\n", read_ctr(&cpu_msrs[cpu], 0), read_ctr(&cpu_msrs[cpu], 1)); + }*/ + + + for (i = 0; i < adomains; i++) + if (test_and_clear_bit(i, &virq_ovf_pending[0])) { + /* For now we do not support profiling of SMP guests */ + /* virq is delivered to first VCPU */ + send_guest_virq(adomain_ptrs[i]->vcpu[0], VIRQ_PMC_OVF); + } +} + +void nmi_disable_virq(void) +{ + unset_nmi_callback(); +} + +static void nmi_restore_registers(struct op_msrs * msrs) +{ + unsigned int const nr_ctrs = model->num_counters; + unsigned int const nr_ctrls = model->num_controls; + struct op_msr * counters = msrs->counters; + struct op_msr * controls = msrs->controls; + unsigned int i; + + for (i = 0; i < nr_ctrls; ++i) { + wrmsr(controls[i].addr, + controls[i].saved.low, + controls[i].saved.high); + } + + for (i = 0; i < nr_ctrs; ++i) { + wrmsr(counters[i].addr, + counters[i].saved.low, + counters[i].saved.high); + } +} + +static void nmi_cpu_shutdown(void * dummy) +{ + int cpu = smp_processor_id(); + struct op_msrs * msrs = &cpu_msrs[cpu]; + nmi_restore_registers(msrs); +} + +void nmi_release_counters(void) +{ + on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); + release_lapic_nmi(); + free_msrs(); +} + +struct op_counter_config counter_config[OP_MAX_COUNTER]; + +static int __init p4_init(void) +{ + __u8 cpu_model = current_cpu_data.x86_model; + printk("cpu model: %d\n", cpu_model); + if (cpu_model > 4) + return 0; + +#ifndef CONFIG_SMP + printk("model is op_p4_spec (uniprocessor)\n"); + model = &op_p4_spec; + return 1; +#else + //switch (smp_num_siblings) { + printk("model is op_p4_ht2_spec (SMP)\n"); + if (cpu_has_ht) + { + model = &op_p4_ht2_spec; + return 1; + } + else + { + printk("model is op_p4_spec (SMP)\n"); + model = &op_p4_spec; + return 1; + } +#endif + return 0; +} + + +static int __init ppro_init(void) +{ + __u8 cpu_model = current_cpu_data.x86_model; + + if (cpu_model > 0xd) + return 0; + + model = &op_ppro_spec; + return 1; +} + +int nmi_init(int *num_events, int *is_primary) +{ + __u8 vendor = current_cpu_data.x86_vendor; + __u8 family = current_cpu_data.x86; + int prim = 0; + + if (!cpu_has_apic) { + printk("(XEN) cpu has no APIC\n"); + return -ENODEV; + } + + if (primary_profiler == NULL) { + primary_profiler = current->domain; + prim = 1; + } + + if (primary_profiler != current->domain) + goto out; + + printk("cpu vendor: %d\n", vendor); + printk("cpu family: %d\n", family); + + switch (vendor) { + case X86_VENDOR_INTEL: + switch (family) { + /* Pentium IV */ + case 0xf: + if (!p4_init()) + return -ENODEV; + break; + /* A P6-class processor */ + case 6: + if (!ppro_init()) + return -ENODEV; + break; + default: + return -ENODEV; + } + case X86_VENDOR_AMD: + switch (family) { + case 6: + model = &op_athlon_spec; + break; + case 0xf: + model = &op_athlon_spec; + break; + } + break; + default: + return -ENODEV; + } +out: + if (copy_to_user((void *)num_events, (void *)&model->num_counters, 
sizeof(int))) + return -EFAULT; + if (copy_to_user((void *)is_primary, (void *)&prim, sizeof(int))) + return -EFAULT; + + return 0; +} + diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_counter.h xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_counter.h --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_counter.h 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_counter.h 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,33 @@ +/** + * @file op_counter.h + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + * + * Modified by Aravind Menon for Xen + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. + */ + +#ifndef OP_COUNTER_H +#define OP_COUNTER_H + +#define OP_MAX_COUNTER 8 + +/* Per-perfctr configuration as set via + * oprofilefs. + */ +struct op_counter_config { + unsigned long count; + unsigned long enabled; + unsigned long event; + unsigned long kernel; + unsigned long user; + unsigned long unit_mask; +}; + +extern struct op_counter_config counter_config[]; + +#endif /* OP_COUNTER_H */ diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_athlon.c xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_athlon.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_athlon.c 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_athlon.c 2005-08-24 04:34:41.000000000 -0500 @@ -0,0 +1,174 @@ +/** + * @file op_model_athlon.h + * athlon / K7 model-specific MSR operations + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + * @author Philippe Elie + * @author Graydon Hoare + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "op_x86_model.h" +#include "op_counter.h" + + + +// #include +// #include +// #include + +#define NUM_COUNTERS 4 +#define NUM_CONTROLS 4 + +#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) +#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) +#define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) + +#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) +#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) +#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) +#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) +#define CTRL_CLEAR(x) (x &= (1<<21)) +#define CTRL_SET_ENABLE(val) (val |= 1<<20) +#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) +#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) +#define CTRL_SET_UM(val, m) (val |= (m << 8)) +#define CTRL_SET_EVENT(val, e) (val |= e) + +static unsigned long reset_value[NUM_COUNTERS]; + +extern void pmc_log_event(struct domain *d, u64 eip, int mode, int event); + +static void athlon_fill_in_addresses(struct op_msrs * const msrs) +{ + msrs->counters[0].addr = MSR_K7_PERFCTR0; + msrs->counters[1].addr = MSR_K7_PERFCTR1; + msrs->counters[2].addr = MSR_K7_PERFCTR2; + msrs->counters[3].addr = MSR_K7_PERFCTR3; + + msrs->controls[0].addr = MSR_K7_EVNTSEL0; + msrs->controls[1].addr = MSR_K7_EVNTSEL1; + msrs->controls[2].addr = MSR_K7_EVNTSEL2; + msrs->controls[3].addr = MSR_K7_EVNTSEL3; +} + + +static void athlon_setup_ctrs(struct op_msrs const * const msrs) +{ + unsigned int low, high; + int i; + + /* clear all counters */ + for (i = 0 ; i < NUM_CONTROLS; ++i) { + 
CTRL_READ(low, high, msrs, i); + CTRL_CLEAR(low); + CTRL_WRITE(low, high, msrs, i); + } + + /* avoid a false detection of ctr overflows in NMI handler */ + for (i = 0; i < NUM_COUNTERS; ++i) { + CTR_WRITE(1, msrs, i); + } + + /* enable active counters */ + for (i = 0; i < NUM_COUNTERS; ++i) { + if (counter_config[i].enabled) { + reset_value[i] = counter_config[i].count; + + CTR_WRITE(counter_config[i].count, msrs, i); + + CTRL_READ(low, high, msrs, i); + CTRL_CLEAR(low); + CTRL_SET_ENABLE(low); + CTRL_SET_USR(low, counter_config[i].user); + CTRL_SET_KERN(low, counter_config[i].kernel); + CTRL_SET_UM(low, counter_config[i].unit_mask); + CTRL_SET_EVENT(low, counter_config[i].event); + CTRL_WRITE(low, high, msrs, i); + } else { + reset_value[i] = 0; + } + } +} + + +//static int athlon_check_ctrs(struct pt_regs * const regs, +// struct op_msrs const * const msrs) +static int athlon_check_ctrs(unsigned int const cpu, + struct op_msrs const * const msrs, + struct cpu_user_regs * const regs) + +{ + unsigned int low, high; + int i; + u64 eip = regs->eip; + int mode = 0; + struct vcpu *v = current; + + if (KERNEL_MODE(v, regs)) + mode = 1; + else if (RING_0(regs)) + mode = 2; + + for (i = 0 ; i < NUM_COUNTERS; ++i) { + CTR_READ(low, high, msrs, i); + if (CTR_OVERFLOWED(low)) { + //oprofile_add_sample(regs, i); + pmc_log_event(current->domain, eip, mode, i); + CTR_WRITE(reset_value[i], msrs, i); + } + } + + /* See op_model_ppro.c */ + return 1; +} + + +static void athlon_start(struct op_msrs const * const msrs) +{ + unsigned int low, high; + int i; + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + if (reset_value[i]) { + CTRL_READ(low, high, msrs, i); + CTRL_SET_ACTIVE(low); + CTRL_WRITE(low, high, msrs, i); + } + } +} + + +static void athlon_stop(struct op_msrs const * const msrs) +{ + unsigned int low,high; + int i; + + /* Subtle: stop on all counters to avoid race with + * setting our pm callback */ + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + CTRL_READ(low, high, msrs, i); + CTRL_SET_INACTIVE(low); + CTRL_WRITE(low, high, msrs, i); + } +} + + +struct op_x86_model_spec const op_athlon_spec = { + .num_counters = NUM_COUNTERS, + .num_controls = NUM_CONTROLS, + .fill_in_addresses = &athlon_fill_in_addresses, + .setup_ctrs = &athlon_setup_ctrs, + .check_ctrs = &athlon_check_ctrs, + .start = &athlon_start, + .stop = &athlon_stop +}; diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_p4.c xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_p4.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_p4.c 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_p4.c 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,748 @@ +/** + * @file op_model_p4.c + * P4 model-specific MSR operations + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author Graydon Hoare + * + * Modified by Aravind Menon for Xen + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "op_x86_model.h" +#include "op_counter.h" + +#define NUM_EVENTS 39 + +#define NUM_COUNTERS_NON_HT 8 +#define NUM_ESCRS_NON_HT 45 +#define NUM_CCCRS_NON_HT 18 +#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT) + +#define NUM_COUNTERS_HT2 4 +#define NUM_ESCRS_HT2 23 +#define NUM_CCCRS_HT2 9 +#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) + +static unsigned int num_counters = NUM_COUNTERS_NON_HT; + + +/* this has to be checked dynamically since the + hyper-threadedness of a chip is discovered at + kernel boot-time. */ +static inline void setup_num_counters(void) +{ +#ifdef CONFIG_SMP + if (cpu_has_ht) + num_counters = NUM_COUNTERS_HT2; +#endif +} + +static int inline addr_increment(void) +{ +#ifdef CONFIG_SMP + return cpu_has_ht ? 2 : 1; +#else + return 1; +#endif +} + + +/* tables to simulate simplified hardware view of p4 registers */ +struct p4_counter_binding { + int virt_counter; + int counter_address; + int cccr_address; +}; + +struct p4_event_binding { + int escr_select; /* value to put in CCCR */ + int event_select; /* value to put in ESCR */ + struct { + int virt_counter; /* for this counter... */ + int escr_address; /* use this ESCR */ + } bindings[2]; +}; + +/* nb: these CTR_* defines are a duplicate of defines in + event/i386.p4*events. */ + + +#define CTR_BPU_0 (1 << 0) +#define CTR_MS_0 (1 << 1) +#define CTR_FLAME_0 (1 << 2) +#define CTR_IQ_4 (1 << 3) +#define CTR_BPU_2 (1 << 4) +#define CTR_MS_2 (1 << 5) +#define CTR_FLAME_2 (1 << 6) +#define CTR_IQ_5 (1 << 7) + +static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { + { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, + { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, + { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, + { CTR_IQ_4, MSR_P4_IQ_PERFCTR4, MSR_P4_IQ_CCCR4 }, + { CTR_BPU_2, MSR_P4_BPU_PERFCTR2, MSR_P4_BPU_CCCR2 }, + { CTR_MS_2, MSR_P4_MS_PERFCTR2, MSR_P4_MS_CCCR2 }, + { CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 }, + { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } +}; + +#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT + +/* All cccr we don't use. */ +static int p4_unused_cccr[NUM_UNUSED_CCCRS] = { + MSR_P4_BPU_CCCR1, MSR_P4_BPU_CCCR3, + MSR_P4_MS_CCCR1, MSR_P4_MS_CCCR3, + MSR_P4_FLAME_CCCR1, MSR_P4_FLAME_CCCR3, + MSR_P4_IQ_CCCR0, MSR_P4_IQ_CCCR1, + MSR_P4_IQ_CCCR2, MSR_P4_IQ_CCCR3 +}; + +/* p4 event codes in libop/op_event.h are indices into this table. 
*/ + +static struct p4_event_binding p4_events[NUM_EVENTS] = { + + { /* BRANCH_RETIRED */ + 0x05, 0x06, + { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, + {CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* MISPRED_BRANCH_RETIRED */ + 0x04, 0x03, + { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, + { CTR_IQ_5, MSR_P4_CRU_ESCR1} } + }, + + { /* TC_DELIVER_MODE */ + 0x01, 0x01, + { { CTR_MS_0, MSR_P4_TC_ESCR0}, + { CTR_MS_2, MSR_P4_TC_ESCR1} } + }, + + { /* BPU_FETCH_REQUEST */ + 0x00, 0x03, + { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, + { CTR_BPU_2, MSR_P4_BPU_ESCR1} } + }, + + { /* ITLB_REFERENCE */ + 0x03, 0x18, + { { CTR_BPU_0, MSR_P4_ITLB_ESCR0}, + { CTR_BPU_2, MSR_P4_ITLB_ESCR1} } + }, + + { /* MEMORY_CANCEL */ + 0x05, 0x02, + { { CTR_FLAME_0, MSR_P4_DAC_ESCR0}, + { CTR_FLAME_2, MSR_P4_DAC_ESCR1} } + }, + + { /* MEMORY_COMPLETE */ + 0x02, 0x08, + { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, + { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } + }, + + { /* LOAD_PORT_REPLAY */ + 0x02, 0x04, + { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, + { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } + }, + + { /* STORE_PORT_REPLAY */ + 0x02, 0x05, + { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, + { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } + }, + + { /* MOB_LOAD_REPLAY */ + 0x02, 0x03, + { { CTR_BPU_0, MSR_P4_MOB_ESCR0}, + { CTR_BPU_2, MSR_P4_MOB_ESCR1} } + }, + + { /* PAGE_WALK_TYPE */ + 0x04, 0x01, + { { CTR_BPU_0, MSR_P4_PMH_ESCR0}, + { CTR_BPU_2, MSR_P4_PMH_ESCR1} } + }, + + { /* BSQ_CACHE_REFERENCE */ + 0x07, 0x0c, + { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, + { CTR_BPU_2, MSR_P4_BSU_ESCR1} } + }, + + { /* IOQ_ALLOCATION */ + 0x06, 0x03, + { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, + { 0, 0 } } + }, + + { /* IOQ_ACTIVE_ENTRIES */ + 0x06, 0x1a, + { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, + { 0, 0 } } + }, + + { /* FSB_DATA_ACTIVITY */ + 0x06, 0x17, + { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, + { CTR_BPU_2, MSR_P4_FSB_ESCR1} } + }, + + { /* BSQ_ALLOCATION */ + 0x07, 0x05, + { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, + { 0, 0 } } + }, + + { /* BSQ_ACTIVE_ENTRIES */ + 0x07, 0x06, + { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, + { 0, 0 } } + }, + + { /* X87_ASSIST */ + 0x05, 0x03, + { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, + { CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* SSE_INPUT_ASSIST */ + 0x01, 0x34, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* PACKED_SP_UOP */ + 0x01, 0x08, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* PACKED_DP_UOP */ + 0x01, 0x0c, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* SCALAR_SP_UOP */ + 0x01, 0x0a, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* SCALAR_DP_UOP */ + 0x01, 0x0e, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* 64BIT_MMX_UOP */ + 0x01, 0x02, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* 128BIT_MMX_UOP */ + 0x01, 0x1a, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* X87_FP_UOP */ + 0x01, 0x04, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* X87_SIMD_MOVES_UOP */ + 0x01, 0x2e, + { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, + { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } + }, + + { /* MACHINE_CLEAR */ + 0x05, 0x02, + { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, + { CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* GLOBAL_POWER_EVENTS */ + 0x06, 0x13 /* older manual says 0x05, newer 0x13 */, + { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, + { CTR_BPU_2, MSR_P4_FSB_ESCR1} } + }, + + { /* TC_MS_XFER */ + 0x00, 0x05, + 
{ { CTR_MS_0, MSR_P4_MS_ESCR0}, + { CTR_MS_2, MSR_P4_MS_ESCR1} } + }, + + { /* UOP_QUEUE_WRITES */ + 0x00, 0x09, + { { CTR_MS_0, MSR_P4_MS_ESCR0}, + { CTR_MS_2, MSR_P4_MS_ESCR1} } + }, + + { /* FRONT_END_EVENT */ + 0x05, 0x08, + { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, + { CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* EXECUTION_EVENT */ + 0x05, 0x0c, + { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, + { CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* REPLAY_EVENT */ + 0x05, 0x09, + { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, + { CTR_IQ_5, MSR_P4_CRU_ESCR3} } + }, + + { /* INSTR_RETIRED */ + 0x04, 0x02, + { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, + { CTR_IQ_5, MSR_P4_CRU_ESCR1} } + }, + + { /* UOPS_RETIRED */ + 0x04, 0x01, + { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, + { CTR_IQ_5, MSR_P4_CRU_ESCR1} } + }, + + { /* UOP_TYPE */ + 0x02, 0x02, + { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, + { CTR_IQ_5, MSR_P4_RAT_ESCR1} } + }, + + { /* RETIRED_MISPRED_BRANCH_TYPE */ + 0x02, 0x05, + { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, + { CTR_MS_2, MSR_P4_TBPU_ESCR1} } + }, + + { /* RETIRED_BRANCH_TYPE */ + 0x02, 0x04, + { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, + { CTR_MS_2, MSR_P4_TBPU_ESCR1} } + } +}; + + +#define MISC_PMC_ENABLED_P(x) ((x) & 1 << 7) + +#define ESCR_RESERVED_BITS 0x80000003 +#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS) +#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2)) +#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3)) +#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1))) +#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) +#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) +#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) +#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) +#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) + +#define CCCR_RESERVED_BITS 0x38030FFF +#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) +#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000) +#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13)) +#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1<<26)) +#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) +#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) +#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) +#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) +#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) +#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) +#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) + +#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) +#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) +#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) + + +/* this assigns a "stagger" to the current CPU, which is used throughout + the code in this module as an extra array offset, to select the "even" + or "odd" part of all the divided resources. */ +static unsigned int get_stagger(void) +{ +#ifdef CONFIG_SMP + /*int cpu = smp_processor_id(); + return (cpu != first_cpu(cpu_sibling_map[cpu]));*/ + /* We want the two logical cpus of a physical cpu to use + disjoint set of counters. The following code is wrong. */ + return 0; +#endif + return 0; +} + + +/* finally, mediate access to a real hardware counter + by passing a "virtual" counter numer to this macro, + along with your stagger setting. 
*/ +#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger))) + +static unsigned long reset_value[NUM_COUNTERS_NON_HT]; + + +static void p4_fill_in_addresses(struct op_msrs * const msrs) +{ + unsigned int i; + unsigned int addr, stag; + + setup_num_counters(); + stag = get_stagger(); + + /* the counter registers we pay attention to */ + for (i = 0; i < num_counters; ++i) { + msrs->counters[i].addr = + p4_counters[VIRT_CTR(stag, i)].counter_address; + } + + /* FIXME: bad feeling, we don't save the 10 counters we don't use. */ + + /* 18 CCCR registers */ + for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag; + addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + + /* 43 ESCR registers in three or four discontiguous group */ + for (addr = MSR_P4_BSU_ESCR0 + stag; + addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + + /* no IQ_ESCR0/1 on some models, we save a seconde time BSU_ESCR0/1 + * to avoid special case in nmi_{save|restore}_registers() */ + if (boot_cpu_data.x86_model >= 0x3) { + for (addr = MSR_P4_BSU_ESCR0 + stag; + addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + } else { + for (addr = MSR_P4_IQ_ESCR0 + stag; + addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + } + + for (addr = MSR_P4_RAT_ESCR0 + stag; + addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + + for (addr = MSR_P4_MS_ESCR0 + stag; + addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + + for (addr = MSR_P4_IX_ESCR0 + stag; + addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { + msrs->controls[i].addr = addr; + } + + /* there are 2 remaining non-contiguously located ESCRs */ + + if (num_counters == NUM_COUNTERS_NON_HT) { + /* standard non-HT CPUs handle both remaining ESCRs*/ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; + + } else if (stag == 0) { + /* HT CPUs give the first remainder to the even thread, as + the 32nd control register */ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR4; + + } else { + /* and two copies of the second to the odd thread, + for the 22st and 23nd control registers */ + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; + } +} + + +static void pmc_setup_one_p4_counter(unsigned int ctr) +{ + int i; + int const maxbind = 2; + unsigned int cccr = 0; + unsigned int escr = 0; + unsigned int high = 0; + unsigned int counter_bit; + struct p4_event_binding *ev = NULL; + unsigned int stag; + + stag = get_stagger(); + + /* convert from counter *number* to counter *bit* */ + counter_bit = 1 << VIRT_CTR(stag, ctr); + + /* find our event binding structure. 
*/ + if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { + printk(KERN_ERR + "oprofile: P4 event code 0x%lx out of range\n", + counter_config[ctr].event); + return; + } + + ev = &(p4_events[counter_config[ctr].event - 1]); + + for (i = 0; i < maxbind; i++) { + if (ev->bindings[i].virt_counter & counter_bit) { + + /* modify ESCR */ + ESCR_READ(escr, high, ev, i); + ESCR_CLEAR(escr); + if (stag == 0) { + ESCR_SET_USR_0(escr, counter_config[ctr].user); + ESCR_SET_OS_0(escr, counter_config[ctr].kernel); + } else { + ESCR_SET_USR_1(escr, counter_config[ctr].user); + ESCR_SET_OS_1(escr, counter_config[ctr].kernel); + } + ESCR_SET_EVENT_SELECT(escr, ev->event_select); + ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); + ESCR_WRITE(escr, high, ev, i); + + /* modify CCCR */ + CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); + CCCR_CLEAR(cccr); + CCCR_SET_REQUIRED_BITS(cccr); + CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); + if (stag == 0) { + CCCR_SET_PMI_OVF_0(cccr); + } else { + CCCR_SET_PMI_OVF_1(cccr); + } + CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); + return; + } + } + + printk(KERN_ERR + "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", + counter_config[ctr].event, stag, ctr); +} + + +static void p4_setup_ctrs(struct op_msrs const * const msrs) +{ + unsigned int i; + unsigned int low, high; + unsigned int addr; + unsigned int stag; + + stag = get_stagger(); + + rdmsr(MSR_IA32_MISC_ENABLE, low, high); + if (! MISC_PMC_ENABLED_P(low)) { + printk(KERN_ERR "oprofile: P4 PMC not available\n"); + return; + } + + /* clear the cccrs we will use */ + for (i = 0 ; i < num_counters ; i++) { + rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); + CCCR_CLEAR(low); + CCCR_SET_REQUIRED_BITS(low); + wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); + } + + /* clear cccrs outside our concern */ + for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) { + rdmsr(p4_unused_cccr[i], low, high); + CCCR_CLEAR(low); + CCCR_SET_REQUIRED_BITS(low); + wrmsr(p4_unused_cccr[i], low, high); + } + + /* clear all escrs (including those outside our concern) */ + for (addr = MSR_P4_BSU_ESCR0 + stag; + addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) { + wrmsr(addr, 0, 0); + } + + /* On older models clear also MSR_P4_IQ_ESCR0/1 */ + if (boot_cpu_data.x86_model < 0x3) { + wrmsr(MSR_P4_IQ_ESCR0, 0, 0); + wrmsr(MSR_P4_IQ_ESCR1, 0, 0); + } + + for (addr = MSR_P4_RAT_ESCR0 + stag; + addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { + wrmsr(addr, 0, 0); + } + + for (addr = MSR_P4_MS_ESCR0 + stag; + addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ + wrmsr(addr, 0, 0); + } + + for (addr = MSR_P4_IX_ESCR0 + stag; + addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ + wrmsr(addr, 0, 0); + } + + if (num_counters == NUM_COUNTERS_NON_HT) { + wrmsr(MSR_P4_CRU_ESCR4, 0, 0); + wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + } else if (stag == 0) { + wrmsr(MSR_P4_CRU_ESCR4, 0, 0); + } else { + wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + } + + /* setup all counters */ + for (i = 0 ; i < num_counters ; ++i) { + if (counter_config[i].enabled) { + reset_value[i] = counter_config[i].count; + pmc_setup_one_p4_counter(i); + CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); + } else { + reset_value[i] = 0; + } + } +} + + +extern void pmc_log_event(struct domain *d, u64 eip, int mode, int event); +extern int is_profiled(struct domain * d); +extern struct domain * primary_profiler; + +static int p4_check_ctrs(unsigned int const cpu, + struct op_msrs const * const msrs, + struct 
cpu_user_regs * const regs) +{ + unsigned long ctr, low, high, stag, real; + int i, ovf = 0; + u64 eip = regs->eip; + int mode = 0; + struct vcpu *v = current; + + //if (RING_1(regs)) + if (KERNEL_MODE(v, regs)) + mode = 1; + else if (RING_0(regs)) + mode = 2; + + stag = get_stagger(); + + for (i = 0; i < num_counters; ++i) { + if (!reset_value[i]) + continue; + + /* + * there is some eccentricity in the hardware which + * requires that we perform 2 extra corrections: + * + * - check both the CCCR:OVF flag for overflow and the + * counter high bit for un-flagged overflows. + * + * - write the counter back twice to ensure it gets + * updated properly. + * + * the former seems to be related to extra NMIs happening + * during the current NMI; the latter is reported as errata + * N15 in intel doc 249199-029, pentium 4 specification + * update, though their suggested work-around does not + * appear to solve the problem. + */ + + real = VIRT_CTR(stag, i); + + CCCR_READ(low, high, real); + CTR_READ(ctr, high, real); + if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { + pmc_log_event(current->domain, eip, mode, i); + CTR_WRITE(reset_value[i], real); + CCCR_CLEAR_OVF(low); + CCCR_WRITE(low, high, real); + CTR_WRITE(reset_value[i], real); + ovf = 1; + } + } + + /* P4 quirk: you have to re-unmask the apic vector */ + apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); + + /* See op_model_ppro.c */ + return ovf; +} + + +static void p4_start(struct op_msrs const * const msrs) +{ + unsigned int low, high, stag; + int i; + + stag = get_stagger(); + + for (i = 0; i < num_counters; ++i) { + if (!reset_value[i]) + continue; + CCCR_READ(low, high, VIRT_CTR(stag, i)); + CCCR_SET_ENABLE(low); + CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + } +} + + +static void p4_stop(struct op_msrs const * const msrs) +{ + unsigned int low, high, stag; + int i; + + stag = get_stagger(); + + for (i = 0; i < num_counters; ++i) { + CCCR_READ(low, high, VIRT_CTR(stag, i)); + CCCR_SET_DISABLE(low); + CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + } +} + + +#ifdef CONFIG_SMP +struct op_x86_model_spec const op_p4_ht2_spec = { + .num_counters = NUM_COUNTERS_HT2, + .num_controls = NUM_CONTROLS_HT2, + .fill_in_addresses = &p4_fill_in_addresses, + .setup_ctrs = &p4_setup_ctrs, + .check_ctrs = &p4_check_ctrs, + .start = &p4_start, + .stop = &p4_stop +}; +#endif + +struct op_x86_model_spec const op_p4_spec = { + .num_counters = NUM_COUNTERS_NON_HT, + .num_controls = NUM_CONTROLS_NON_HT, + .fill_in_addresses = &p4_fill_in_addresses, + .setup_ctrs = &p4_setup_ctrs, + .check_ctrs = &p4_check_ctrs, + .start = &p4_start, + .stop = &p4_stop +}; diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_ppro.c xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_ppro.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_model_ppro.c 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_model_ppro.c 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,168 @@ +/** + * @file op_model_ppro.h + * pentium pro / P6 model-specific MSR operations + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + * @author Philippe Elie + * @author Graydon Hoare + * + * Modified by Aravind Menon for Xen + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "op_x86_model.h" +#include "op_counter.h" + +#define NUM_COUNTERS 2 +#define NUM_CONTROLS 2 + +#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) +#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1);} while (0) +#define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) + +#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) +#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) +#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) +#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) +#define CTRL_CLEAR(x) (x &= (1<<21)) +#define CTRL_SET_ENABLE(val) (val |= 1<<20) +#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) +#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) +#define CTRL_SET_UM(val, m) (val |= (m << 8)) +#define CTRL_SET_EVENT(val, e) (val |= e) + +static unsigned long reset_value[NUM_COUNTERS]; + +static void ppro_fill_in_addresses(struct op_msrs * const msrs) +{ + msrs->counters[0].addr = MSR_P6_PERFCTR0; + msrs->counters[1].addr = MSR_P6_PERFCTR1; + + msrs->controls[0].addr = MSR_P6_EVNTSEL0; + msrs->controls[1].addr = MSR_P6_EVNTSEL1; +} + + +static void ppro_setup_ctrs(struct op_msrs const * const msrs) +{ + unsigned int low, high; + int i; + + /* clear all counters */ + for (i = 0 ; i < NUM_CONTROLS; ++i) { + CTRL_READ(low, high, msrs, i); + CTRL_CLEAR(low); + CTRL_WRITE(low, high, msrs, i); + } + + /* avoid a false detection of ctr overflows in NMI handler */ + for (i = 0; i < NUM_COUNTERS; ++i) { + CTR_WRITE(1, msrs, i); + } + + /* enable active counters */ + for (i = 0; i < NUM_COUNTERS; ++i) { + if (counter_config[i].enabled) { + reset_value[i] = counter_config[i].count; + + CTR_WRITE(counter_config[i].count, msrs, i); + + CTRL_READ(low, high, msrs, i); + CTRL_CLEAR(low); + CTRL_SET_ENABLE(low); + CTRL_SET_USR(low, counter_config[i].user); + CTRL_SET_KERN(low, counter_config[i].kernel); + CTRL_SET_UM(low, counter_config[i].unit_mask); + CTRL_SET_EVENT(low, counter_config[i].event); + CTRL_WRITE(low, high, msrs, i); + } + } +} + +extern void pmc_log_event(struct domain *d, u64 eip, int mode, int event); +extern int is_profiled(struct domain * d); +extern struct domain * primary_profiler; + +static int ppro_check_ctrs(unsigned int const cpu, + struct op_msrs const * const msrs, + struct cpu_user_regs * const regs) +{ + unsigned int low, high; + int i, ovf = 0; + u64 eip = regs->eip; + int mode = 0; + + if (RING_1(regs)) + mode = 1; + else if (RING_0(regs)) + mode = 2; + + for (i = 0 ; i < NUM_COUNTERS; ++i) { + CTR_READ(low, high, msrs, i); + if (CTR_OVERFLOWED(low)) { + pmc_log_event(current->domain, eip, mode, i); + CTR_WRITE(reset_value[i], msrs, i); + ovf = 1; + } + } + + /* Only P6 based Pentium M need to re-unmask the apic vector but it + * doesn't hurt other P6 variant */ + apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); + + /* We can't work out if we really handled an interrupt. We + * might have caught a *second* counter just after overflowing + * the interrupt for this counter then arrives + * and we don't find a counter that's overflowed, so we + * would return 0 and get dazed + confused. Instead we always + * assume we found an overflow. This sucks. 
+ */ + return ovf; +} + + +static void ppro_start(struct op_msrs const * const msrs) +{ + unsigned int low,high; + CTRL_READ(low, high, msrs, 0); + CTRL_SET_ACTIVE(low); + CTRL_WRITE(low, high, msrs, 0); +} + +static void ppro_stop(struct op_msrs const * const msrs) +{ + unsigned int low,high; + CTRL_READ(low, high, msrs, 0); + CTRL_SET_INACTIVE(low); + CTRL_WRITE(low, high, msrs, 0); +} + +unsigned int read_ctr(struct op_msrs const * const msrs, int i) +{ + unsigned int low, high; + CTR_READ(low, high, msrs, i); + return low; +} + +struct op_x86_model_spec const op_ppro_spec = { + .num_counters = NUM_COUNTERS, + .num_controls = NUM_CONTROLS, + .fill_in_addresses = &ppro_fill_in_addresses, + .setup_ctrs = &ppro_setup_ctrs, + .check_ctrs = &ppro_check_ctrs, + .start = &ppro_start, + .stop = &ppro_stop +}; diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_x86_model.h xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_x86_model.h --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/op_x86_model.h 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/op_x86_model.h 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,55 @@ +/** + * @file op_x86_model.h + * interface to x86 model-specific MSR operations + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author Graydon Hoare + * + * Modified by Aravind Menon for Xen + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. + */ + +#ifndef OP_X86_MODEL_H +#define OP_X86_MODEL_H + +struct op_saved_msr { + unsigned int high; + unsigned int low; +}; + +struct op_msr { + unsigned long addr; + struct op_saved_msr saved; +}; + +struct op_msrs { + struct op_msr * counters; + struct op_msr * controls; +}; + +struct pt_regs; + +/* The model vtable abstracts the differences between + * various x86 CPU model's perfctr support. + */ +struct op_x86_model_spec { + unsigned int const num_counters; + unsigned int const num_controls; + void (*fill_in_addresses)(struct op_msrs * const msrs); + void (*setup_ctrs)(struct op_msrs const * const msrs); + int (*check_ctrs)(unsigned int const cpu, + struct op_msrs const * const msrs, + struct cpu_user_regs * const regs); + void (*start)(struct op_msrs const * const msrs); + void (*stop)(struct op_msrs const * const msrs); +}; + +extern struct op_x86_model_spec const op_ppro_spec; +extern struct op_x86_model_spec const op_p4_spec; +extern struct op_x86_model_spec const op_p4_ht2_spec; +extern struct op_x86_model_spec const op_athlon_spec; + +#endif /* OP_X86_MODEL_H */ diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/pmc.c xen-unstable.hg-20050823/xen/arch/x86/oprofile/pmc.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/oprofile/pmc.c 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/arch/x86/oprofile/pmc.c 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,308 @@ +/* + * Copyright (C) 2005 Hewlett-Packard Co. 
+ * written by Aravind Menon, email: xenoprof@xxxxxxxxxxxxx + */ + +#include +#include + +#include "op_counter.h" + +int active_domains[MAX_OPROF_DOMAINS]; +int passive_domains[MAX_OPROF_DOMAINS]; +unsigned int adomains = 0; +unsigned int pdomains = 0; +unsigned int activated = 0; + +#define VIRQ_BITMASK_SIZE (MAX_OPROF_DOMAINS/32 + 1) + +struct domain * primary_profiler = NULL; +struct domain * adomain_ptrs[MAX_OPROF_DOMAINS]; +unsigned int virq_ovf_pending[VIRQ_BITMASK_SIZE]; + +int is_active(struct domain *d) +{ + int i; + for (i = 0; i < adomains; i++) + if (d->domain_id == active_domains[i]) + return 1; + return 0; +} + +int active_id(struct domain *d) +{ + int i; + for (i = 0; i < adomains; i++) + if (d == adomain_ptrs[i]) + return i; + return -1; +} + +void free_adomain_ptrs() +{ + int i; + int num = adomains; + + adomains = 0; + for (i = 0; i < VIRQ_BITMASK_SIZE; i++) + virq_ovf_pending[i] = 0; + + for (i = 0; i < num; i++) { + put_domain(adomain_ptrs[i]); + adomain_ptrs[i] = NULL; + } +} + +int set_adomain_ptrs(int num) +{ + int i; + struct domain *d; + + for (i = 0; i < VIRQ_BITMASK_SIZE; i++) + virq_ovf_pending[i] = 0; + + for (i = 0; i < num; i++) { + d = find_domain_by_id(active_domains[i]); + if (!d) { + free_adomain_ptrs(); + return -EFAULT; + } + adomain_ptrs[i] = d; + adomains++; + } + return 0; +} + +int set_active(struct domain *d) +{ + if (is_active(d)) + return 0; + /* hack if we run out of space */ + if (adomains >= MAX_OPROF_DOMAINS) { + adomains--; + put_domain(adomain_ptrs[adomains]); + } + active_domains[adomains] = d->domain_id; + if (get_domain(d)) + adomain_ptrs[adomains++] = d; + else { + free_adomain_ptrs(); + return -EFAULT; + } + return 0; +} + +int is_passive(struct domain *d) +{ + int i; + for (i = 0; i < pdomains; i++) + if (d->domain_id == passive_domains[i]) + return 1; + return 0; +} + +int is_profiled(struct domain *d) +{ + if (is_active(d) || is_passive(d)) + return 1; + return 0; +} + +void pmc_log_event(struct domain *d, u64 eip, int mode, int event) +{ + shared_info_t *s = NULL; + struct domain *dest = d; + int head; + int tail; + + if (!is_profiled(d)) + return; + + if (!is_passive(d)) { + s = dest->shared_info; + head = s->event_head; + tail = s->event_tail; + if ((head == tail - 1) || + (head == MAX_OPROF_EVENTS - 1 && tail == 0)) { + s->losing_samples = 1; + s->samples_lost++; + } + else { + s->event_log[head].eip = eip; + s->event_log[head].mode = mode; + s->event_log[head].event = event; + head++; + if (head >= MAX_OPROF_EVENTS) + head = 0; + s->event_head = head; + } + } + /* passive domains */ + else { + dest = primary_profiler; + s = dest->shared_info; + head = s->event_head; + tail = s->event_tail; + + /* We use the following inefficient format for logging + events from other domains. We put a special record + indicating that the next record is for another domain. 
+ This is done for each sample from another domain */ + + head = s->event_head; + if (head >= MAX_OPROF_EVENTS) + head = 0; + /* for passive domains we need to have at least two + entries empty in the buffer */ + if ((head == tail - 1) || + (head == tail - 2) || + (head == MAX_OPROF_EVENTS - 1 && tail <= 1) || + (head == MAX_OPROF_EVENTS - 2 && tail == 0) ) { + s->losing_samples = 1; + s->samples_lost++; + } + else { + s->event_log[head].eip = ~1; + s->event_log[head].mode = ~0; + s->event_log[head].event = d->domain_id; + head++; + if (head >= MAX_OPROF_EVENTS) + head = 0; + s->event_log[head].eip = eip; + s->event_log[head].mode = mode; + s->event_log[head].event = event; + head++; + if (head >= MAX_OPROF_EVENTS) + head = 0; + s->event_head = head; + } + } +} + +static void pmc_event_init(struct domain *d) +{ + shared_info_t *s = d->shared_info; + s->event_head = 0; + s->event_tail = 0; + s->losing_samples = 0; + s->samples_lost = 0; + s->nmi_restarts = 0; + s->active_samples = 0; + s->passive_samples = 0; + s->other_samples = 0; +} + +extern int nmi_init(int *num_events, int *is_primary); +extern int nmi_reserve_counters(void); +extern int nmi_setup_events(void); +extern int nmi_enable_virq(void); +extern int nmi_start(void); +extern void nmi_stop(void); +extern void nmi_disable_virq(void); +extern void nmi_release_counters(void); + +#define PRIV_OP(op) ((op == PMC_SET_ACTIVE) || (op == PMC_SET_PASSIVE) || (op == PMC_RESERVE_COUNTERS) \ + || (op == PMC_SETUP_EVENTS) || (op == PMC_START) || (op == PMC_STOP) \ + || (op == PMC_RELEASE_COUNTERS) || (op == PMC_SHUTDOWN)) + +int do_pmc_op(int op, u64 arg1, u64 arg2) +{ + int ret = 0; + + if (PRIV_OP(op) && current->domain != primary_profiler) + return -EPERM; + + switch (op) { + case PMC_INIT: + printk("PMC_INIT]\n"); + ret = nmi_init((int *)arg1, (int *)arg2); + printk("nmi_init returned %d\n", ret); + break; + + case PMC_SET_ACTIVE: + printk("PMC_SETACTIVE]\n"); + if (adomains != 0) + return -EPERM; + if (copy_from_user((void *)&active_domains, + (void *)arg1, arg2*sizeof(int))) + return -EFAULT; + if (set_adomain_ptrs(arg2)) + return -EFAULT; + if (set_active(current->domain)) + return -EFAULT; + break; + + case PMC_SET_PASSIVE: + printk("PMC_SETPASSIVE\n"); + if (pdomains != 0) + return -EPERM; + if (copy_from_user((void *)&passive_domains, + (void *)arg1, arg2*sizeof(int))) + return -EFAULT; + pdomains = arg2; + break; + + case PMC_RESERVE_COUNTERS: + printk("PMC_RESERVE_COUNTERS\n"); + ret = nmi_reserve_counters(); + break; + + case PMC_SETUP_EVENTS: + printk("PMV_SETUP_EVENTS\n"); + if (copy_from_user((void *)&counter_config, + (void *)arg1, arg2*sizeof(struct op_counter_config))) + return -EFAULT; + ret = nmi_setup_events(); + break; + + case PMC_ENABLE_VIRQ: + printk("PMC_ENABLE_VIRQ\n"); + if (!is_active(current->domain)) { + if (current->domain != primary_profiler) + return -EPERM; + else + set_active(current->domain); + } + ret = nmi_enable_virq(); + pmc_event_init(current->domain); + activated++; + break; + + case PMC_START: + printk("PMC_START\n"); + if (activated < adomains) + return -EPERM; + ret = nmi_start(); + break; + + case PMC_STOP: + printk("PMC_STOP\n"); + nmi_stop(); + break; + + case PMC_DISABLE_VIRQ: + printk("PMC_DISBALE_VIRQ\n"); + if (!is_active(current->domain)) + return -EPERM; + nmi_disable_virq(); + activated--; + break; + + case PMC_RELEASE_COUNTERS: + printk("PMC_RELEASE_COUNTERS\n"); + nmi_release_counters(); + break; + + case PMC_SHUTDOWN: + printk("PMC_SHUTDOWN\n"); + free_adomain_ptrs(); + pdomains = 0; 
+ activated = 0; + primary_profiler = NULL; + break; + + default: + ret = -EINVAL; + } + return ret; +} diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/traps.c xen-unstable.hg-20050823/xen/arch/x86/traps.c --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/traps.c 2005-08-24 07:44:52.000000000 -0500 +++ xen-unstable.hg-20050823/xen/arch/x86/traps.c 2005-08-23 07:05:17.000000000 -0500 @@ -2,6 +2,10 @@ * arch/x86/traps.c * * Modifications to Linux original are copyright (c) 2002-2004, K A Fraser + * + * Modified by Aravind Menon for supporting oprofile + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -54,6 +58,7 @@ #include #include #include +#include /* * opt_nmi: one of 'ignore', 'dom0', or 'fatal'. @@ -1040,7 +1045,7 @@ static void unknown_nmi_error(unsigned c printk("Do you have a strange power saving mode enabled?\n"); } -asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason) +static void default_do_nmi(struct cpu_user_regs * regs, unsigned long reason) { ++nmi_count(smp_processor_id()); @@ -1055,6 +1060,35 @@ asmlinkage void do_nmi(struct cpu_user_r unknown_nmi_error((unsigned char)(reason&0xff)); } +static int dummy_nmi_callback(struct cpu_user_regs * regs, int cpu) +{ + return 0; +} + +static nmi_callback_t nmi_callback = dummy_nmi_callback; + +asmlinkage void do_nmi(struct cpu_user_regs * regs, unsigned long reason) +{ + int cpu; + cpu = smp_processor_id(); + + if (!nmi_callback(regs, cpu)) + default_do_nmi(regs, reason); +} + +void set_nmi_callback(nmi_callback_t callback) +{ + nmi_callback = callback; +} + +void unset_nmi_callback(void) +{ + nmi_callback = dummy_nmi_callback; +} + +EXPORT_SYMBOL(set_nmi_callback); +EXPORT_SYMBOL(unset_nmi_callback); + asmlinkage int math_state_restore(struct cpu_user_regs *regs) { /* Prevent recursion. 
*/ diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/x86_32/entry.S xen-unstable.hg-20050823/xen/arch/x86/x86_32/entry.S --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/x86_32/entry.S 2005-08-24 07:44:52.000000000 -0500 +++ xen-unstable.hg-20050823/xen/arch/x86/x86_32/entry.S 2005-08-23 07:05:17.000000000 -0500 @@ -763,7 +763,8 @@ ENTRY(hypercall_table) .long do_boot_vcpu .long do_ni_hypercall /* 25 */ .long do_mmuext_op - .long do_acm_op /* 27 */ + .long do_acm_op + .long do_pmc_op /* 28 */ .rept NR_hypercalls-((.-hypercall_table)/4) .long do_ni_hypercall .endr diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/arch/x86/x86_64/entry.S xen-unstable.hg-20050823/xen/arch/x86/x86_64/entry.S --- xen-unstable.hg-20050823-nooprofile/xen/arch/x86/x86_64/entry.S 2005-08-24 07:44:52.000000000 -0500 +++ xen-unstable.hg-20050823/xen/arch/x86/x86_64/entry.S 2005-08-23 07:05:17.000000000 -0500 @@ -593,6 +593,7 @@ ENTRY(hypercall_table) .quad do_set_segment_base /* 25 */ .quad do_mmuext_op .quad do_acm_op + .quad do_pmc_op .rept NR_hypercalls-((.-hypercall_table)/4) .quad do_ni_hypercall .endr diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/include/asm-x86/msr.h xen-unstable.hg-20050823/xen/include/asm-x86/msr.h --- xen-unstable.hg-20050823-nooprofile/xen/include/asm-x86/msr.h 2005-08-24 07:44:53.000000000 -0500 +++ xen-unstable.hg-20050823/xen/include/asm-x86/msr.h 2005-08-23 07:05:17.000000000 -0500 @@ -195,6 +195,89 @@ #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 +/* Pentium IV performance counter MSRs */ +#define MSR_P4_BPU_PERFCTR0 0x300 +#define MSR_P4_BPU_PERFCTR1 0x301 +#define MSR_P4_BPU_PERFCTR2 0x302 +#define MSR_P4_BPU_PERFCTR3 0x303 +#define MSR_P4_MS_PERFCTR0 0x304 +#define MSR_P4_MS_PERFCTR1 0x305 +#define MSR_P4_MS_PERFCTR2 0x306 +#define MSR_P4_MS_PERFCTR3 0x307 +#define MSR_P4_FLAME_PERFCTR0 0x308 +#define MSR_P4_FLAME_PERFCTR1 0x309 +#define MSR_P4_FLAME_PERFCTR2 0x30a +#define MSR_P4_FLAME_PERFCTR3 0x30b +#define MSR_P4_IQ_PERFCTR0 0x30c +#define MSR_P4_IQ_PERFCTR1 0x30d +#define MSR_P4_IQ_PERFCTR2 0x30e +#define MSR_P4_IQ_PERFCTR3 0x30f +#define MSR_P4_IQ_PERFCTR4 0x310 +#define MSR_P4_IQ_PERFCTR5 0x311 +#define MSR_P4_BPU_CCCR0 0x360 +#define MSR_P4_BPU_CCCR1 0x361 +#define MSR_P4_BPU_CCCR2 0x362 +#define MSR_P4_BPU_CCCR3 0x363 +#define MSR_P4_MS_CCCR0 0x364 +#define MSR_P4_MS_CCCR1 0x365 +#define MSR_P4_MS_CCCR2 0x366 +#define MSR_P4_MS_CCCR3 0x367 +#define MSR_P4_FLAME_CCCR0 0x368 +#define MSR_P4_FLAME_CCCR1 0x369 +#define MSR_P4_FLAME_CCCR2 0x36a +#define MSR_P4_FLAME_CCCR3 0x36b +#define MSR_P4_IQ_CCCR0 0x36c +#define MSR_P4_IQ_CCCR1 0x36d +#define MSR_P4_IQ_CCCR2 0x36e +#define MSR_P4_IQ_CCCR3 0x36f +#define MSR_P4_IQ_CCCR4 0x370 +#define MSR_P4_IQ_CCCR5 0x371 +#define MSR_P4_ALF_ESCR0 0x3ca +#define MSR_P4_ALF_ESCR1 0x3cb +#define MSR_P4_BPU_ESCR0 0x3b2 +#define MSR_P4_BPU_ESCR1 0x3b3 +#define MSR_P4_BSU_ESCR0 0x3a0 +#define MSR_P4_BSU_ESCR1 0x3a1 +#define MSR_P4_CRU_ESCR0 0x3b8 +#define MSR_P4_CRU_ESCR1 0x3b9 +#define MSR_P4_CRU_ESCR2 0x3cc +#define MSR_P4_CRU_ESCR3 0x3cd +#define MSR_P4_CRU_ESCR4 0x3e0 +#define MSR_P4_CRU_ESCR5 0x3e1 +#define MSR_P4_DAC_ESCR0 0x3a8 +#define MSR_P4_DAC_ESCR1 0x3a9 +#define MSR_P4_FIRM_ESCR0 0x3a4 +#define MSR_P4_FIRM_ESCR1 0x3a5 +#define MSR_P4_FLAME_ESCR0 0x3a6 +#define MSR_P4_FLAME_ESCR1 0x3a7 +#define MSR_P4_FSB_ESCR0 0x3a2 +#define MSR_P4_FSB_ESCR1 0x3a3 +#define MSR_P4_IQ_ESCR0 0x3ba +#define MSR_P4_IQ_ESCR1 0x3bb +#define MSR_P4_IS_ESCR0 0x3b4 +#define MSR_P4_IS_ESCR1 0x3b5 +#define MSR_P4_ITLB_ESCR0 
0x3b6 +#define MSR_P4_ITLB_ESCR1 0x3b7 +#define MSR_P4_IX_ESCR0 0x3c8 +#define MSR_P4_IX_ESCR1 0x3c9 +#define MSR_P4_MOB_ESCR0 0x3aa +#define MSR_P4_MOB_ESCR1 0x3ab +#define MSR_P4_MS_ESCR0 0x3c0 +#define MSR_P4_MS_ESCR1 0x3c1 +#define MSR_P4_PMH_ESCR0 0x3ac +#define MSR_P4_PMH_ESCR1 0x3ad +#define MSR_P4_RAT_ESCR0 0x3bc +#define MSR_P4_RAT_ESCR1 0x3bd +#define MSR_P4_SAAT_ESCR0 0x3ae +#define MSR_P4_SAAT_ESCR1 0x3af +#define MSR_P4_SSU_ESCR0 0x3be +#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ +#define MSR_P4_TBPU_ESCR0 0x3c2 +#define MSR_P4_TBPU_ESCR1 0x3c3 +#define MSR_P4_TC_ESCR0 0x3c4 +#define MSR_P4_TC_ESCR1 0x3c5 +#define MSR_P4_U2L_ESCR0 0x3b0 +#define MSR_P4_U2L_ESCR1 0x3b1 /* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */ #define MSR_K7_EVNTSEL0 0xC0010000 diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/include/asm-x86/nmi.h xen-unstable.hg-20050823/xen/include/asm-x86/nmi.h --- xen-unstable.hg-20050823-nooprofile/xen/include/asm-x86/nmi.h 1969-12-31 18:00:00.000000000 -0600 +++ xen-unstable.hg-20050823/xen/include/asm-x86/nmi.h 2005-08-23 07:05:17.000000000 -0500 @@ -0,0 +1,26 @@ +/* + * linux/include/asm-i386/nmi.h + */ +#ifndef ASM_NMI_H +#define ASM_NMI_H + +struct cpu_user_regs; + +typedef int (*nmi_callback_t)(struct cpu_user_regs * regs, int cpu); + +/** + * set_nmi_callback + * + * Set a handler for an NMI. Only one handler may be + * set. Return 1 if the NMI was handled. + */ +void set_nmi_callback(nmi_callback_t callback); + +/** + * unset_nmi_callback + * + * Remove the handler previously set. + */ +void unset_nmi_callback(void); + +#endif /* ASM_NMI_H */ diff -Naurp xen-unstable.hg-20050823-nooprofile/xen/include/public/xen.h xen-unstable.hg-20050823/xen/include/public/xen.h --- xen-unstable.hg-20050823-nooprofile/xen/include/public/xen.h 2005-08-24 07:44:53.000000000 -0500 +++ xen-unstable.hg-20050823/xen/include/public/xen.h 2005-08-23 07:05:17.000000000 -0500 @@ -4,6 +4,10 @@ * Guest OS interface to Xen. * * Copyright (c) 2004, K A Fraser + * + * Modified by Aravind Menon for supporting oprofile + * These modifications are: + * Copyright (C) 2005 Hewlett-Packard Co. */ #ifndef __XEN_PUBLIC_XEN_H__ @@ -59,6 +63,7 @@ #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_acm_op 27 +#define __HYPERVISOR_pmc_op 28 /* * VIRTUAL INTERRUPTS @@ -72,7 +77,8 @@ #define VIRQ_PARITY_ERR 4 /* (DOM0) NMI parity error. */ #define VIRQ_IO_ERR 5 /* (DOM0) NMI I/O error. */ #define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ -#define NR_VIRQS 7 +#define VIRQ_PMC_OVF 7 /* PMC Overflow */ +#define NR_VIRQS 8 /* * MMU-UPDATE REQUESTS @@ -240,6 +246,21 @@ struct mmuext_op { #define VMASST_TYPE_writable_pagetables 2 #define MAX_VMASST_TYPE 2 +/* + * Commands to HYPERVISOR_pmc_op(). + */ +#define PMC_INIT 0 +#define PMC_SET_ACTIVE 1 +#define PMC_SET_PASSIVE 2 +#define PMC_RESERVE_COUNTERS 3 +#define PMC_SETUP_EVENTS 4 +#define PMC_ENABLE_VIRQ 5 +#define PMC_START 6 +#define PMC_STOP 7 +#define PMC_DISABLE_VIRQ 8 +#define PMC_RELEASE_COUNTERS 9 +#define PMC_SHUTDOWN 10 + #ifndef __ASSEMBLY__ typedef u16 domid_t; @@ -292,6 +313,8 @@ typedef struct /* Event channel endpoints per domain. */ #define NR_EVENT_CHANNELS 1024 +#define MAX_OPROF_EVENTS 32 +#define MAX_OPROF_DOMAINS 25 /* * Per-VCPU information goes here. This will be cleaned up more when Xen * actually supports multi-VCPU guests. 
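
[Illustrative note: the PMC_* commands defined above are only meaningful in a particular order, which do_pmc_op enforces through the primary_profiler check and the activated/adomains counters. The sketch below shows one plausible call sequence for the primary profiler (typically domain 0). It is not part of the patch: HYPERVISOR_pmc_op() is an assumed guest-side wrapper around hypercall 28, start_xen_profiling() is a hypothetical helper, and error handling is abbreviated.]

/*
 * Sketch only: how the primary profiler might drive the pmc_op
 * hypercall.  HYPERVISOR_pmc_op() is an assumed guest wrapper around
 * __HYPERVISOR_pmc_op; it is not provided by this patch.
 */
static int start_xen_profiling(int *active_doms, int n_active,
                               struct op_counter_config *ctr, int n_ctrs)
{
    int num_events = 0, is_primary = 0, ret;

    /* Learn how many counters exist and whether we are the primary profiler. */
    ret = HYPERVISOR_pmc_op(PMC_INIT, (u64)(unsigned long)&num_events,
                            (u64)(unsigned long)&is_primary);
    if (ret)
        return ret;
    if (!is_primary)
        return -EPERM;  /* only the primary may issue the commands below */

    /* Name the domains to be profiled. */
    ret = HYPERVISOR_pmc_op(PMC_SET_ACTIVE,
                            (u64)(unsigned long)active_doms, n_active);
    if (ret)
        return ret;

    /* Claim the performance counter MSRs and program the events. */
    ret = HYPERVISOR_pmc_op(PMC_RESERVE_COUNTERS, 0, 0);
    if (ret)
        return ret;
    ret = HYPERVISOR_pmc_op(PMC_SETUP_EVENTS, (u64)(unsigned long)ctr, n_ctrs);
    if (ret)
        return ret;

    /* Ask for VIRQ_PMC_OVF delivery and mark this domain ready.  PMC_START
     * succeeds only once every active domain has issued PMC_ENABLE_VIRQ. */
    ret = HYPERVISOR_pmc_op(PMC_ENABLE_VIRQ, 0, 0);
    if (ret)
        return ret;

    return HYPERVISOR_pmc_op(PMC_START, 0, 0);
}

Passive domains never issue the privileged commands; they are only named via PMC_SET_PASSIVE by the primary profiler, and their samples reach it through the shared_info event log added below, each preceded by the eip == ~1 escape entry written by the sample-buffer code earlier in the patch.
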
@@ -408,6 +431,21 @@ typedef struct shared_info { arch_shared_info_t arch; + /* Oprofile structures */ + u8 event_head; + u8 event_tail; + struct { + u64 eip; + u8 mode; + u8 event; + } event_log[MAX_OPROF_EVENTS]; + u8 losing_samples; + u64 samples_lost; + u32 nmi_restarts; + u64 active_samples; + u64 passive_samples; + u64 other_samples; + } shared_info_t; /*