# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1161347000 -32400
# Node ID 79b5aa23ba53eb1f88abed7b015486731f098efe
# Parent 61fc35f8aded1331e90fe67b3e5a599edcfd5f57
copy linux/arch/i386/oprofile/xenoprof.c to arch/ia64/oprofile/
PATCHNAME: copy_from_x86_xenoprof_c_linux_side

Signed-off-by: Isaku Yamahata

diff -r 61fc35f8aded -r 79b5aa23ba53 linux-2.6-xen-sparse/arch/ia64/oprofile/xenoprof.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/arch/ia64/oprofile/xenoprof.c	Fri Oct 20 21:23:20 2006 +0900
@@ -0,0 +1,584 @@
+/**
+ * @file xenoprof.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * Modified by Aravind Menon and Jose Renato Santos for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/oprofile.h>
+#include <linux/sysdev.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <asm/pgtable.h>
+#include <xen/evtchn.h>
+#include "op_counter.h"
+
+#include <xen/driver_util.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/xenoprof.h>
+#include <../../../drivers/oprofile/cpu_buffer.h>
+#include <../../../drivers/oprofile/event_buffer.h>
+
+#define MAX_XENOPROF_SAMPLES 16
+
+static int xenoprof_start(void);
+static void xenoprof_stop(void);
+
+static int xenoprof_enabled = 0;
+static unsigned int num_events = 0;
+static int is_primary = 0;
+static int active_defined;
+
+/* sample buffers shared with Xen */
+xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
+/* Shared buffer area */
+char * shared_buffer = NULL;
+/* Number of buffers in shared area (one per VCPU) */
+int nbuf;
+/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
+int ovf_irq[NR_CPUS];
+/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
+char cpu_type[XENOPROF_CPU_TYPE_SIZE];
+
+/* Passive sample buffers shared with Xen */
+xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
+/* Passive shared buffer area */
+char *p_shared_buffer[MAX_OPROF_DOMAINS];
+
+#ifdef CONFIG_PM
+
+static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
+{
+	if (xenoprof_enabled == 1)
+		xenoprof_stop();
+	return 0;
+}
+
+
+static int xenoprof_resume(struct sys_device * dev)
+{
+	if (xenoprof_enabled == 1)
+		xenoprof_start();
+	return 0;
+}
+
+
+static struct sysdev_class oprofile_sysclass = {
+	set_kset_name("oprofile"),
+	.resume		= xenoprof_resume,
+	.suspend	= xenoprof_suspend
+};
+
+
+static struct sys_device device_oprofile = {
+	.id	= 0,
+	.cls	= &oprofile_sysclass,
+};
+
+
+static int __init init_driverfs(void)
+{
+	int error;
+	if (!(error = sysdev_class_register(&oprofile_sysclass)))
+		error = sysdev_register(&device_oprofile);
+	return error;
+}
+
+
+static void __exit exit_driverfs(void)
+{
+	sysdev_unregister(&device_oprofile);
+	sysdev_class_unregister(&oprofile_sysclass);
+}
+
+#else
+#define init_driverfs() do { } while (0)
+#define exit_driverfs() do { } while (0)
+#endif /* CONFIG_PM */
+
+unsigned long long oprofile_samples = 0;
+unsigned long long p_oprofile_samples = 0;
+
+unsigned int pdomains;
+struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
+
+static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
+{
+	int head, tail, size;
+
+	head = buf->event_head;
+	tail = buf->event_tail;
+	size = buf->event_size;
+
+	if (tail > head) {
+		while (tail < size) {
+			oprofile_add_pc(buf->event_log[tail].eip,
+					buf->event_log[tail].mode,
+					buf->event_log[tail].event);
+			if (!is_passive)
+				oprofile_samples++;
+			else
+				p_oprofile_samples++;
+			tail++;
+		}
+		tail = 0;
+	}
+	while (tail < head) {
+		oprofile_add_pc(buf->event_log[tail].eip,
+				buf->event_log[tail].mode,
+				buf->event_log[tail].event);
+		if (!is_passive)
+			oprofile_samples++;
+		else
+			p_oprofile_samples++;
+		tail++;
+	}
+
+	buf->event_tail = tail;
+}
+
+static void xenoprof_handle_passive(void)
+{
+	int i, j;
+	int flag_domain, flag_switch = 0;
+
+	for (i = 0; i < pdomains; i++) {
+		flag_domain = 0;
+		for (j = 0; j < passive_domains[i].nbuf; j++) {
+			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
+			if (buf->event_head == buf->event_tail)
+				continue;
+			if (!flag_domain) {
+				if (!oprofile_add_domain_switch(
+					    passive_domains[i].domain_id))
+					goto done;
+				flag_domain = 1;
+			}
+			xenoprof_add_pc(buf, 1);
+			flag_switch = 1;
+		}
+	}
+done:
+	if (flag_switch)
+		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
+}
+
+static irqreturn_t
+xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+	struct xenoprof_buf * buf;
+	int cpu;
+	static unsigned long flag;
+
+	cpu = smp_processor_id();
+	buf = xenoprof_buf[cpu];
+
+	xenoprof_add_pc(buf, 0);
+
+	if (is_primary && !test_and_set_bit(0, &flag)) {
+		xenoprof_handle_passive();
+		smp_mb__before_clear_bit();
+		clear_bit(0, &flag);
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+static void unbind_virq(void)
+{
+	int i;
+
+	for_each_cpu(i) {
+		if (ovf_irq[i] >= 0) {
+			unbind_from_irqhandler(ovf_irq[i], NULL);
+			ovf_irq[i] = -1;
+		}
+	}
+}
+
+
+static int bind_virq(void)
+{
+	int i, result;
+
+	for_each_cpu(i) {
+		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
+						 i,
+						 xenoprof_ovf_interrupt,
+						 SA_INTERRUPT,
+						 "xenoprof",
+						 NULL);
+
+		if (result < 0) {
+			unbind_virq();
+			return result;
+		}
+
+		ovf_irq[i] = result;
+	}
+
+	return 0;
+}
+
+
+static int map_xenoprof_buffer(int max_samples)
+{
+	struct xenoprof_get_buffer get_buffer;
+	struct xenoprof_buf *buf;
+	int npages, ret, i;
+	struct vm_struct *area;
+
+	if ( shared_buffer )
+		return 0;
+
+	get_buffer.max_samples = max_samples;
+
+	if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
+		return ret;
+
+	nbuf = get_buffer.nbuf;
+	npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;
+
+	area = alloc_vm_area(npages * PAGE_SIZE);
+	if (area == NULL)
+		return -ENOMEM;
+
+	if ( (ret = direct_kernel_remap_pfn_range(
+		      (unsigned long)area->addr,
+		      get_buffer.buf_maddr >> PAGE_SHIFT,
+		      npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) ) {
+		vunmap(area->addr);
+		return ret;
+	}
+
+	shared_buffer = area->addr;
+	for (i = 0; i < nbuf; i++) {
+		buf = (struct xenoprof_buf*)
+			&shared_buffer[i * get_buffer.bufsize];
+		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+		xenoprof_buf[buf->vcpu_id] = buf;
+	}
+
+	return 0;
+}
+
+
+static int xenoprof_setup(void)
+{
+	int ret;
+	int i;
+
+	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
+		return ret;
+
+	if ( (ret = bind_virq()) )
+		return ret;
+
+	if (is_primary) {
+		struct xenoprof_counter counter;
+
+		/* Define dom0 as an active domain if not done yet */
+		if (!active_defined) {
+			domid_t domid;
+			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
+						     NULL);
+			if (ret)
+				goto err;
+			domid = 0;
+			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+			if (ret)
+				goto err;
+			active_defined = 1;
+		}
+
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
+		if (ret)
+			goto err;
+		for (i = 0; i < num_events; i++) {
+			counter.ind       = i;
+			counter.count     = (uint64_t)counter_config[i].count;
+			counter.enabled   = (uint32_t)counter_config[i].enabled;
+			counter.event     = (uint32_t)counter_config[i].event;
+			counter.kernel    = (uint32_t)counter_config[i].kernel;
+			counter.user      = (uint32_t)counter_config[i].user;
+			counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
+			HYPERVISOR_xenoprof_op(XENOPROF_counter,
+					       &counter);
+		}
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
+		if (ret)
+			goto err;
+	}
+
+	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
+	if (ret)
+		goto err;
+
+	xenoprof_enabled = 1;
+	return 0;
+ err:
+	unbind_virq();
+	return ret;
+}
+
+
+static void xenoprof_shutdown(void)
+{
+	xenoprof_enabled = 0;
+
+	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);
+
+	if (is_primary) {
+		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
+		active_defined = 0;
+	}
+
+	unbind_virq();
+}
+
+
+static int xenoprof_start(void)
+{
+	int ret = 0;
+
+	if (is_primary)
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
+
+	if (!ret)
+		oprofile_start();
+
+	return ret;
+}
+
+
+static void xenoprof_stop(void)
+{
+	if (is_primary)
+		HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
+	oprofile_stop();
+}
+
+
+static int xenoprof_set_active(int * active_domains,
+			       unsigned int adomains)
+{
+	int ret = 0;
+	int i;
+	int set_dom0 = 0;
+	domid_t domid;
+
+	if (!is_primary)
+		return 0;
+
+	if (adomains > MAX_OPROF_DOMAINS)
+		return -E2BIG;
+
+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < adomains; i++) {
+		domid = active_domains[i];
+		if (domid != active_domains[i]) {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+		if (ret)
+			goto out;
+		if (active_domains[i] == 0)
+			set_dom0 = 1;
+	}
+	/* dom0 must always be active but may not be in the list */
+	if (!set_dom0) {
+		domid = 0;
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
+	}
+
+out:
+	if (ret)
+		HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
+	active_defined = !ret;
+	return ret;
+}
+
+static int xenoprof_set_passive(int * p_domains,
+				unsigned int pdoms)
+{
+	int ret;
+	int i, j;
+	int npages;
+	struct xenoprof_buf *buf;
+	struct vm_struct *area;
+	pgprot_t prot = __pgprot(_KERNPG_TABLE);
+
+	if (!is_primary)
+		return 0;
+
+	if (pdoms > MAX_OPROF_DOMAINS)
+		return -E2BIG;
+
+	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pdoms; i++) {
+		passive_domains[i].domain_id = p_domains[i];
+		passive_domains[i].max_samples = 2048;
+		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive,
+					     &passive_domains[i]);
+		if (ret)
+			goto out;
+
+		npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
+
+		area = alloc_vm_area(npages * PAGE_SIZE);
+		if (area == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = direct_kernel_remap_pfn_range(
+			(unsigned long)area->addr,
+			passive_domains[i].buf_maddr >> PAGE_SHIFT,
+			npages * PAGE_SIZE, prot, DOMID_SELF);
+		if (ret) {
+			vunmap(area->addr);
+			goto out;
+		}
+
+		p_shared_buffer[i] = area->addr;
+
+		for (j = 0; j < passive_domains[i].nbuf; j++) {
+			buf = (struct xenoprof_buf *)
+				&p_shared_buffer[i][j * passive_domains[i].bufsize];
+			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+			p_xenoprof_buf[i][buf->vcpu_id] = buf;
+		}
+	}
+
+	pdomains = pdoms;
+	return 0;
+
+out:
+	for (j = 0; j < i; j++) {
+		vunmap(p_shared_buffer[j]);
+		p_shared_buffer[j] = NULL;
+	}
+
+	return ret;
+}
+
+struct op_counter_config counter_config[OP_MAX_COUNTER];
+
+static int xenoprof_create_files(struct super_block * sb, struct dentry * root)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_events; ++i) {
+		struct dentry * dir;
+		char buf[2];
+
+		snprintf(buf, 2, "%d", i);
+		dir = oprofilefs_mkdir(sb, root, buf);
+		oprofilefs_create_ulong(sb, dir, "enabled",
+					&counter_config[i].enabled);
+		oprofilefs_create_ulong(sb, dir, "event",
+					&counter_config[i].event);
+		oprofilefs_create_ulong(sb, dir, "count",
+					&counter_config[i].count);
+		oprofilefs_create_ulong(sb, dir, "unit_mask",
+					&counter_config[i].unit_mask);
+		oprofilefs_create_ulong(sb, dir, "kernel",
+					&counter_config[i].kernel);
+		oprofilefs_create_ulong(sb, dir, "user",
+					&counter_config[i].user);
+	}
+
+	return 0;
+}
+
+
+struct oprofile_operations xenoprof_ops = {
+	.create_files	= xenoprof_create_files,
+	.set_active	= xenoprof_set_active,
+	.set_passive	= xenoprof_set_passive,
+	.setup		= xenoprof_setup,
+	.shutdown	= xenoprof_shutdown,
+	.start		= xenoprof_start,
+	.stop		= xenoprof_stop
+};
+
+
+/* in order to get driverfs right */
+static int using_xenoprof;
+
+int __init oprofile_arch_init(struct oprofile_operations * ops)
+{
+	struct xenoprof_init init;
+	int ret, i;
+
+	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
+
+	if (!ret) {
+		num_events = init.num_events;
+		is_primary = init.is_primary;
+
+		/* just in case - make sure we do not overflow event list
+		   (i.e. counter_config list) */
+		if (num_events > OP_MAX_COUNTER)
+			num_events = OP_MAX_COUNTER;
+
+		/* cpu_type is detected by Xen */
+		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
+		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
+		xenoprof_ops.cpu_type = cpu_type;
+
+		init_driverfs();
+		using_xenoprof = 1;
+		*ops = xenoprof_ops;
+
+		for (i = 0; i < NR_CPUS; i++)
+			ovf_irq[i] = -1;
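
A note for reviewers who want to poke at the buffer handling without a Xen setup: each per-VCPU sample buffer is a single-producer/single-consumer ring, and xenoprof_add_pc() above drains it by walking event_tail toward event_head, wrapping once at event_size. The stand-alone sketch below reproduces just that drain loop so it can be compiled and run in userspace. struct mock_buf, struct mock_sample, drain() and MOCK_RING_SIZE are illustrative stand-ins, not the real xenoprof_buf_t layout from the Xen interface headers.

/* Stand-alone sketch of the ring drain done by xenoprof_add_pc(). */
#include <stdio.h>

#define MOCK_RING_SIZE 8

struct mock_sample { unsigned long eip; int mode; int event; };

struct mock_buf {
	int event_head;				/* next slot the producer writes  */
	int event_tail;				/* next slot the consumer reads   */
	int event_size;				/* number of slots in the ring    */
	struct mock_sample event_log[MOCK_RING_SIZE];
};

/* Consume [tail, head) in ring order, wrapping at event_size, then
 * publish the new tail - the same two-phase walk as xenoprof_add_pc(). */
static int drain(struct mock_buf *buf)
{
	int head = buf->event_head;
	int tail = buf->event_tail;
	int consumed = 0;

	if (tail > head) {			/* producer already wrapped */
		while (tail < buf->event_size) {
			printf("sample eip=%#lx mode=%d event=%d\n",
			       buf->event_log[tail].eip,
			       buf->event_log[tail].mode,
			       buf->event_log[tail].event);
			consumed++;
			tail++;
		}
		tail = 0;
	}
	while (tail < head) {
		printf("sample eip=%#lx mode=%d event=%d\n",
		       buf->event_log[tail].eip,
		       buf->event_log[tail].mode,
		       buf->event_log[tail].event);
		consumed++;
		tail++;
	}

	buf->event_tail = tail;			/* hand the slots back */
	return consumed;
}

int main(void)
{
	/* Pretend the producer wrapped: slots 6, 7, 0, 1 hold samples
	 * and head now points at slot 2. */
	struct mock_buf buf = { .event_head = 2, .event_tail = 6,
				.event_size = MOCK_RING_SIZE };
	int i;

	for (i = 0; i < MOCK_RING_SIZE; i++)
		buf.event_log[i] = (struct mock_sample){ 0x1000 + i, 1, 0 };

	printf("consumed %d samples, tail now %d\n",
	       drain(&buf), buf.event_tail);
	return 0;
}

Compiled with gcc and run, this prints the four samples in ring order (slots 6, 7, 0, 1) and leaves event_tail equal to event_head, which is the empty-ring condition xenoprof_handle_passive() checks before emitting a domain switch. As in the patch, only the producer side moves event_head and only the consumer side moves event_tail.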