# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 8ab9b43ad5574e19a1b754597a16832f80e6eba4
# Parent 112e0e3b48525157c0968d3323a2629fe9a8e6de
[IA64] xenoprof ia64 xen side
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Alex Williamson <alex.williamson@xxxxxx>
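
This patch reuses the Linux/ia64 perfmon subsystem inside Xen to
implement the IA64 side of xenoprof: one pfm_context_t is instantiated
per physical CPU, and dom0 drives the machinery through the new
IA64_DOM0VP_perfmon hypercall, which dispatches to do_perfmon_op().
As a rough sketch of the intended dom0-side call sequence (illustrative
only: dom0vp_perfmon() is a hypothetical wrapper around the
IA64_DOM0VP_perfmon hypercall; the PFM_* commands and pfarg_* types are
the ones handled by do_perfmon_op() below):

    /* hypothetical wrapper: arg0 = PFM_* command, arg1 = buffer, arg2 = count */
    extern long dom0vp_perfmon(unsigned long cmd, void *arg, unsigned long count);

    static long profile_system_wide(pfarg_reg_t *pmcs, unsigned long npmcs,
                                    pfarg_reg_t *pmds, unsigned long npmds)
    {
        pfarg_context_t ctx = { .ctx_flags = PFM_FL_SYSTEM_WIDE };
        pfarg_load_t load = { .load_pid = 0 };
        long err;

        /* the hypervisor creates one context per physical cpu */
        if ((err = dom0vp_perfmon(PFM_CREATE_CONTEXT, &ctx, 1)))
            return err;
        /* counters may only be programmed while the contexts are unloaded */
        if ((err = dom0vp_perfmon(PFM_WRITE_PMCS, pmcs, npmcs)) ||
            (err = dom0vp_perfmon(PFM_WRITE_PMDS, pmds, npmds)) ||
            (err = dom0vp_perfmon(PFM_LOAD_CONTEXT, &load, 0)) ||
            (err = dom0vp_perfmon(PFM_START, NULL, 0)))
            dom0vp_perfmon(PFM_DESTROY_CONTEXT, NULL, 0);
        return err;
    }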
---
xen/arch/ia64/Rules.mk | 1
xen/arch/ia64/linux-xen/Makefile | 2
xen/arch/ia64/linux-xen/perfmon.c | 976 ++++++++++++++++++++++++-
xen/arch/ia64/linux-xen/perfmon_default_smpl.c | 16
xen/arch/ia64/linux/Makefile | 1
xen/arch/ia64/xen/Makefile | 2
xen/arch/ia64/xen/dom0_ops.c | 6
xen/arch/ia64/xen/domain.c | 32
xen/arch/ia64/xen/hypercall.c | 2
xen/arch/ia64/xen/oprofile/Makefile | 1
xen/arch/ia64/xen/oprofile/perfmon.c | 258 ++++--
xen/arch/ia64/xen/oprofile/xenoprof.c | 56 +
xen/include/asm-ia64/config.h | 3
xen/include/asm-ia64/domain.h | 4
xen/include/asm-ia64/linux-xen/asm/perfmon.h | 8
xen/include/asm-ia64/xenoprof.h | 64 +
xen/include/public/arch-ia64.h | 22
17 files changed, 1356 insertions(+), 98 deletions(-)
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/Rules.mk Tue Nov 28 21:35:13 2006 -0700
@@ -3,6 +3,7 @@
HAS_ACPI := y
HAS_VGA := y
+xenoprof := y
VALIDATE_VT ?= n
no_warns ?= n
xen_ia64_expose_p2m ?= y
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/linux-xen/Makefile
--- a/xen/arch/ia64/linux-xen/Makefile Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/linux-xen/Makefile Tue Nov 28 21:35:13 2006 -0700
@@ -18,3 +18,5 @@ obj-y += iosapic.o
obj-y += iosapic.o
obj-y += numa.o
obj-y += mm_numa.o
+obj-y += perfmon.o
+obj-y += perfmon_default_smpl.o
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/linux-xen/perfmon.c
--- a/xen/arch/ia64/linux-xen/perfmon.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/linux-xen/perfmon.c Tue Nov 28 21:35:13 2006 -0700
@@ -17,6 +17,12 @@
*
* More information about perfmon available at:
* http://www.hpl.hp.com/research/linux/perfmon
+ *
+ *
+ * For Xen/IA64 xenoprof
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
*/
#include <linux/config.h>
@@ -42,7 +48,11 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#ifndef XEN
#include <asm/errno.h>
+#else
+#include <xen/errno.h>
+#endif
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
@@ -51,6 +61,15 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
+
+#ifdef XEN
+#include <xen/guest_access.h>
+#include <asm/hw_irq.h>
+#define CONFIG_PERFMON
+#define pid vcpu_id
+#define thread arch._thread
+#define task_pt_regs vcpu_regs
+#endif
#ifdef CONFIG_PERFMON
/*
@@ -287,7 +306,9 @@ typedef struct pfm_context {
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
+#ifndef XEN
struct completion ctx_restart_done; /* use for blocking notification mode */
+#endif
unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
@@ -320,6 +341,7 @@ typedef struct pfm_context {
unsigned long ctx_smpl_size; /* size of sampling buffer */
void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
+#ifndef XEN
wait_queue_head_t ctx_msgq_wait;
pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
int ctx_msgq_head;
@@ -327,6 +349,7 @@ typedef struct pfm_context {
struct fasync_struct *ctx_async_queue;
wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
+#endif
} pfm_context_t;
/*
@@ -371,6 +394,9 @@ typedef struct {
unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
+#ifdef XEN
+#define XENOPROF_TASK ((struct task_struct*)1)
+#endif
} pfm_session_t;
/*
@@ -499,10 +525,14 @@ static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_session_t pfm_sessions; /* global sessions information */
+#ifndef XEN
static DEFINE_SPINLOCK(pfm_alt_install_check);
+#endif
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
+#ifndef XEN
static struct proc_dir_entry *perfmon_dir;
+#endif
static pfm_uuid_t pfm_null_uuid = {0,};
static spinlock_t pfm_buffer_fmt_lock;
@@ -514,6 +544,7 @@ pfm_sysctl_t pfm_sysctl;
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
+#ifndef XEN
static ctl_table pfm_ctl_table[]={
{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL,
&proc_dointvec, NULL,},
{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL,
&proc_dointvec, NULL,},
@@ -533,10 +564,12 @@ static struct ctl_table_header *pfm_sysc
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int pfm_flush(struct file *filp);
+#endif
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
+#ifndef XEN
static inline void
pfm_put_task(struct task_struct *task)
{
@@ -568,6 +601,7 @@ pfm_unreserve_page(unsigned long a)
{
ClearPageReserved(vmalloc_to_page((void*)a));
}
+#endif
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
@@ -582,6 +616,7 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x
spin_unlock(&(x)->ctx_lock);
}
+#ifndef XEN
static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
@@ -606,16 +641,19 @@ static struct file_system_type pfm_fs_ty
.get_sb = pfmfs_get_sb,
.kill_sb = kill_anon_super,
};
+#endif
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+#ifndef XEN
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
/* forward declaration */
static struct file_operations pfm_file_ops;
+#endif
/*
* forward declarations
@@ -641,7 +679,9 @@ static pmu_config_t *pmu_confs[]={
};
+#ifndef XEN
static int pfm_end_notify_user(pfm_context_t *ctx);
+#endif
static inline void
pfm_clear_psr_pp(void)
@@ -750,6 +790,7 @@ pfm_write_soft_counter(pfm_context_t *ct
ia64_set_pmd(i, val & ovfl_val);
}
+#ifndef XEN
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
@@ -837,6 +878,7 @@ pfm_rvfree(void *mem, unsigned long size
}
return;
}
+#endif
static pfm_context_t *
pfm_context_alloc(void)
@@ -864,6 +906,7 @@ pfm_context_free(pfm_context_t *ctx)
}
}
+#ifndef XEN
static void
pfm_mask_monitoring(struct task_struct *task)
{
@@ -1034,6 +1077,7 @@ pfm_restore_monitoring(struct task_struc
}
pfm_set_psr_l(psr);
}
+#endif
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
@@ -1047,6 +1091,7 @@ pfm_save_pmds(unsigned long *pmds, unsig
}
}
+#ifndef XEN
/*
* reload from thread state (used for ctxw only)
*/
@@ -1100,7 +1145,37 @@ pfm_copy_pmds(struct task_struct *task,
ctx->ctx_pmds[i].val));
}
}
-
+#else
+static inline void
+xenpfm_restore_pmds(pfm_context_t* ctx)
+{
+ int i;
+ unsigned long ovfl_val = pmu_conf->ovfl_val;
+ unsigned long mask = ctx->ctx_all_pmds[0];
+ unsigned long val;
+
+ for (i = 0; mask; i++, mask >>= 1) {
+ if ((mask & 0x1) == 0)
+ continue;
+
+ val = ctx->ctx_pmds[i].val;
+ /*
+ * We break up the 64 bit value into 2 pieces
+ * the lower bits go to the machine state in the
+ * thread (will be reloaded on ctxsw in).
+ * The upper part stays in the soft-counter.
+ */
+ if (PMD_IS_COUNTING(i)) {
+ ctx->ctx_pmds[i].val = val & ~ovfl_val;
+ val &= ovfl_val;
+ }
+ ia64_set_pmd(i, val);
+ }
+ ia64_srlz_d();
+}
+#endif
+
+#ifndef XEN
/*
* propagate PMC from context to thread-state
*/
@@ -1133,6 +1208,23 @@ pfm_restore_pmcs(unsigned long *pmcs, un
}
ia64_srlz_d();
}
+#else
+static inline void
+xenpfm_restore_pmcs(pfm_context_t* ctx)
+{
+ int i;
+ unsigned long mask = ctx->ctx_all_pmcs[0];
+
+ for (i = 0; mask; i++, mask >>= 1) {
+ if ((mask & 0x1) == 0)
+ continue;
+ ia64_set_pmc(i, ctx->ctx_pmcs[i]);
+ DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
+ }
+ ia64_srlz_d();
+
+}
+#endif
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
@@ -1305,7 +1397,11 @@ pfm_reserve_session(struct task_struct *
DPRINT(("reserving system wide session on CPU%u currently on
CPU%u\n", cpu, smp_processor_id()));
+#ifndef XEN
pfm_sessions.pfs_sys_session[cpu] = task;
+#else
+ pfm_sessions.pfs_sys_session[cpu] = XENOPROF_TASK;
+#endif
pfm_sessions.pfs_sys_sessions++ ;
@@ -1332,7 +1428,11 @@ pfm_reserve_session(struct task_struct *
error_conflict:
DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
+#ifndef XEN
pfm_sessions.pfs_sys_session[cpu]->pid,
+#else
+ -1,
+#endif
cpu));
abort:
UNLOCK_PFS(flags);
@@ -1392,6 +1492,7 @@ pfm_unreserve_session(pfm_context_t *ctx
return 0;
}
+#ifndef XEN
/*
* removes virtual mapping of the sampling buffer.
* IMPORTANT: cannot be called with interrupts disable, e.g. inside
@@ -1428,6 +1529,7 @@ pfm_remove_smpl_mapping(struct task_stru
return 0;
}
+#endif
/*
* free actual physical storage used by sampling buffer
@@ -1477,6 +1579,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *f
}
+#ifndef XEN
/*
* pfmfs should _never_ be mounted by userland - too much of security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
@@ -1901,6 +2004,7 @@ pfm_flush(struct file *filp)
return 0;
}
+#endif
/*
* called either on explicit close() or from exit_files().
* Only the LAST user of the file gets to this point, i.e., it is
@@ -1916,19 +2020,27 @@ pfm_flush(struct file *filp)
* When called from exit_files(), the current task is not yet ZOMBIE but we
* flush the PMU state to the context.
*/
+#ifndef XEN
static int
pfm_close(struct inode *inode, struct file *filp)
-{
+#else
+static int
+pfm_close(pfm_context_t *ctx)
+#endif
+{
+#ifndef XEN
pfm_context_t *ctx;
struct task_struct *task;
struct pt_regs *regs;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
+#endif
unsigned long smpl_buf_size = 0UL;
void *smpl_buf_addr = NULL;
int free_possible = 1;
int state, is_system;
+#ifndef XEN
DPRINT(("pfm_close called private=%p\n", filp->private_data));
if (PFM_IS_FILE(filp) == 0) {
@@ -1943,10 +2055,14 @@ pfm_close(struct inode *inode, struct fi
}
PROTECT_CTX(ctx, flags);
+#else
+ BUG_ON(!spin_is_locked(&ctx->ctx_lock));
+#endif
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
+#ifndef XEN
task = PFM_CTX_TASK(ctx);
regs = task_pt_regs(task);
@@ -2045,8 +2161,15 @@ pfm_close(struct inode *inode, struct fi
pfm_context_unload(ctx, NULL, 0, regs);
#endif
}
-
+#else
+ /* XXX XEN */
+ /* unload context */
+ BUG_ON(state != PFM_CTX_UNLOADED);
+#endif
+
+#ifndef XEN
doit:
+#endif
/* reload state, may have changed during opening of critical section */
state = ctx->ctx_state;
@@ -2087,6 +2210,7 @@ doit:
pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
}
+#ifndef XEN
/*
* disconnect file descriptor from context must be done
* before we unlock.
@@ -2107,6 +2231,9 @@ doit:
* MUST be done with interrupts ENABLED.
*/
if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
+#else
+ UNPROTECT_CTX_NOIRQ(ctx);
+#endif
/*
* return the memory used by the context
@@ -2116,6 +2243,7 @@ doit:
return 0;
}
+#ifndef XEN
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
@@ -2255,6 +2383,7 @@ pfm_remap_buffer(struct vm_area_struct *
}
return 0;
}
+#endif
/*
* allocate a sampling buffer and remaps it into the user address space of the task
@@ -2262,6 +2391,7 @@ static int
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
+#ifndef XEN
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
unsigned long size;
@@ -2374,8 +2504,13 @@ error_kmem:
pfm_rvfree(smpl_buf, size);
return -ENOMEM;
-}
-
+#else
+ /* XXX */
+ return 0;
+#endif
+}
+
+#ifndef XEN
/*
* XXX: do something better here
*/
@@ -2399,6 +2534,7 @@ pfm_bad_permissions(struct task_struct *
|| (current->gid != task->sgid)
|| (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}
+#endif
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
@@ -2535,6 +2671,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
ctx->ctx_used_dbrs[0] = 0UL;
}
+#ifndef XEN
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
@@ -2642,20 +2779,31 @@ pfm_get_task(pfm_context_t *ctx, pid_t p
}
return ret;
}
-
-
-
+#endif
+
+
+#ifndef XEN
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
-{
+#else
+static pfm_context_t*
+pfm_context_create(pfarg_context_t* req)
+#endif
+{
+#ifndef XEN
pfarg_context_t *req = (pfarg_context_t *)arg;
struct file *filp;
+#else
+ pfm_context_t *ctx;
+#endif
int ctx_flags;
int ret;
+#ifndef XEN
/* let's check the arguments first */
ret = pfarg_is_sane(current, req);
if (ret < 0) return ret;
+#endif
ctx_flags = req->ctx_flags;
@@ -2664,6 +2812,7 @@ pfm_context_create(pfm_context_t *ctx, v
ctx = pfm_context_alloc();
if (!ctx) goto error;
+#ifndef XEN
ret = pfm_alloc_fd(&filp);
if (ret < 0) goto error_file;
@@ -2673,6 +2822,7 @@ pfm_context_create(pfm_context_t *ctx, v
* attach context to file
*/
filp->private_data = ctx;
+#endif
/*
* does the user want to sample?
@@ -2704,10 +2854,12 @@ pfm_context_create(pfm_context_t *ctx, v
* ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
*/
+#ifndef XEN
/*
* init restart semaphore to locked
*/
init_completion(&ctx->ctx_restart_done);
+#endif
/*
* activation is used in SMP only
@@ -2715,12 +2867,14 @@ pfm_context_create(pfm_context_t *ctx, v
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
SET_LAST_CPU(ctx, -1);
+#ifndef XEN
/*
* initialize notification message queue
*/
ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
init_waitqueue_head(&ctx->ctx_msgq_wait);
init_waitqueue_head(&ctx->ctx_zombieq);
+#endif
DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d
no_msg=%d ctx_fd=%d \n",
ctx,
@@ -2736,19 +2890,35 @@ pfm_context_create(pfm_context_t *ctx, v
*/
pfm_reset_pmu_state(ctx);
+#ifndef XEN
return 0;
+#else
+ return ctx;
+#endif
buffer_error:
+#ifndef XEN
pfm_free_fd(ctx->ctx_fd, filp);
+#endif
if (ctx->ctx_buf_fmt) {
+#ifndef XEN
pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
- }
+#else
+ pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, NULL);
+#endif
+ }
+#ifndef XEN
error_file:
+#endif
pfm_context_free(ctx);
error:
+#ifndef XEN
return ret;
+#else
+ return NULL;
+#endif
}
static inline unsigned long
@@ -2860,7 +3030,9 @@ static int
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
+#ifndef XEN
struct thread_struct *thread = NULL;
+#endif
struct task_struct *task;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned long value, pmc_pm;
@@ -2877,9 +3049,14 @@ pfm_write_pmcs(pfm_context_t *ctx, void
is_system = ctx->ctx_fl_system;
task = ctx->ctx_task;
impl_pmds = pmu_conf->impl_pmds[0];
+#ifdef XEN
+ task = NULL;
+ BUG_ON(regs != NULL);
+#endif
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
+#ifndef XEN
if (is_loaded) {
thread = &task->thread;
/*
@@ -2893,6 +3070,13 @@ pfm_write_pmcs(pfm_context_t *ctx, void
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
}
+#else
+ /* XXX FIXME */
+ if (state != PFM_CTX_UNLOADED) {
+ return -EBUSY;
+ }
+#endif
+
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) {
@@ -3046,6 +3230,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void
*/
ctx->ctx_pmcs[cnum] = value;
+#ifndef XEN
if (is_loaded) {
/*
* write thread state
@@ -3071,6 +3256,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void
}
#endif
}
+#endif
DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx
used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx
used_monitors=0x%lx ovfl_regs=0x%lx\n",
cnum,
@@ -3102,7 +3288,9 @@ static int
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
+#ifndef XEN
struct thread_struct *thread = NULL;
+#endif
struct task_struct *task;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned long value, hw_value, ovfl_mask;
@@ -3118,9 +3306,14 @@ pfm_write_pmds(pfm_context_t *ctx, void
is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf->ovfl_val;
task = ctx->ctx_task;
+#ifdef XEN
+ task = NULL;
+ BUG_ON(regs != NULL);
+#endif
if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
+#ifndef XEN
/*
* on both UP and SMP, we can only write to the PMC when the task is
* the owner of the local PMU.
@@ -3138,6 +3331,12 @@ pfm_write_pmds(pfm_context_t *ctx, void
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
}
+#else
+ /* XXX FIXME */
+ if (state != PFM_CTX_UNLOADED) {
+ return -EBUSY;
+ }
+#endif
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) {
@@ -3230,6 +3429,8 @@ pfm_write_pmds(pfm_context_t *ctx, void
ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
}
+ /* XXX FIXME */
+#ifndef XEN
if (is_loaded) {
/*
* write thread state
@@ -3252,6 +3453,7 @@ pfm_write_pmds(pfm_context_t *ctx, void
#endif
}
}
+#endif
DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx
ctx_pmd=0x%lx short_reset=0x%lx "
"long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx
used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx
ovfl_regs=0x%lx\n",
@@ -3288,6 +3490,7 @@ abort_mission:
return ret;
}
+#ifndef XEN
/*
* By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
* Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
@@ -3471,6 +3674,7 @@ pfm_mod_read_pmds(struct task_struct *ta
return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
+#endif
/*
* Only call this function when a process it trying to
@@ -3552,6 +3756,7 @@ pfm_release_debug_registers(struct task_
return ret;
}
+#ifndef XEN
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -3720,6 +3925,7 @@ pfm_debug(pfm_context_t *ctx, void *arg,
}
return 0;
}
+#endif
/*
* arg can be NULL and count can be zero for this function
@@ -3727,7 +3933,9 @@ static int
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
+#ifndef XEN
struct thread_struct *thread = NULL;
+#endif
struct task_struct *task;
pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
unsigned long flags;
@@ -3744,6 +3952,12 @@ pfm_write_ibr_dbr(int mode, pfm_context_
is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system;
task = ctx->ctx_task;
+#ifdef XEN
+ task = NULL;
+ BUG_ON(regs != NULL);
+ /* currently dbrs, ibrs aren't supported */
+ BUG();
+#endif
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
@@ -3752,6 +3966,10 @@ pfm_write_ibr_dbr(int mode, pfm_context_
* the owner of the local PMU.
*/
if (is_loaded) {
+#ifdef XEN
+ /* XXX */
+ return -EBUSY;
+#else
thread = &task->thread;
/*
* In system wide and when the context is loaded, access can only happen
@@ -3763,6 +3981,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
+#endif
}
/*
@@ -3777,10 +3996,14 @@ pfm_write_ibr_dbr(int mode, pfm_context_
/*
* don't bother if we are loaded and task is being debugged
*/
+#ifndef XEN
if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
DPRINT(("debug registers already in use for [%d]\n",
task->pid));
return -EBUSY;
}
+#else
+ /* Currently no support for is_loaded, see -EBUSY above */
+#endif
/*
* check for debug registers in system wide mode
@@ -3819,7 +4042,9 @@ pfm_write_ibr_dbr(int mode, pfm_context_
* is shared by all processes running on it
*/
if (first_time && can_access_pmu) {
+#ifndef XEN
DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+#endif
for (i=0; i < pmu_conf->num_ibrs; i++) {
ia64_set_ibr(i, 0UL);
ia64_dv_serialize_instruction();
@@ -3983,6 +4208,7 @@ pfm_get_features(pfm_context_t *ctx, voi
return 0;
}
+#ifndef XEN
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -4201,12 +4427,15 @@ pfm_check_task_exist(pfm_context_t *ctx)
return ret;
}
+#endif
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
+#ifndef XEN
struct thread_struct *thread;
+#endif
struct pfm_context_t *old;
unsigned long flags;
#ifndef CONFIG_SMP
@@ -4220,6 +4449,17 @@ pfm_context_load(pfm_context_t *ctx, voi
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
+#ifdef XEN
+ task = NULL;
+ old = NULL;
+ pmcs_source = pmds_source = NULL;
+#ifndef CONFIG_SMP
+ owner_task = NULL;
+#endif
+ flags = 0;
+ BUG_ON(count != 0);
+ BUG_ON(regs != NULL);
+#endif
/*
* can only load from unloaded or terminated state
*/
@@ -4230,6 +4470,7 @@ pfm_context_load(pfm_context_t *ctx, voi
return -EBUSY;
}
+#ifndef XEN
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid,
ctx->ctx_fl_using_dbreg));
if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
@@ -4255,8 +4496,16 @@ pfm_context_load(pfm_context_t *ctx, voi
}
thread = &task->thread;
+#else
+ BUG_ON(!spin_is_locked(&ctx->ctx_lock));
+ if (!is_system) {
+ ret = -EINVAL;
+ goto error;
+ }
+#endif
ret = 0;
+#ifndef XEN
/*
* cannot load a context which is using range restrictions,
* into a task that is being debugged.
@@ -4284,6 +4533,9 @@ pfm_context_load(pfm_context_t *ctx, voi
if (ret) goto error;
}
+#else
+ BUG_ON(ctx->ctx_fl_using_dbreg);
+#endif
/*
* SMP system-wide monitoring implies self-monitoring.
@@ -4318,6 +4570,7 @@ pfm_context_load(pfm_context_t *ctx, voi
*
* XXX: needs to be atomic
*/
+#ifndef XEN
DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
thread->pfm_context, ctx));
@@ -4329,6 +4582,7 @@ pfm_context_load(pfm_context_t *ctx, voi
}
pfm_reset_msgq(ctx);
+#endif
ctx->ctx_state = PFM_CTX_LOADED;
@@ -4346,9 +4600,14 @@ pfm_context_load(pfm_context_t *ctx, voi
if (ctx->ctx_fl_excl_idle)
PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
} else {
+#ifndef XEN
thread->flags |= IA64_THREAD_PM_VALID;
- }
-
+#else
+ BUG();
+#endif
+ }
+
+#ifndef XEN
/*
* propagate into thread-state
*/
@@ -4417,12 +4676,29 @@ pfm_context_load(pfm_context_t *ctx, voi
ctx->ctx_saved_psr_up = 0UL;
ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
}
+#else
+ BUG_ON(!is_system);
+
+ /* load pmds, pmcs */
+ xenpfm_restore_pmds(ctx);
+ xenpfm_restore_pmcs(ctx);
+
+ ctx->ctx_reload_pmcs[0] = 0UL;
+ ctx->ctx_reload_pmds[0] = 0UL;
+
+ BUG_ON(ctx->ctx_fl_using_dbreg);
+
+ SET_PMU_OWNER(NULL, ctx);
+#endif
ret = 0;
+#ifndef XEN
error_unres:
if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
+#endif
error:
+#ifndef XEN
/*
* we must undo the dbregs setting (for system-wide)
*/
@@ -4445,6 +4721,9 @@ error:
}
}
}
+#else
+ BUG_ON(set_dbregs);
+#endif
return ret;
}
@@ -4466,7 +4745,15 @@ pfm_context_unload(pfm_context_t *ctx, v
int prev_state, is_system;
int ret;
+#ifndef XEN
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid :
-1));
+#else
+ task = NULL;
+ tregs = NULL;
+ BUG_ON(arg != NULL);
+ BUG_ON(count != 0);
+ BUG_ON(regs != NULL);
+#endif
prev_state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
@@ -4482,8 +4769,13 @@ pfm_context_unload(pfm_context_t *ctx, v
/*
* clear psr and dcr bits
*/
+#ifndef XEN
ret = pfm_stop(ctx, NULL, 0, regs);
if (ret) return ret;
+#else
+ /* caller does it by hand */
+ ret = 0;
+#endif
ctx->ctx_state = PFM_CTX_UNLOADED;
@@ -4515,10 +4807,12 @@ pfm_context_unload(pfm_context_t *ctx, v
if (prev_state != PFM_CTX_ZOMBIE)
pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
+#ifndef XEN
/*
* disconnect context from task
*/
task->thread.pfm_context = NULL;
+#endif
/*
* disconnect task from context
*/
@@ -4530,6 +4824,7 @@ pfm_context_unload(pfm_context_t *ctx, v
return 0;
}
+#ifndef XEN
/*
* per-task mode
*/
@@ -4584,9 +4879,14 @@ pfm_context_unload(pfm_context_t *ctx, v
DPRINT(("disconnected [%d] from context\n", task->pid));
return 0;
-}
-
-
+#else
+ BUG();
+ return -EINVAL;
+#endif
+}
+
+
+#ifndef XEN
/*
* called only from exit_thread(): task == current
* we come here only if current has a context attached (loaded or masked)
@@ -5210,6 +5510,9 @@ pfm_end_notify_user(pfm_context_t *ctx)
return pfm_notify_user(ctx, msg);
}
+#else
+#define pfm_ovfl_notify_user(ctx, ovfl_pmds) do {} while(0)
+#endif
/*
* main overflow processing routine.
@@ -5226,6 +5529,9 @@ pfm_overflow_handler(struct task_struct
pfm_ovfl_ctrl_t ovfl_ctrl;
unsigned int i, has_smpl;
int must_notify = 0;
+#ifdef XEN
+ BUG_ON(task != NULL);
+#endif
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
@@ -5400,6 +5706,7 @@ pfm_overflow_handler(struct task_struct
}
if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
+#ifndef XEN
/*
* keep track of what to reset when unblocking
*/
@@ -5428,11 +5735,18 @@ pfm_overflow_handler(struct task_struct
* anyway, so the signal receiver would come spin for nothing.
*/
must_notify = 1;
+#else
+ gdprintk(XENLOG_INFO, "%s check!\n", __func__);
+#endif
}
DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx
ovfl_notify=0x%lx masked=%d\n",
+#ifndef XEN
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task),
+#else
+ -1, 0UL,
+#endif
ctx->ctx_fl_trap_reason,
ovfl_pmds,
ovfl_notify,
@@ -5441,9 +5755,13 @@ pfm_overflow_handler(struct task_struct
* in case monitoring must be stopped, we toggle the psr bits
*/
if (ovfl_ctrl.bits.mask_monitoring) {
+#ifndef XEN
pfm_mask_monitoring(task);
ctx->ctx_state = PFM_CTX_MASKED;
ctx->ctx_fl_can_restart = 1;
+#else
+ gdprintk(XENLOG_INFO, "%s check!\n", __func__);
+#endif
}
/*
@@ -5513,14 +5831,22 @@ pfm_do_interrupt_handler(int irq, void *
*/
pmc0 = ia64_get_pmc(0);
+#ifndef XEN
task = GET_PMU_OWNER();
+#else
+ task = NULL;
+#endif
ctx = GET_PMU_CTX();
/*
* if we have some pending bits set
* assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
*/
+#ifndef XEN
if (PMC0_HAS_OVFL(pmc0) && task) {
+#else
+ if (PMC0_HAS_OVFL(pmc0)) {
+#endif
/*
* we assume that pmc0.fr is always set here
*/
@@ -5528,8 +5854,10 @@ pfm_do_interrupt_handler(int irq, void *
/* sanity check */
if (!ctx) goto report_spurious1;
+#ifndef XEN
if (ctx->ctx_fl_system == 0 && (task->thread.flags &
IA64_THREAD_PM_VALID) == 0)
goto report_spurious2;
+#endif
PROTECT_CTX_NOPRINT(ctx, flags);
@@ -5549,16 +5877,20 @@ pfm_do_interrupt_handler(int irq, void *
return retval;
report_spurious1:
+#ifndef XEN
printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d:
process %d has no PFM context\n",
this_cpu, task->pid);
+#endif
pfm_unfreeze_pmu();
return -1;
+#ifndef XEN /* XEN path doesn't take this goto */
report_spurious2:
printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d:
process %d, invalid flag\n",
this_cpu,
task->pid);
pfm_unfreeze_pmu();
return -1;
+#endif
}
static irqreturn_t
@@ -5600,6 +5932,7 @@ pfm_interrupt_handler(int irq, void *arg
return IRQ_HANDLED;
}
+#ifndef XEN
/*
* /proc/perfmon interface, for debug only
*/
@@ -5777,6 +6110,7 @@ pfm_proc_open(struct inode *inode, struc
{
return seq_open(file, &pfm_seq_ops);
}
+#endif
/*
@@ -5831,6 +6165,7 @@ pfm_syst_wide_update_task(struct task_st
}
}
+#ifndef XEN
#ifdef CONFIG_SMP
static void
@@ -6326,6 +6661,7 @@ pfm_load_regs (struct task_struct *task)
if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
+#endif /* XEN */
/*
* this function assumes monitoring is stopped
@@ -6333,6 +6669,7 @@ static void
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
+#ifndef XEN
u64 pmc0;
unsigned long mask2, val, pmd_val, ovfl_val;
int i, can_access_pmu = 0;
@@ -6438,14 +6775,20 @@ pfm_flush_pmds(struct task_struct *task,
ctx->ctx_pmds[i].val = val;
}
+#else
+ /* XXX */
+#endif
}
static struct irqaction perfmon_irqaction = {
.handler = pfm_interrupt_handler,
+#ifndef XEN
.flags = SA_INTERRUPT,
+#endif
.name = "perfmon"
};
+#ifndef XEN
static void
pfm_alt_save_pmu_state(void *data)
{
@@ -6580,11 +6923,16 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_ha
return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+#endif
/*
* perfmon initialization routine, called from the initcall() table
*/
+#ifndef XEN
static int init_pfm_fs(void);
+#else
+#define init_pfm_fs() do {} while(0)
+#endif
static int __init
pfm_probe_pmu(void)
@@ -6609,12 +6957,14 @@ found:
return 0;
}
+#ifndef XEN
static struct file_operations pfm_proc_fops = {
.open = pfm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
+#endif
int __init
pfm_init(void)
@@ -6684,6 +7034,7 @@ pfm_init(void)
return -1;
}
+#ifndef XEN
/*
* create /proc/perfmon (mostly for debugging purposes)
*/
@@ -6702,6 +7053,7 @@ pfm_init(void)
* create /proc/sys/kernel/perfmon (for debugging purposes)
*/
pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
+#endif
/*
* initialize all our spinlocks
@@ -6768,12 +7120,14 @@ dump_pmu_state(const char *from)
return;
}
+#ifndef XEN
printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
this_cpu,
from,
current->pid,
regs->cr_iip,
current->comm);
+#endif
task = GET_PMU_OWNER();
ctx = GET_PMU_CTX();
@@ -6808,6 +7162,7 @@ dump_pmu_state(const char *from)
}
if (ctx) {
+#ifndef XEN
printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d
ctx_task=[%d] saved_psr_up=0x%lx\n",
this_cpu,
ctx->ctx_state,
@@ -6816,10 +7171,19 @@ dump_pmu_state(const char *from)
ctx->ctx_msgq_head,
ctx->ctx_msgq_tail,
ctx->ctx_saved_psr_up);
+#else
+ printk("->CPU%d ctx_state=%d vaddr=%p addr=%p
saved_psr_up=0x%lx\n",
+ this_cpu,
+ ctx->ctx_state,
+ ctx->ctx_smpl_vaddr,
+ ctx->ctx_smpl_hdr,
+ ctx->ctx_saved_psr_up);
+#endif
}
local_irq_restore(flags);
}
+#ifndef XEN
/*
* called from process.c:copy_thread(). task is new child.
*/
@@ -6843,6 +7207,7 @@ pfm_inherit(struct task_struct *task, st
* the psr bits are already set properly in copy_threads()
*/
}
+#endif
#else /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
@@ -6850,3 +7215,584 @@ sys_perfmonctl (int fd, int cmd, void *a
return -ENOSYS;
}
#endif /* CONFIG_PERFMON */
+
+
+#ifdef XEN
+static int xenpfm_context_unload(void);
+static int xenpfm_start_stop_locked(int is_start);
+DEFINE_PER_CPU(pfm_context_t*, xenpfm_context);
+
+/*
+ * Note: some functions mask interrupts while holding this lock,
+ * so this lock must never be taken from an interrupt handler.
+ * Lock order: domlist_lock => xenpfm_context_lock
+ */
+DEFINE_SPINLOCK(xenpfm_context_lock);
+
+static int
+xenpfm_get_features(XEN_GUEST_HANDLE(pfarg_features_t) req)
+{
+ pfarg_features_t res;
+ if (guest_handle_is_null(req))
+ return -EFAULT;
+
+ memset(&res, 0, sizeof(res));
+ pfm_get_features(NULL, &res, 0, NULL);
+ if (copy_to_guest(req, &res, 1))
+ return -EFAULT;
+ return 0;
+}
+
+static int
+xenpfm_pfarg_is_sane(pfarg_context_t* pfx)
+{
+ int error;
+ int ctx_flags;
+
+ error = pfarg_is_sane(NULL, pfx);
+ if (error)
+ return error;
+
+ ctx_flags = pfx->ctx_flags;
+ if (!(ctx_flags & PFM_FL_SYSTEM_WIDE) ||
+ ctx_flags & PFM_FL_NOTIFY_BLOCK ||
+ ctx_flags & PFM_FL_OVFL_NO_MSG)
+ return -EINVAL;
+
+ /* probably more to add here */
+
+ return 0;
+}
+
+static int
+xenpfm_context_create(XEN_GUEST_HANDLE(pfarg_context_t) req)
+{
+ int error;
+ pfarg_context_t kreq;
+
+ int cpu;
+ pfm_context_t* ctx[NR_CPUS] = {[0 ... (NR_CPUS - 1)] = NULL};
+
+ if (copy_from_guest(&kreq, req, 1)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ error = xenpfm_pfarg_is_sane(&kreq);
+ if (error)
+ goto out;
+
+ /* XXX fmt */
+ for_each_cpu(cpu) {
+ ctx[cpu] = pfm_context_create(&kreq);
+ if (ctx[cpu] == NULL) {
+ error = -ENOMEM;
+ break;
+ }
+ }
+ if (error)
+ goto out;
+
+ BUG_ON(in_irq());
+ spin_lock(&xenpfm_context_lock);
+ for_each_cpu(cpu) {
+ if (per_cpu(xenpfm_context, cpu) != NULL) {
+ error = -EBUSY;
+ break;
+ }
+ }
+ if (!error) {
+ for_each_cpu(cpu) {
+ per_cpu(xenpfm_context, cpu) = ctx[cpu];
+ ctx[cpu] = NULL;
+ }
+ }
+ spin_unlock(&xenpfm_context_lock);
+
+out:
+ for_each_cpu(cpu) {
+ if (ctx[cpu] != NULL)
+ pfm_context_free(ctx[cpu]);
+ }
+ return error;
+}
+
+static int
+xenpfm_context_destroy(void)
+{
+ int cpu;
+ pfm_context_t* ctx;
+ unsigned long flags;
+ unsigned long need_unload;
+ int error = 0;
+
+again:
+ need_unload = 0;
+ BUG_ON(in_irq());
+ spin_lock_irqsave(&xenpfm_context_lock, flags);
+ for_each_cpu(cpu) {
+ ctx = per_cpu(xenpfm_context, cpu);
+ if (ctx == NULL) {
+ error = -EINVAL;
+ break;
+ }
+ PROTECT_CTX_NOIRQ(ctx);
+ if (ctx->ctx_state != PFM_CTX_UNLOADED)
+ need_unload = 1;
+ }
+ if (error) {
+ for_each_cpu(cpu) {
+ ctx = per_cpu(xenpfm_context, cpu);
+ if (ctx == NULL)
+ break;
+ UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
+ }
+ goto out;
+ }
+ if (need_unload) {
+ for_each_cpu(cpu)
+ UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
+ spin_unlock_irqrestore(&xenpfm_context_lock, flags);
+
+ error = xenpfm_context_unload();
+ if (error)
+ return error;
+ goto again;
+ }
+
+ for_each_cpu(cpu) {
+ pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
+ per_cpu(xenpfm_context, cpu) = NULL;
+
+ /* pfm_close() unlocks the spinlock and frees the context. */
+ error |= pfm_close(ctx);
+ }
+out:
+ spin_unlock_irqrestore(&xenpfm_context_lock, flags);
+ return error;
+}
+
+static int
+xenpfm_write_pmcs(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
+{
+ unsigned long i;
+ int error = 0;
+ unsigned long flags;
+
+ for (i = 0; i < count; i++) {
+ pfarg_reg_t kreq;
+ int cpu;
+ if (copy_from_guest_offset(&kreq, req, i, 1)) {
+ error = -EFAULT;
+ goto out;
+ }
+ BUG_ON(in_irq());
+ spin_lock_irqsave(&xenpfm_context_lock, flags);
+ for_each_online_cpu(cpu) {
+ pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
+ PROTECT_CTX_NOIRQ(ctx);
+ error |= pfm_write_pmcs(ctx, (void *)&kreq, 1, NULL);
+ UNPROTECT_CTX_NOIRQ(ctx);
+ }
+ spin_unlock_irqrestore(&xenpfm_context_lock, flags);
+ if (error)
+ break;
+ }
+
+ /* XXX if the context is loaded, update the pmcs on all physical cpus. */
+ /* Currently this results in an error. */
+out:
+ return error;
+}
+
+static int
+xenpfm_write_pmds(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
+{
+ unsigned long i;
+ int error = 0;
+
+ for (i = 0; i < count; i++) {
+ pfarg_reg_t kreq;
+ int cpu;
+ unsigned long flags;
+ if (copy_from_guest_offset(&kreq, req, i, 1)) {
+ error = -EFAULT;
+ goto out;
+ }
+ BUG_ON(in_irq());
+ spin_lock_irqsave(&xenpfm_context_lock, flags);
+ for_each_online_cpu(cpu) {
+ pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
+ PROTECT_CTX_NOIRQ(ctx);
+ error |= pfm_write_pmds(ctx, &kreq, 1, NULL);
+ UNPROTECT_CTX_NOIRQ(ctx);
+ }
+ spin_unlock_irqrestore(&xenpfm_context_lock, flags);
+ }
+
+ /* XXX if the context is loaded, update the pmds on all physical cpus. */
+ /* Currently this results in an error. */
+out:
+ return error;
+}
+
+struct xenpfm_context_load_arg {
+ pfarg_load_t* req;
+ int error[NR_CPUS];
+};
+
+static void
+xenpfm_context_load_cpu(void* info)
+{
+ unsigned long flags;
+ struct xenpfm_context_load_arg* arg = (struct xenpfm_context_load_arg*)info;
+ pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
+ PROTECT_CTX(ctx, flags);
+ arg->error[smp_processor_id()] = pfm_context_load(ctx, arg->req, 0,
NULL);
+ UNPROTECT_CTX(ctx, flags);
+}
+
+static int
+xenpfm_context_load(XEN_GUEST_HANDLE(pfarg_load_t) req)
+{
+ pfarg_load_t kreq;
+ int cpu;
+ struct xenpfm_context_load_arg arg;
+ int error = 0;
+
+ if (copy_from_guest(&kreq, req, 1))
+ return -EFAULT;
+
+ arg.req = &kreq;
+ for_each_online_cpu(cpu)
+ arg.error[cpu] = 0;
+
+ BUG_ON(in_irq());
+ spin_lock(&xenpfm_context_lock);
+ smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
+ xenpfm_context_load_cpu(&arg);
+ spin_unlock(&xenpfm_context_lock);
+ for_each_online_cpu(cpu) {
+ if (arg.error[cpu]) {
+ gdprintk(XENLOG_INFO, "%s: error %d cpu %d\n",
+ __func__, error, cpu);
+ error = arg.error[cpu];
+ }
+ }
+ return 0;
+}
+
+
+struct xenpfm_context_unload_arg {
+ int error[NR_CPUS];
+};
+
+static void
+xenpfm_context_unload_cpu(void* info)
+{
+ unsigned long flags;
+ struct xenpfm_context_unload_arg* arg = (struct xenpfm_context_unload_arg*)info;
+ pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
+ PROTECT_CTX(ctx, flags);
+ arg->error[smp_processor_id()] = pfm_context_unload(ctx, NULL, 0, NULL);
+ UNPROTECT_CTX(ctx, flags);
+}
+
+static int
+xenpfm_context_unload(void)
+{
+ int cpu;
+ struct xenpfm_context_unload_arg arg;
+ int error = 0;
+
+ for_each_online_cpu(cpu)
+ arg.error[cpu] = 0;
+
+ BUG_ON(in_irq());
+ read_lock(&domlist_lock);
+ spin_lock(&xenpfm_context_lock);
+ error = xenpfm_start_stop_locked(0);
+ read_unlock(&domlist_lock);
+ if (error) {
+ spin_unlock(&xenpfm_context_lock);
+ return error;
+ }
+
+ smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
+ xenpfm_context_unload_cpu(&arg);
+ spin_unlock(&xenpfm_context_lock);
+ for_each_online_cpu(cpu) {
+ if (arg.error[cpu]) {
+ gdprintk(XENLOG_INFO, "%s: error %d cpu %d\n",
+ __func__, error, cpu);
+ error = arg.error[cpu];
+ }
+ }
+ return error;
+}
+
+static int
+__xenpfm_start(void)
+{
+ pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
+ int state;
+ int error = 0;
+
+ BUG_ON(local_irq_is_enabled());
+ PROTECT_CTX_NOIRQ(ctx);
+ state = ctx->ctx_state;
+ if (state != PFM_CTX_LOADED) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ /* now update the local PMU and cpuinfo */
+ PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
+
+ /* start monitoring at kernel level */
+ pfm_set_psr_pp();
+
+ /* start monitoring at kernel level */
+ pfm_set_psr_up();
+
+ /* enable dcr pp */
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
+ ia64_srlz_i();
+out:
+ UNPROTECT_CTX_NOIRQ(ctx);
+ return error;
+}
+
+static int
+__xenpfm_stop(void)
+{
+ pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
+ int state;
+ int error = 0;
+
+ BUG_ON(local_irq_is_enabled());
+ PROTECT_CTX_NOIRQ(ctx);
+ state = ctx->ctx_state;
+ if (state != PFM_CTX_LOADED) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Update local PMU first
+ *
+ * disable dcr pp
+ */
+ ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) &
~IA64_DCR_PP);
+ ia64_srlz_i();
+
+ /* update local cpuinfo */
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
+
+ /* stop monitoring, does srlz.i */
+ pfm_clear_psr_pp();
+
+ /* stop monitoring at kernel level */
+ pfm_clear_psr_up();
+out:
+ UNPROTECT_CTX_NOIRQ(ctx);
+ return error;
+}
+
+int
+__xenpfm_start_stop(int is_start)
+{
+ if (is_start)
+ return __xenpfm_start();
+ else
+ return __xenpfm_stop();
+}
+
+struct xenpfm_start_arg {
+ int is_start;
+ atomic_t started;
+ atomic_t finished;
+ int error[NR_CPUS];
+};
+
+static void
+xenpfm_start_stop_cpu(void* info)
+{
+ unsigned long flags;
+ struct xenpfm_start_arg* arg = (struct xenpfm_start_arg*)info;
+
+ local_irq_save(flags);
+ atomic_inc(&arg->started);
+ while (!atomic_read(&arg->finished))
+ cpu_relax();
+
+ arg->error[smp_processor_id()] = __xenpfm_start_stop(arg->is_start);
+
+ atomic_inc(&arg->finished);
+ local_irq_restore(flags);
+}
+
+static void
+xenpfm_start_stop_vcpu(struct vcpu* v, int is_start)
+{
+ struct pt_regs *regs = vcpu_regs(v);
+
+ if (is_start) {
+ /* set user level psr.pp for the caller */
+ ia64_psr(regs)->pp = 1;
+
+ /* activate monitoring at user level */
+ ia64_psr(regs)->up = 1;
+
+ /* don't allow user level control */
+ ia64_psr(regs)->sp = 0;
+ } else {
+ /*
+ * stop monitoring in the caller
+ */
+ ia64_psr(regs)->pp = 0;
+
+ /*
+ * stop monitoring at the user level
+ */
+ ia64_psr(regs)->up = 0;
+
+#if 0
+ /*
+ * cancel user level control
+ */
+ ia64_psr(regs)->sp = 1;
+#endif
+ }
+}
+
+static int
+xenpfm_start_stop_locked(int is_start)
+{
+ struct xenpfm_start_arg arg;
+ int cpus = num_online_cpus();
+ int cpu;
+ unsigned long flags;
+ struct domain* d;
+ struct vcpu* v;
+ int error = 0;
+
+ arg.is_start = is_start;
+ atomic_set(&arg.started, 1); /* 1 for this cpu */
+ atomic_set(&arg.finished, 0);
+ for_each_cpu(cpu)
+ arg.error[cpu] = 0;
+
+ BUG_ON(!spin_is_locked(&xenpfm_context_lock));
+ smp_call_function(&xenpfm_start_stop_cpu, &arg, 1, 0);
+ local_irq_save(flags);
+
+ while (atomic_read(&arg.started) != cpus)
+ cpu_relax();
+
+ for_each_domain(d) {
+ for_each_vcpu(d, v)
+ xenpfm_start_stop_vcpu(v, is_start);
+ }
+
+ arg.error[smp_processor_id()] = __xenpfm_start_stop(is_start);
+ atomic_inc(&arg.finished);
+
+ while (atomic_read(&arg.finished) != cpus)
+ cpu_relax();
+ local_irq_restore(flags);
+
+ for_each_online_cpu(cpu) {
+ if (arg.error[cpu]) {
+ gdprintk(XENLOG_INFO, "%s: cpu %d error %d\n",
+ __func__, cpu, arg.error[cpu]);
+ error = arg.error[cpu];
+ }
+ }
+ return error;
+}
+
+static int
+xenpfm_start_stop(int is_start)
+{
+ int error;
+
+ BUG_ON(in_irq());
+ read_lock(&domlist_lock);
+ spin_lock(&xenpfm_context_lock);
+ error = xenpfm_start_stop_locked(is_start);
+ spin_unlock(&xenpfm_context_lock);
+ read_unlock(&domlist_lock);
+
+ return error;
+}
+
+#define NONPRIV_OP(cmd) (((cmd) == PFM_GET_FEATURES))
+
+int
+do_perfmon_op(unsigned long cmd,
+ XEN_GUEST_HANDLE(void) arg1, unsigned long count)
+{
+ unsigned long error = 0;
+
+ if (!NONPRIV_OP(cmd) && current->domain != xenoprof_primary_profiler) {
+ gdprintk(XENLOG_INFO, "xen perfmon: "
+ "dom %d denied privileged operation %ld\n",
+ current->domain->domain_id, cmd);
+ return -EPERM;
+ }
+ switch (cmd) {
+ case PFM_GET_FEATURES:
+ error = xenpfm_get_features(guest_handle_cast(arg1, pfarg_features_t));
+ break;
+
+ case PFM_CREATE_CONTEXT:
+ error = xenpfm_context_create(guest_handle_cast(arg1, pfarg_context_t));
+ break;
+ case PFM_DESTROY_CONTEXT:
+ error = xenpfm_context_destroy();
+ break;
+
+ case PFM_WRITE_PMCS:
+ error = xenpfm_write_pmcs(guest_handle_cast(arg1, pfarg_reg_t), count);
+ break;
+ case PFM_WRITE_PMDS:
+ error = xenpfm_write_pmds(guest_handle_cast(arg1, pfarg_reg_t), count);
+ break;
+ case PFM_READ_PMDS:
+ error = -ENOSYS;
+ break;
+ case PFM_GET_PMC_RESET_VAL:
+ error = -ENOSYS;
+ break;
+
+ case PFM_LOAD_CONTEXT:
+ error = xenpfm_context_load(guest_handle_cast(arg1, pfarg_load_t));
+ break;
+ case PFM_UNLOAD_CONTEXT:
+ error = xenpfm_context_unload();
+ break;
+
+ case PFM_START:
+ error = xenpfm_start_stop(1);
+ break;
+ case PFM_STOP:
+ error = xenpfm_start_stop(0);
+ break;
+ case PFM_RESTART:
+ error = -ENOSYS;
+ break;
+
+ case PFM_DEBUG:
+ error = -ENOSYS;
+ break;
+
+ case PFM_ENABLE:
+ case PFM_DISABLE:
+ case PFM_PROTECT_CONTEXT:
+ case PFM_UNPROTECT_CONTEXT:
+ default:
+ error = -EINVAL;
+ break;
+ }
+ return error;
+}
+#endif
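
A note on the start/stop path above: xenpfm_start_stop_locked() and
xenpfm_start_stop_cpu() implement a two-phase rendezvous. Every CPU
checks in on the `started' counter with interrupts disabled, the
initiating CPU updates the psr bits of every vcpu while the others
spin, and bumping `finished' releases them to apply the per-CPU PMU
change. A standalone C11 reduction of the pattern (illustrative only,
with threads standing in for CPUs and busy-waiting in place of the
IRQ-off sections; not Xen code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    #define NCPUS 4

    static atomic_int started = 1;   /* the initiating "cpu" counts as checked in */
    static atomic_int finished = 0;

    static int slave(void *arg)
    {
        (void)arg;
        atomic_fetch_add(&started, 1);          /* check in */
        while (atomic_load(&finished) == 0)     /* wait for the global update */
            ;
        /* the per-cpu PMU update runs here, still "interrupts off" */
        atomic_fetch_add(&finished, 1);         /* check out */
        return 0;
    }

    int main(void)
    {
        thrd_t t[NCPUS - 1];
        int i;

        for (i = 0; i < NCPUS - 1; i++)
            thrd_create(&t[i], slave, NULL);
        while (atomic_load(&started) != NCPUS)  /* wait for all to check in */
            ;
        /* global state change (the vcpu psr bits in the real code) goes here */
        atomic_fetch_add(&finished, 1);         /* release the slaves */
        while (atomic_load(&finished) != NCPUS) /* wait for all to finish */
            ;
        for (i = 0; i < NCPUS - 1; i++)
            thrd_join(t[i], NULL);
        puts("rendezvous complete");
        return 0;
    }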
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/linux-xen/perfmon_default_smpl.c
--- a/xen/arch/ia64/linux-xen/perfmon_default_smpl.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/linux-xen/perfmon_default_smpl.c Tue Nov 28 21:35:13 2006 -0700
@@ -16,9 +16,15 @@
#include <asm/perfmon.h>
#include <asm/perfmon_default_smpl.h>
+#ifndef XEN
MODULE_AUTHOR("Stephane Eranian <eranian@xxxxxxxxxx>");
MODULE_DESCRIPTION("perfmon default sampling format");
MODULE_LICENSE("GPL");
+#endif
+
+#ifdef XEN
+#define pid vcpu_id
+#endif
#define DEFAULT_DEBUG 1
@@ -157,7 +163,9 @@ default_handler(struct task_struct *task
* system-wide:
* - this is not necessarily the task controlling the session
*/
+#ifndef XEN
ent->pid = current->pid;
+#endif
ent->ovfl_pmd = ovfl_pmd;
ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
@@ -169,7 +177,9 @@ default_handler(struct task_struct *task
ent->tstamp = stamp;
ent->cpu = smp_processor_id();
ent->set = arg->active_set;
+#ifndef XEN
ent->tgid = current->tgid;
+#endif
/*
* selectively store PMDs in increasing index number
@@ -263,6 +273,7 @@ static pfm_buffer_fmt_t default_fmt={
.fmt_exit = default_exit,
};
+#ifndef XEN
static int __init
pfm_default_smpl_init_module(void)
{
@@ -282,6 +293,7 @@ pfm_default_smpl_init_module(void)
return ret;
}
+#endif
static void __exit
pfm_default_smpl_cleanup_module(void)
@@ -292,6 +304,8 @@ pfm_default_smpl_cleanup_module(void)
printk("perfmon_default_smpl: unregister %s=%d\n",
default_fmt.fmt_name, ret);
}
+#ifndef XEN
module_init(pfm_default_smpl_init_module);
module_exit(pfm_default_smpl_cleanup_module);
-
+#endif
+
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/linux/Makefile
--- a/xen/arch/ia64/linux/Makefile Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/linux/Makefile Tue Nov 28 21:35:13 2006 -0700
@@ -22,6 +22,7 @@ obj-y += __udivdi3.o
obj-y += __udivdi3.o
obj-y += __moddi3.o
obj-y += __umoddi3.o
+obj-y += carta_random.o
## variants of divide/modulo
## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/Makefile
--- a/xen/arch/ia64/xen/Makefile Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/xen/Makefile Tue Nov 28 21:35:13 2006 -0700
@@ -1,3 +1,5 @@ obj-y += acpi.o
+subdir-y += oprofile
+
obj-y += acpi.o
obj-y += dom0_ops.o
obj-y += domain.o
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue Nov 28 21:35:13 2006 -0700
@@ -343,6 +343,12 @@ do_dom0vp_op(unsigned long cmd,
case IA64_DOM0VP_expose_p2m:
ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
break;
+ case IA64_DOM0VP_perfmon: {
+ XEN_GUEST_HANDLE(void) hnd;
+ set_xen_guest_handle(hnd, (void*)arg1);
+ ret = do_perfmon_op(arg0, hnd, arg2);
+ break;
+ }
default:
ret = -1;
printk("unknown dom0_vp_op 0x%lx\n", cmd);
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c Tue Nov 28 21:35:13 2006 -0700
@@ -48,6 +48,7 @@
#include <asm/shadow.h>
#include <xen/guest_access.h>
#include <asm/tlb_track.h>
+#include <asm/perfmon.h>
unsigned long dom0_size = 512*1024*1024;
@@ -231,11 +232,35 @@ void continue_running(struct vcpu *same)
/* nothing to do */
}
+#ifdef CONFIG_PERFMON
+static int pal_halt = 1;
+static int can_do_pal_halt = 1;
+
+static int __init nohalt_setup(char * str)
+{
+ pal_halt = can_do_pal_halt = 0;
+ return 1;
+}
+__setup("nohalt", nohalt_setup);
+
+void
+update_pal_halt_status(int status)
+{
+ can_do_pal_halt = pal_halt && status;
+}
+#else
+#define can_do_pal_halt (1)
+#endif
+
static void default_idle(void)
{
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) )
- safe_halt();
+ if ( !softirq_pending(smp_processor_id()) ) {
+ if (can_do_pal_halt)
+ safe_halt();
+ else
+ cpu_relax();
+ }
local_irq_enable();
}
@@ -628,6 +653,9 @@ void domain_relinquish_resources(struct
if (d->arch.is_vti && d->arch.sal_data)
xfree(d->arch.sal_data);
+
+ /* Free page used by xen oprofile buffer */
+ free_xenoprof_pages(d);
}
unsigned long
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/xen/hypercall.c Tue Nov 28 21:35:13 2006 -0700
@@ -68,7 +68,7 @@ const hypercall_t ia64_hypercall_table[N
(hypercall_t)do_ni_hypercall, /* do_nmi_op */
(hypercall_t)do_sched_op,
(hypercall_t)do_callback_op, /* */ /* 30 */
- (hypercall_t)do_ni_hypercall, /* */
+ (hypercall_t)do_xenoprof_op, /* */
(hypercall_t)do_event_channel_op,
(hypercall_t)do_physdev_op,
(hypercall_t)do_hvm_op, /* */
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/oprofile/perfmon.c
--- a/xen/arch/ia64/xen/oprofile/perfmon.c Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/arch/ia64/xen/oprofile/perfmon.c Tue Nov 28 21:35:13 2006 -0700
@@ -1,3 +1,25 @@
+/******************************************************************************
+ * perfmon.c for xenoprof
+ * This is based on linux/arch/ia64/oprofile/perfmon.c, but heavily rewritten.
+ *
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
/**
* @file perfmon.c
*
@@ -7,94 +29,172 @@
* @author John Levon <levon@xxxxxxxxxxxxxxxxx>
*/
-#include <linux/kernel.h>
-#include <linux/config.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/xenoprof.h>
#include <asm/perfmon.h>
#include <asm/ptrace.h>
-#include <asm/errno.h>
-
+
+// XXX move them to an appropriate header file
+extern void xenoprof_log_event(struct vcpu *vcpu,
+ unsigned long eip, int mode, int event);
+extern int is_active(struct domain *d);
+
+static int allow_virq;
static int allow_ints;
static int
-perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
- struct pt_regs *regs, unsigned long stamp)
-{
- int event = arg->pmd_eventid;
+xenoprof_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
+ struct pt_regs *regs, unsigned long stamp)
+{
+ unsigned long ip = regs->cr_iip;
+ int event = arg->pmd_eventid;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
-
- /* the owner of the oprofile event buffer may have exited
- * without perfmon being shutdown (e.g. SIGSEGV)
- */
- if (allow_ints)
- oprofile_add_sample(regs, event);
- return 0;
-}
-
-
-static int perfmon_start(void)
+ arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
+ if (!allow_virq || !allow_ints)
+ return 0;
+
+ xenoprof_log_event(current, ip, xenoprofile_get_mode(task, regs), event);
+
+ // send VIRQ_XENOPROF
+ if (is_active(current->domain) && !ring_0(regs))
+ send_guest_vcpu_virq(current, VIRQ_XENOPROF);
+
+ return 0;
+}
+
+// same as linux OPROFILE_FMT_UUID
+#define XENOPROF_FMT_UUID { \
+ 0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
+
+static pfm_buffer_fmt_t xenoprof_fmt = {
+ .fmt_name = "xenoprof_format",
+ .fmt_uuid = XENOPROF_FMT_UUID,
+ .fmt_handler = xenoprof_handler,
+};
+
+static char * get_cpu_type(void)
+{
+ __u8 family = local_cpu_data->family;
+
+ switch (family) {
+ case 0x07:
+ return "ia64/itanium";
+ case 0x1f:
+ return "ia64/itanium2";
+ default:
+ return "ia64/ia64";
+ }
+}
+
+static int using_xenoprof;
+
+int __init
+xenprof_perfmon_init(void)
+{
+ int ret = pfm_register_buffer_fmt(&xenoprof_fmt);
+ if (ret)
+ return -ENODEV;
+ using_xenoprof = 1;
+ printk("xenoprof: using perfmon.\n");
+ return 0;
+}
+__initcall(xenprof_perfmon_init);
+
+#ifdef notyet
+void xenoprof_perfmon_exit(void)
+{
+ if (!using_xenoprof)
+ return;
+
+ pfm_unregister_buffer_fmt(xenoprof_fmt.fmt_uuid);
+}
+__exitcall(xenoprof_perfmon_exit);
+#endif
+
+///////////////////////////////////////////////////////////////////////////
+// glue methods for xenoprof and perfmon.
+int
+xenoprof_arch_init(int *num_events, int *is_primary, char *cpu_type)
+{
+ *num_events = 0;
+ strncpy(cpu_type, get_cpu_type(), XENOPROF_CPU_TYPE_SIZE - 1);
+ cpu_type[XENOPROF_CPU_TYPE_SIZE - 1] = '\0';
+
+ *is_primary = 0;
+ if (xenoprof_primary_profiler == NULL) {
+ /* For now, only dom0 can be the primary profiler */
+ if (current->domain->domain_id == 0) {
+ *is_primary = 1;
+ }
+ } else if (xenoprof_primary_profiler == current->domain)
+ *is_primary = 1;
+ return 0;
+}
+
+int
+xenoprof_arch_reserve_counters(void)
+{
+ // perfmon takes care of this
+ return 0;
+}
+
+int
+xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg)
+{
+ return -ENOSYS;
+}
+
+int
+xenoprof_arch_setup_events(void)
+{
+ // perfmon takes care of this
+ return 0;
+}
+
+//XXX SMP: sync by IPI?
+int
+xenoprof_arch_enable_virq(void)
+{
+ allow_virq = 1;
+ return 0;
+}
+
+//XXX SMP: sync by IPI?
+int
+xenoprof_arch_start(void)
{
allow_ints = 1;
- return 0;
-}
-
-
-static void perfmon_stop(void)
+ return 0;
+}
+
+//XXX SMP: sync by IPI?
+void
+xenoprof_arch_stop(void)
{
allow_ints = 0;
}
-
-#define OPROFILE_FMT_UUID { \
- 0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
-
-static pfm_buffer_fmt_t oprofile_fmt = {
- .fmt_name = "oprofile_format",
- .fmt_uuid = OPROFILE_FMT_UUID,
- .fmt_handler = perfmon_handler,
-};
-
-
-static char * get_cpu_type(void)
-{
- __u8 family = local_cpu_data->family;
-
- switch (family) {
- case 0x07:
- return "ia64/itanium";
- case 0x1f:
- return "ia64/itanium2";
- default:
- return "ia64/ia64";
- }
-}
-
-
-/* all the ops are handled via userspace for IA64 perfmon */
-
-static int using_perfmon;
-
-int perfmon_init(struct oprofile_operations * ops)
-{
- int ret = pfm_register_buffer_fmt(&oprofile_fmt);
- if (ret)
- return -ENODEV;
-
- ops->cpu_type = get_cpu_type();
- ops->start = perfmon_start;
- ops->stop = perfmon_stop;
- using_perfmon = 1;
- printk(KERN_INFO "oprofile: using perfmon.\n");
- return 0;
-}
-
-
-void perfmon_exit(void)
-{
- if (!using_perfmon)
- return;
-
- pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
-}
+//XXX SMP: sync by IPI?
+void
+xenoprof_arch_disable_virq(void)
+{
+ allow_virq = 0;
+}
+
+void
+xenoprof_arch_release_counters(void)
+{
+ // perfmon takes care of this
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
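
With the glue above, a sample travels: PMU overflow interrupt ->
pfm_interrupt_handler() -> the registered buffer-format handler
(xenoprof_handler) -> xenoprof_log_event(), followed by a
VIRQ_XENOPROF notification to the current vcpu of an active domain.
The dom0 consumer side is outside this changeset; a hedged sketch of
what it might look like in a xenolinux dom0 kernel follows
(bind_virq_to_irqhandler() and its signature are assumed from the
linux-2.6-xen-sparse event-channel code, not defined by this patch):

    /* hypothetical dom0-side consumer; everything here is an assumption
     * layered on the xenolinux event-channel API */
    static irqreturn_t xenoprof_virq_handler(int irq, void *dev_id,
                                             struct pt_regs *regs)
    {
        /* drain newly logged samples from the shared xenoprof buffer */
        return IRQ_HANDLED;
    }

    static int xenoprof_bind_virq(void)
    {
        /* returns the bound irq number, or a negative errno */
        return bind_virq_to_irqhandler(VIRQ_XENOPROF, 0 /* cpu */,
                                       xenoprof_virq_handler, SA_INTERRUPT,
                                       "xenoprof", NULL);
    }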
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/include/asm-ia64/config.h Tue Nov 28 21:35:13 2006 -0700
@@ -125,6 +125,7 @@ extern char _end[]; /* standard ELF symb
// from include/asm-ia64/smp.h
#define get_cpu() smp_processor_id()
#define put_cpu() do {} while(0)
+#define put_cpu_no_resched() do{} while (0)
// needed for common/dom0_ops.c until hyperthreading is supported
#ifdef CONFIG_SMP
@@ -166,6 +167,8 @@ extern int smp_num_siblings;
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#endif
+#define CONFIG_PERFMON
+
#ifndef __ASSEMBLY__
#include "asm/types.h" // for u64
#include "linux/linkage.h" // for asmlinkage which is used by
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/include/asm-ia64/domain.h Tue Nov 28 21:35:13 2006 -0700
@@ -211,6 +211,10 @@ struct arch_vcpu {
#define IO_PORTS_PADDR 0x00000ffffc000000UL
#define IO_PORTS_SIZE 0x0000000004000000UL
+int
+do_perfmon_op(unsigned long cmd,
+ XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
+
#endif /* __ASM_DOMAIN_H__ */
/*
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/include/asm-ia64/linux-xen/asm/perfmon.h
--- a/xen/include/asm-ia64/linux-xen/asm/perfmon.h Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/perfmon.h Tue Nov 28 21:35:13 2006 -0700
@@ -5,6 +5,14 @@
#ifndef _ASM_IA64_PERFMON_H
#define _ASM_IA64_PERFMON_H
+
+#ifdef XEN
+#include <asm/config.h>
+#ifndef pt_regs
+#define pt_regs cpu_user_regs
+#endif
+struct cpu_user_regs;
+#endif
/*
* perfmon comamnds supported on all CPU models
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Tue Nov 28 11:34:03 2006 -0700
+++ b/xen/include/public/arch-ia64.h Tue Nov 28 21:35:13 2006 -0700
@@ -376,6 +376,9 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
/* expose the p2m table into domain */
#define IA64_DOM0VP_expose_p2m 7
+/* xen perfmon */
+#define IA64_DOM0VP_perfmon 8
+
// flags for page assignement to pseudo physical address space
#define _ASSIGN_readonly 0
#define ASSIGN_readonly (1UL << _ASSIGN_readonly)
@@ -462,6 +465,25 @@ struct xen_ia64_boot_param {
(((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG)
#define XENCOMM_INLINE_ADDR(addr) \
((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
+
+/* xen perfmon */
+#ifdef XEN
+#ifndef __ASSEMBLY__
+#ifndef _ASM_IA64_PERFMON_H
+
+#include <xen/list.h> // asm/perfmon.h requires struct list_head
+#include <asm/perfmon.h>
+// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
+
+#endif /* _ASM_IA64_PERFMON_H */
+
+DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
+DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
+DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
+DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
+#endif /* __ASSEMBLY__ */
+#endif /* XEN */
+
#endif /* __HYPERVISOR_IF_IA64_H__ */
/*
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/oprofile/Makefile
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/xen/oprofile/Makefile Tue Nov 28 21:35:13 2006 -0700
@@ -0,0 +1,1 @@
+obj-y += perfmon.o xenoprof.o
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/arch/ia64/xen/oprofile/xenoprof.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/xen/oprofile/xenoprof.c Tue Nov 28 21:35:13 2006 -0700
@@ -0,0 +1,56 @@
+/******************************************************************************
+ * xenoprof.c
+ *
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <public/xen.h>
+#include <xen/xenoprof.h>
+
+int
+xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs)
+{
+ int mode = 0;
+
+ // mode
+ // 0: user, 1: kernel, 2: xen
+ // Xen/IA64 uses ring2 for kernel, and doesn't use ring1.
+ if (ring_2(regs))
+ mode = 1;
+ else if (ring_0(regs))
+ mode = 2;
+ else if (ring_1(regs)) {
+ gdprintk(XENLOG_ERR, "%s:%d ring1 is used!\n", __func__, __LINE__);
+ mode = 1; // fall back to kernel mode.
+ }
+
+ return mode;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 112e0e3b4852 -r 8ab9b43ad557 xen/include/asm-ia64/xenoprof.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/xenoprof.h Tue Nov 28 21:35:13 2006 -0700
@@ -0,0 +1,64 @@
+/******************************************************************************
+ * asm-ia64/xenoprof.h
+ * xenoprof ia64 arch specific header file
+ *
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_XENOPROF_H__
+#define __ASM_XENOPROF_H__
+
+int xenoprof_arch_init(int *num_events, int *is_primary, char *cpu_type);
+int xenoprof_arch_reserve_counters(void);
+int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
+int xenoprof_arch_setup_events(void);
+int xenoprof_arch_enable_virq(void);
+int xenoprof_arch_start(void);
+void xenoprof_arch_stop(void);
+void xenoprof_arch_disable_virq(void);
+void xenoprof_arch_release_counters(void);
+
+struct vcpu;
+struct cpu_user_regs;
+int xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs);
+
+#define xenoprof_shared_gmfn(d, gmaddr, maddr) \
+ assign_domain_page((d), (gmaddr), (maddr));
+
+static inline int
+ring(const struct pt_regs* regs)
+{
+ return ((struct ia64_psr*)(&(regs)->cr_ipsr))->cpl;
+}
+#define ring_0(r) (ring(r) == 0)
+#define ring_1(r) (ring(r) == 1)
+#define ring_2(r) (ring(r) == 2)
+#define ring_3(r) (ring(r) == 3)
+
+#endif /* __ASM_XENOPROF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */