# HG changeset patch
# User iap10@xxxxxxxxxxxxxxxxxxxxx
# Node ID 424166f4f3cfe5b689b71cbfc5b4ea1fbdcab2ed
# Parent ab3813d38b20835557b90a6981d79cca4e8582be
Add some profiling support for writeable pagetables: when PERF_ARRAYS is
enabled, record the EIP that took each writable-pagetable write fault and
keep per-(EIP, domain) counts of how many PTEs each flush revalidates,
bucketed by threshold. The stats are printed and reset together with the
existing perf counters.
Signed-off-by: ian@xxxxxxxxxxxxx
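
For reference, the scheme implemented below keeps a small open-addressed hash
table keyed on (faulting EIP, domain id); each bucket holds one counter per
"PTEs revalidated per flush" threshold, and collisions are resolved by linear
probing. The following standalone sketch is not part of the patch: it is plain
C with hypothetical names and thresholds, included only to show the bucketing
scheme in isolation.

/*
 * Standalone illustration (not part of the patch): an open-addressed hash
 * table keyed on (faulting EIP, domain id), with one counter per
 * "PTEs revalidated per flush" threshold and linear probing on collisions.
 * All names and thresholds below are hypothetical; only the structure
 * mirrors the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_BUCKETS    256
#define NR_THRESHOLDS 5

static const int thresholds[NR_THRESHOLDS] = { 1, 10, 50, 100, 1024 };

struct eip_stat {
    unsigned long eip;                 /* faulting EIP (0 => bucket unused) */
    unsigned int  domid;               /* domain that took the fault */
    uint32_t      count[NR_THRESHOLDS];
};

static struct eip_stat stats[NR_BUCKETS];

/* Cheap hash of (eip, domid) into a bucket index. */
static unsigned int stat_hash(unsigned long eip, unsigned int domid)
{
    return (domid ^ eip ^ (eip >> 8) ^ (eip >> 16) ^ (eip >> 24)) % NR_BUCKETS;
}

/* Record one writable-pagetable flush that revalidated 'modified' PTEs. */
static void stat_update(unsigned long eip, unsigned int domid, int modified)
{
    unsigned int i, start, j;

    i = start = stat_hash(eip, domid);
    do {
        if ( stats[i].eip == 0 )
        {
            /* Empty bucket: claim it for this (eip, domid) pair. */
            stats[i].eip   = eip;
            stats[i].domid = domid;
            memset(stats[i].count, 0, sizeof(stats[i].count));
        }
        if ( (stats[i].eip == eip) && (stats[i].domid == domid) )
        {
            /* Bump the counter for the smallest threshold covering it. */
            for ( j = 0; j < NR_THRESHOLDS; j++ )
                if ( modified <= thresholds[j] )
                {
                    stats[i].count[j]++;
                    return;
                }
            return;                    /* larger than every threshold */
        }
        i = (i + 1) % NR_BUCKETS;      /* collision: probe the next bucket */
    } while ( i != start );
    /* Table full: the real code prints the stats and resets the table. */
}

int main(void)
{
    unsigned int i, j;

    stat_update(0xc0101234UL, 1, 3);   /* domain 1: small update */
    stat_update(0xc0101234UL, 1, 75);  /* domain 1: bigger update, same EIP */
    stat_update(0xc0105678UL, 2, 1);   /* domain 2: different EIP */

    for ( i = 0; i < NR_BUCKETS; i++ )
    {
        if ( stats[i].eip == 0 )
            continue;
        printf("D %u eip %08lx ", stats[i].domid, stats[i].eip);
        for ( j = 0; j < NR_THRESHOLDS; j++ )
            printf("<=%d %4u\t", thresholds[j], (unsigned int)stats[i].count[j]);
        printf("\n");
    }
    return 0;
}
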
diff -r ab3813d38b20 -r 424166f4f3cf xen/Rules.mk
--- a/xen/Rules.mk Fri Aug 12 16:05:37 2005
+++ b/xen/Rules.mk Sat Aug 13 20:47:47 2005
@@ -2,7 +2,7 @@
# If you change any of these configuration options then you must
# 'make clean' before rebuilding.
#
-verbose ?= n
+verbose ?= y
debug ?= n
perfc ?= n
perfc_arrays?= n
diff -r ab3813d38b20 -r 424166f4f3cf xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Aug 12 16:05:37 2005
+++ b/xen/arch/x86/mm.c Sat Aug 13 20:47:47 2005
@@ -2625,6 +2625,129 @@
#define PTWR_PRINTK(_f, _a...) ((void)0)
#endif
+
+#ifdef PERF_ARRAYS
+
+/**************** writeable pagetables profiling functions *****************/
+
+#define ptwr_eip_buckets 256
+
+int ptwr_eip_stat_threshold[] = {1, 10, 50, 100, L1_PAGETABLE_ENTRIES};
+
+#define ptwr_eip_stat_thresholdN (sizeof(ptwr_eip_stat_threshold)/sizeof(int))
+
+typedef struct {
+    unsigned long eip;
+    domid_t       id;
+    u32           val[ptwr_eip_stat_thresholdN];
+} ptwr_eip_stat_t;
+
+ptwr_eip_stat_t ptwr_eip_stats[ptwr_eip_buckets];
+
+static inline unsigned int ptwr_eip_stat_hash( unsigned long eip, domid_t id )
+{
+    return (((unsigned long) id) ^ eip ^ (eip>>8) ^ (eip>>16) ^ (eip>>24)) %
+        ptwr_eip_buckets;
+}
+
+static void ptwr_eip_stat_inc(u32 *n)
+{
+    int i, j;
+
+    (*n)++;
+
+    if ( *n == 0 )
+    {
+        /* Counter wrapped: clamp it and rescale all buckets. */
+        *n = ~0;
+        for ( i = 0; i < ptwr_eip_buckets; i++ )
+            for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
+                ptwr_eip_stats[i].val[j] =
+                    (((u64)ptwr_eip_stats[i].val[j]) + 1) >> 1;
+    }
+}
+
+static void ptwr_eip_stat_update( unsigned long eip, domid_t id, int modified )
+{
+    int i, b, j;
+
+    i = b = ptwr_eip_stat_hash( eip, id );
+
+    do
+    {
+        if ( !ptwr_eip_stats[i].eip )
+        {
+            /* Bucket is unused: claim it for this EIP. */
+            ptwr_eip_stats[i].eip = eip;
+            ptwr_eip_stats[i].id  = id;
+            memset(ptwr_eip_stats[i].val, 0, sizeof(ptwr_eip_stats[i].val));
+        }
+
+        if ( ptwr_eip_stats[i].eip == eip )
+        {
+            /* Bump the counter for the smallest threshold covering 'modified'. */
+            for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
+                if ( modified <= ptwr_eip_stat_threshold[j] )
+                    break;
+            BUG_ON(j >= ptwr_eip_stat_thresholdN);
+            ptwr_eip_stat_inc(&ptwr_eip_stats[i].val[j]);
+            return;
+        }
+
+        i = (i+1) % ptwr_eip_buckets;    /* linear probing */
+    }
+    while ( i != b );
+
+    printk("ptwr_eip_stat: too many EIPs in use!\n");
+
+    ptwr_eip_stat_print();
+    ptwr_eip_stat_reset();
+}
+
+void ptwr_eip_stat_reset(void)
+{
+    memset(ptwr_eip_stats, 0, sizeof(ptwr_eip_stats));
+}
+
+void ptwr_eip_stat_print(void)
+{
+    struct domain *e;
+    domid_t d;
+    int i, j;
+
+    for_each_domain ( e )
+    {
+        d = e->domain_id;
+
+        for ( i = 0; i < ptwr_eip_buckets; i++ )
+        {
+            if ( !ptwr_eip_stats[i].eip || ptwr_eip_stats[i].id != d )
+                continue;
+
+            printk("D %d eip %08lx ",
+                   ptwr_eip_stats[i].id, ptwr_eip_stats[i].eip);
+
+            for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
+                printk("<=%u %4u \t",
+                       ptwr_eip_stat_threshold[j],
+                       ptwr_eip_stats[i].val[j]);
+            printk("\n");
+        }
+    }
+}
+
+#else /* PERF_ARRAYS */
+
+#define ptwr_eip_stat_update( eip, id, modified ) ((void)0)
+
+#endif
+
+/*******************************************************************/
+
/* Re-validate a given p.t. page, given its prior snapshot */
int revalidate_l1(
struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot)
@@ -2742,6 +2865,7 @@
modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
unmap_domain_page(pl1e);
perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
+    ptwr_eip_stat_update(d->arch.ptwr[which].eip, d->domain_id, modified);
d->arch.ptwr[which].prev_nr_updates = modified;
/*
@@ -2897,7 +3021,8 @@
};
/* Write page fault handler: check if guest is trying to modify a PTE. */
-int ptwr_do_page_fault(struct domain *d, unsigned long addr)
+int ptwr_do_page_fault(struct domain *d, unsigned long addr,
+                       struct cpu_user_regs *regs)
{
unsigned long pfn;
struct pfn_info *page;
@@ -2932,6 +3057,10 @@
{
return 0;
}
+
+#if 0 /* Leave this in, as it is useful for debugging. */
+ goto emulate;
+#endif
/* Get the L2 index at which this L1 p.t. is always mapped. */
l2_idx = page->u.inuse.type_info & PGT_va_mask;
@@ -3002,7 +3131,11 @@
d->arch.ptwr[which].l1va = addr | 1;
d->arch.ptwr[which].l2_idx = l2_idx;
d->arch.ptwr[which].vcpu = current;
-
+
+#ifdef PERF_ARRAYS
+    d->arch.ptwr[which].eip = regs->eip;
+#endif
+
/* For safety, disconnect the L1 p.t. page from current space. */
if ( which == PTWR_PT_ACTIVE )
{
diff -r ab3813d38b20 -r 424166f4f3cf xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Fri Aug 12 16:05:37 2005
+++ b/xen/arch/x86/traps.c Sat Aug 13 20:47:47 2005
@@ -438,7 +438,7 @@
&&
KERNEL_MODE(v, regs) &&
((regs->error_code & 3) == 3) && /* write-protection fault */
- ptwr_do_page_fault(d, addr) )
+ ptwr_do_page_fault(d, addr, regs) )
{
UNLOCK_BIGLOCK(d);
return EXCRET_fault_fixed;
diff -r ab3813d38b20 -r 424166f4f3cf xen/common/perfc.c
--- a/xen/common/perfc.c Fri Aug 12 16:05:37 2005
+++ b/xen/common/perfc.c Sat Aug 13 20:47:47 2005
@@ -7,6 +7,7 @@
#include <xen/spinlock.h>
#include <public/dom0_ops.h>
#include <asm/uaccess.h>
+#include <xen/mm.h>
#undef PERFCOUNTER
#undef PERFCOUNTER_CPU
@@ -81,6 +82,10 @@
}
printk("\n");
}
+
+#ifdef PERF_ARRAYS
+    ptwr_eip_stat_print();
+#endif
}
void perfc_reset(unsigned char key)
@@ -118,6 +123,10 @@
break;
}
}
+
+#ifdef PERF_ARRAYS
+    ptwr_eip_stat_reset();
+#endif
}
static dom0_perfc_desc_t perfc_d[NR_PERFCTRS];
diff -r ab3813d38b20 -r 424166f4f3cf xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Fri Aug 12 16:05:37 2005
+++ b/xen/include/asm-x86/mm.h Sat Aug 13 20:47:47 2005
@@ -316,6 +316,9 @@
unsigned int prev_nr_updates;
/* Exec domain which created writable mapping. */
struct vcpu *vcpu;
+    /* EIP of the instruction that took the original write fault
+     * (used for stats collection only). */
+    unsigned long eip;
};
#define PTWR_PT_ACTIVE 0
@@ -327,7 +330,8 @@
int ptwr_init(struct domain *);
void ptwr_destroy(struct domain *);
void ptwr_flush(struct domain *, const int);
-int ptwr_do_page_fault(struct domain *, unsigned long);
+int ptwr_do_page_fault(struct domain *, unsigned long,
+                       struct cpu_user_regs *);
int revalidate_l1(struct domain *, l1_pgentry_t *, l1_pgentry_t *);
void cleanup_writable_pagetable(struct domain *d);
@@ -350,6 +354,18 @@
#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d) ((void)0)
#define audit_domains() ((void)0)
+
+#endif
+
+#ifdef PERF_ARRAYS
+
+void ptwr_eip_stat_reset(void);
+void ptwr_eip_stat_print(void);
+
+#else
+
+#define ptwr_eip_stat_reset() ((void)0)
+#define ptwr_eip_stat_print() ((void)0)
#endif