# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 8cc7ce549d00bef90a568ed64a0309cec3d399e9
# Parent 7d0d88685f797088c2f8e5e41055c2ad29552b4d
Sanitise the trace-buffer hypervisor<->user interface.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r 7d0d88685f79 -r 8cc7ce549d00 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Sun Oct 30 22:30:41 2005
+++ b/tools/libxc/xenctrl.h Mon Oct 31 09:45:31 2005
@@ -507,12 +507,10 @@
int xc_tbuf_enable(int xc_handle, int enable);
/**
- * This function sets the size of the trace buffers. Setting it to zero
- * deallocates the memory used for trace buffers, and setting it to a
- * non-zero value specifies the number of pages per cpu to allocate.
- * To change the size of an existing allocation, you must first deallocate
- * it then reallocate it. No change in size is allowed when tracing is
- * enabled; A disable call must be made first.
+ * This function sets the size of the trace buffers. Setting the size
+ * is currently a one-shot operation that may be performed either at boot
+ * time or via this interface, not both. The buffer size must be set before
+ * enabling tracing.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm size the size in pages per cpu for the trace buffers
diff -r 7d0d88685f79 -r 8cc7ce549d00 tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Sun Oct 30 22:30:41 2005
+++ b/tools/xentrace/xentrace.c Mon Oct 31 09:45:31 2005
@@ -23,9 +23,6 @@
#include "xc_private.h"
-typedef struct { int counter; } atomic_t;
-#define _atomic_read(v) ((v).counter)
-
#include <xen/trace.h>
extern FILE *stderr;
@@ -148,7 +145,7 @@
}
tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
- size * num, PROT_READ,
+ size * num, PROT_READ | PROT_WRITE,
tbufs_mfn);
xc_interface_close(xc_handle);
@@ -240,10 +237,7 @@
* mapped in user space. Note that the trace buffer metadata contains machine
* pointers - the array returned allows more convenient access to them.
*/
-struct t_rec **init_rec_ptrs(unsigned long tbufs_mfn,
- struct t_buf *tbufs_mapped,
- struct t_buf **meta,
- unsigned int num)
+struct t_rec **init_rec_ptrs(struct t_buf **meta, unsigned int num)
{
int i;
struct t_rec **data;
@@ -256,36 +250,9 @@
}
for ( i = 0; i < num; i++ )
- data[i] = (struct t_rec *)(meta[i]->rec_addr -
(tbufs_mfn<<XC_PAGE_SHIFT) /* XXX */
- + (unsigned long)tbufs_mapped);
+ data[i] = (struct t_rec *)(meta[i] + 1);
return data;
-}
-
-/**
- * init_tail_idxs - initialise an array of tail indexes
- * @bufs: array of pointers to trace buffer metadata
- * @num: number of trace buffers
- *
- * The tail indexes indicate where we're read to so far in the data array of a
- * trace buffer. Each entry in this table corresponds to the tail index for a
- * particular trace buffer.
- */
-unsigned long *init_tail_idxs(struct t_buf **bufs, unsigned int num)
-{
- int i;
- unsigned long *tails = calloc(num, sizeof(unsigned int));
-
- if ( tails == NULL )
- {
- PERROR("Failed to allocate memory for tail pointers\n");
- exit(EXIT_FAILURE);
- }
-
- for ( i = 0; i<num; i++ )
- tails[i] = _atomic_read(bufs[i]->rec_idx);
-
- return tails;
}
/**
@@ -329,7 +296,6 @@
struct t_buf **meta; /* pointers to the trace buffer metadata */
struct t_rec **data; /* pointers to the trace buffer data areas
* where they are mapped into user space. */
- unsigned long *cons; /* store tail indexes for the trace buffers */
unsigned long tbufs_mfn; /* mfn of the tbufs */
unsigned int num; /* number of trace buffers / logical CPUS */
unsigned long size; /* size of a single trace buffer */
@@ -346,19 +312,22 @@
size_in_recs = (size - sizeof(struct t_buf)) / sizeof(struct t_rec);
/* build arrays of convenience ptrs */
- meta = init_bufs_ptrs (tbufs_mapped, num, size);
- data = init_rec_ptrs (tbufs_mfn, tbufs_mapped, meta, num);
- cons = init_tail_idxs (meta, num);
+ meta = init_bufs_ptrs(tbufs_mapped, num, size);
+ data = init_rec_ptrs(meta, num);
/* now, scan buffers for events */
while ( !interrupted )
{
- for ( i = 0; ( i < num ) && !interrupted; i++ )
- while( cons[i] != _atomic_read(meta[i]->rec_idx) )
+ for ( i = 0; (i < num) && !interrupted; i++ )
+ {
+ while ( meta[i]->cons != meta[i]->prod )
{
- write_rec(i, data[i] + cons[i], logfile);
- cons[i] = (cons[i] + 1) % size_in_recs;
+ rmb(); /* read prod, then read item. */
+ write_rec(i, data[i] + meta[i]->cons % size_in_recs, logfile);
+ mb(); /* read item, then update cons. */
+ meta[i]->cons++;
}
+ }
nanosleep(&opts.poll_sleep, NULL);
}
@@ -366,7 +335,6 @@
/* cleanup */
free(meta);
free(data);
- free(cons);
/* don't need to munmap - cleanup is automatic */
fclose(logfile);
diff -r 7d0d88685f79 -r 8cc7ce549d00 xen/common/trace.c
--- a/xen/common/trace.c Sun Oct 30 22:30:41 2005
+++ b/xen/common/trace.c Mon Oct 31 09:45:31 2005
@@ -37,6 +37,8 @@
/* Pointers to the meta-data objects for all system trace buffers */
static struct t_buf *t_bufs[NR_CPUS];
+static struct t_rec *t_recs[NR_CPUS];
+static int nr_recs;
/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
@@ -70,6 +72,8 @@
nr_pages = num_online_cpus() * opt_tbuf_size;
order = get_order_from_pages(nr_pages);
+ nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
+ sizeof(struct t_rec);
if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
{
@@ -84,13 +88,11 @@
for_each_online_cpu ( i )
{
buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
-
- _atomic_set(buf->rec_idx, 0);
- buf->rec_num = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf))
- / sizeof(struct t_rec);
- buf->rec = (struct t_rec *)(buf + 1);
- buf->rec_addr = __pa(buf->rec);
- }
+ buf->cons = buf->prod = 0;
+ buf->nr_recs = nr_recs;
+ t_recs[i] = (struct t_rec *)(buf + 1);
+ }
+
return 0;
}
@@ -223,9 +225,9 @@
void trace(u32 event, unsigned long d1, unsigned long d2,
unsigned long d3, unsigned long d4, unsigned long d5)
{
- atomic_t old, new, seen;
struct t_buf *buf;
struct t_rec *rec;
+ unsigned long flags;
BUG_ON(!tb_init_done);
@@ -249,17 +251,15 @@
buf = t_bufs[smp_processor_id()];
- do
- {
- old = buf->rec_idx;
- _atomic_set(new, (_atomic_read(old) + 1) % buf->rec_num);
- seen = atomic_compareandswap(old, new, &buf->rec_idx);
- }
- while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
-
- wmb();
-
- rec = &buf->rec[_atomic_read(old)];
+ local_irq_save(flags);
+
+ if ( (buf->prod - buf->cons) >= nr_recs )
+ {
+ local_irq_restore(flags);
+ return;
+ }
+
+ rec = &t_recs[smp_processor_id()][buf->prod % nr_recs];
rdtscll(rec->cycles);
rec->event = event;
rec->data[0] = d1;
@@ -267,6 +267,11 @@
rec->data[2] = d3;
rec->data[3] = d4;
rec->data[4] = d5;
+
+ wmb();
+ buf->prod++;
+
+ local_irq_restore(flags);
}
/*
diff -r 7d0d88685f79 -r 8cc7ce549d00 xen/include/public/trace.h
--- a/xen/include/public/trace.h Sun Oct 30 22:30:41 2005
+++ b/xen/include/public/trace.h Mon Oct 31 09:45:31 2005
@@ -65,13 +65,10 @@
* field, indexes into an array of struct t_rec's.
*/
struct t_buf {
- /* Used by both Xen and user space. */
- atomic_t rec_idx; /* the next record to save to */
- unsigned int rec_num; /* number of records in this trace buffer */
- /* Used by Xen only. */
- struct t_rec *rec; /* start of records */
- /* Used by user space only. */
- unsigned long rec_addr; /* machine address of the start of records */
+ unsigned int cons; /* Next item to be consumed by control tools. */
+ unsigned int prod; /* Next item to be produced by Xen. */
+ unsigned int nr_recs; /* Number of records in this trace buffer. */
+ /* 'nr_recs' records follow immediately after the meta-data header. */
};
#endif /* __XEN_PUBLIC_TRACE_H__ */
diff -r 7d0d88685f79 -r 8cc7ce549d00 xen/include/xen/trace.h
--- a/xen/include/xen/trace.h Sun Oct 30 22:30:41 2005
+++ b/xen/include/xen/trace.h Mon Oct 31 09:45:31 2005
@@ -24,7 +24,6 @@
#define __XEN_TRACE_H__
#include <xen/config.h>
-#include <asm/atomic.h>
#include <public/dom0_ops.h>
#include <public/trace.h>
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog