# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1220535506 -3600
# Node ID ae9b223a675d9ed37cffbc24d0abe83ef2a30ab3
# Parent 8d982c7a0d303de1200134fcf3a2573f6f4449fa
More efficient implementation of SCHEDOP_poll when polling a single port.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/common/domain.c | 4 ++-
xen/common/event_channel.c | 21 ++++++++++------
xen/common/schedule.c | 57 ++++++++++++++++++++++++++++++++++++---------
xen/include/xen/sched.h | 21 ++++++++--------
4 files changed, 73 insertions(+), 30 deletions(-)
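
For reference, the single-port case this change optimises is the common guest
pattern (e.g. waiting on one spinlock or IRQ event channel). Below is a minimal
guest-side sketch of such a poll, assuming the usual public-header names
(struct sched_poll, SCHEDOP_poll, set_xen_guest_handle) and a
HYPERVISOR_sched_op() hypercall wrapper as provided by Linux or mini-os; the
poll_one_port() helper itself is hypothetical and not part of this patch:

    #include <xen/interface/sched.h>  /* struct sched_poll, SCHEDOP_poll */

    /* Block this VCPU until 'port' is pending or 'timeout' expires
     * (timeout 0 == no timer is armed, i.e. wait indefinitely).  With
     * nr_ports == 1 the hypervisor can record the port in v->poll_evtchn
     * and wake only matching pollers instead of scanning every VCPU. */
    static void poll_one_port(evtchn_port_t port, uint64_t timeout)
    {
        struct sched_poll poll = { .nr_ports = 1, .timeout = timeout };

        set_xen_guest_handle(poll.ports, &port);

        /* Returns 0 on wakeup or timeout; -EINVAL/-EFAULT if malformed. */
        if ( HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0 )
        {
            /* Unexpected failure: fall back to plain blocking. */
        }
    }
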
diff -r 8d982c7a0d30 -r ae9b223a675d xen/common/domain.c
--- a/xen/common/domain.c Thu Sep 04 14:37:56 2008 +0100
+++ b/xen/common/domain.c Thu Sep 04 14:38:26 2008 +0100
@@ -651,9 +651,11 @@ void vcpu_reset(struct vcpu *v)
set_bit(_VPF_down, &v->pause_flags);
+ clear_bit(v->vcpu_id, d->poll_mask);
+ v->poll_evtchn = 0;
+
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
- v->is_polling = 0;
v->is_initialised = 0;
v->nmi_pending = 0;
v->mce_pending = 0;
diff -r 8d982c7a0d30 -r ae9b223a675d xen/common/event_channel.c
--- a/xen/common/event_channel.c Thu Sep 04 14:37:56 2008 +0100
+++ b/xen/common/event_channel.c Thu Sep 04 14:38:26 2008 +0100
@@ -545,6 +545,7 @@ static int evtchn_set_pending(struct vcp
static int evtchn_set_pending(struct vcpu *v, int port)
{
struct domain *d = v->domain;
+ int vcpuid;
/*
* The following bit operations must happen in strict order.
@@ -564,15 +565,19 @@ static int evtchn_set_pending(struct vcp
}
/* Check if some VCPU might be polling for this event. */
- if ( unlikely(d->is_polling) )
- {
- d->is_polling = 0;
- smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
- for_each_vcpu ( d, v )
+ if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
+ return 0;
+
+ /* Wake any interested (or potentially interested) pollers. */
+ for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
+ vcpuid < MAX_VIRT_CPUS;
+ vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
+ {
+ v = d->vcpu[vcpuid];
+ if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+ test_and_clear_bit(vcpuid, d->poll_mask) )
{
- if ( !v->is_polling )
- continue;
- v->is_polling = 0;
+ v->poll_evtchn = 0;
vcpu_unblock(v);
}
}
diff -r 8d982c7a0d30 -r ae9b223a675d xen/common/schedule.c
--- a/xen/common/schedule.c Thu Sep 04 14:37:56 2008 +0100
+++ b/xen/common/schedule.c Thu Sep 04 14:38:26 2008 +0100
@@ -198,6 +198,27 @@ void vcpu_wake(struct vcpu *v)
TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
+void vcpu_unblock(struct vcpu *v)
+{
+ if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
+ return;
+
+ /* Polling period ends when a VCPU is unblocked. */
+ if ( unlikely(v->poll_evtchn != 0) )
+ {
+ v->poll_evtchn = 0;
+ /*
+ * We *must* re-clear _VPF_blocked to avoid racing other wakeups of
+ * this VCPU (and it then going back to sleep on poll_mask).
+ * Test-and-clear is idiomatic and ensures clear_bit not reordered.
+ */
+ if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
+ clear_bit(_VPF_blocked, &v->pause_flags);
+ }
+
+ vcpu_wake(v);
+}
+
static void vcpu_migrate(struct vcpu *v)
{
unsigned long flags;
@@ -337,7 +358,7 @@ static long do_poll(struct sched_poll *s
struct vcpu *v = current;
struct domain *d = v->domain;
evtchn_port_t port;
- long rc = 0;
+ long rc;
unsigned int i;
/* Fairly arbitrary limit. */
@@ -348,11 +369,24 @@ static long do_poll(struct sched_poll *s
return -EFAULT;
set_bit(_VPF_blocked, &v->pause_flags);
- v->is_polling = 1;
- d->is_polling = 1;
-
+ v->poll_evtchn = -1;
+ set_bit(v->vcpu_id, d->poll_mask);
+
+#ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
/* Check for events /after/ setting flags: avoids wakeup waiting race. */
- smp_wmb();
+ smp_mb();
+
+ /*
+ * Someone may have seen we are blocked but not that we are polling, or
+ * vice versa. We are certainly being woken, so clean up and bail. Beyond
+ * this point others can be guaranteed to clean up for us if they wake us.
+ */
+ rc = 0;
+ if ( (v->poll_evtchn == 0) ||
+ !test_bit(_VPF_blocked, &v->pause_flags) ||
+ !test_bit(v->vcpu_id, d->poll_mask) )
+ goto out;
+#endif
for ( i = 0; i < sched_poll->nr_ports; i++ )
{
@@ -369,6 +403,9 @@ static long do_poll(struct sched_poll *s
goto out;
}
+ if ( sched_poll->nr_ports == 1 )
+ v->poll_evtchn = port;
+
if ( sched_poll->timeout != 0 )
set_timer(&v->poll_timer, sched_poll->timeout);
@@ -378,7 +415,8 @@ static long do_poll(struct sched_poll *s
return 0;
out:
- v->is_polling = 0;
+ v->poll_evtchn = 0;
+ clear_bit(v->vcpu_id, d->poll_mask);
clear_bit(_VPF_blocked, &v->pause_flags);
return rc;
}
@@ -760,11 +798,8 @@ static void poll_timer_fn(void *data)
{
struct vcpu *v = data;
- if ( !v->is_polling )
- return;
-
- v->is_polling = 0;
- vcpu_unblock(v);
+ if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
+ vcpu_unblock(v);
}
/* Initialise the data structures. */
diff -r 8d982c7a0d30 -r ae9b223a675d xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Sep 04 14:37:56 2008 +0100
+++ b/xen/include/xen/sched.h Thu Sep 04 14:38:26 2008 +0100
@@ -106,8 +106,6 @@ struct vcpu
bool_t fpu_initialised;
/* Has the FPU been used since it was last saved? */
bool_t fpu_dirtied;
- /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
- bool_t is_polling;
/* Initialization completed for this VCPU? */
bool_t is_initialised;
/* Currently running on a CPU? */
@@ -133,6 +131,13 @@ struct vcpu
bool_t paused_for_shutdown;
/* VCPU affinity is temporarily locked from controller changes? */
bool_t affinity_locked;
+
+ /*
+ * > 0: a single port is being polled;
+ * = 0: nothing is being polled (vcpu should be clear in d->poll_mask);
+ * < 0: multiple ports may be being polled.
+ */
+ int poll_evtchn;
unsigned long pause_flags;
atomic_t pause_count;
@@ -209,14 +214,15 @@ struct domain
struct domain *target;
/* Is this guest being debugged by dom0? */
bool_t debugger_attached;
- /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
- bool_t is_polling;
/* Is this guest dying (i.e., a zombie)? */
enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
/* Domain is paused by controller software? */
bool_t is_paused_by_controller;
/* Domain's VCPUs are pinned 1:1 to physical CPUs? */
bool_t is_pinned;
+
+ /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+ DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
/* Guest has shut down (inc. reason code)? */
spinlock_t shutdown_lock;
@@ -507,6 +513,7 @@ static inline int vcpu_runnable(struct v
atomic_read(&v->domain->pause_count));
}
+void vcpu_unblock(struct vcpu *v);
void vcpu_pause(struct vcpu *v);
void vcpu_pause_nosync(struct vcpu *v);
void domain_pause(struct domain *d);
@@ -523,12 +530,6 @@ void vcpu_unlock_affinity(struct vcpu *v
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
-static inline void vcpu_unblock(struct vcpu *v)
-{
- if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
- vcpu_wake(v);
-}
-
#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))