# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID ffa5b2975dfffa1c3a0b8f6fc8fee877f9bfe295
# Parent 49dcd838b7df72bfe66ab9233e360b52ceca3115
[XEN] Add Xen-attached event channels, which will be used
by HVM for the ioreq_packet port.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/common/event_channel.c |  108 ++++++++++++++++++++++++++++++++++++++++++++-
xen/include/xen/event.h    |   37 ++++++++++++---
xen/include/xen/sched.h    |    8 ++-
3 files changed, 143 insertions(+), 10 deletions(-)
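
The new interface lets Xen itself be the consumer of one end of an event
channel (marked consumer_is_xen), while a guest, typically the device model,
binds the other end as usual. A rough setup/teardown sketch follows; it is
not part of this patch, and hvm_setup_ioreq_port, hvm_teardown_ioreq_port,
dm_domid and the xen_port field are illustrative names only:

    /* Illustrative only: allocate a Xen-attached port at HVM VCPU setup
     * time, naming the device-model domain as the remote end, and release
     * it again at teardown (assumes xen/event.h and xen/sched.h). */
    static int hvm_setup_ioreq_port(struct vcpu *v, domid_t dm_domid)
    {
        int port = alloc_unbound_xen_event_channel(v, dm_domid);
        if ( port < 0 )
            return port;
        v->arch.hvm_vcpu.xen_port = port;    /* assumed storage location */
        return 0;
    }

    static void hvm_teardown_ioreq_port(struct vcpu *v)
    {
        free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
    }

The device model then binds to the returned port with the normal interdomain
bind; when it later sends on its end, evtchn_send() wakes a VCPU blocked in
Xen rather than setting a pending bit, as the evtchn_send() hunk below shows.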
diff -r 49dcd838b7df -r ffa5b2975dff xen/common/event_channel.c
--- a/xen/common/event_channel.c Fri Aug 04 20:30:12 2006 +0100
+++ b/xen/common/event_channel.c Fri Aug 04 20:34:44 2006 +0100
@@ -333,6 +333,14 @@ static long __evtchn_close(struct domain
}
chn1 = evtchn_from_port(d1, port1);
+
+ /* Guest cannot close a Xen-attached event channel. */
+ if ( unlikely(chn1->consumer_is_xen) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
switch ( chn1->state )
{
case ECS_FREE:
@@ -441,6 +449,7 @@ long evtchn_send(unsigned int lport)
{
struct evtchn *lchn, *rchn;
struct domain *ld = current->domain, *rd;
+ struct vcpu *rvcpu;
int rport, ret = 0;
spin_lock(&ld->evtchn_lock);
@@ -452,13 +461,32 @@ long evtchn_send(unsigned int lport)
}
lchn = evtchn_from_port(ld, lport);
+
+ /* Guest cannot send via a Xen-attached event channel. */
+ if ( unlikely(lchn->consumer_is_xen) )
+ {
+ spin_unlock(&ld->evtchn_lock);
+ return -EINVAL;
+ }
+
switch ( lchn->state )
{
case ECS_INTERDOMAIN:
rd = lchn->u.interdomain.remote_dom;
rport = lchn->u.interdomain.remote_port;
rchn = evtchn_from_port(rd, rport);
- evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+ rvcpu = rd->vcpu[rchn->notify_vcpu_id];
+ if ( rchn->consumer_is_xen )
+ {
+ /* Xen consumers need notification only if they are blocked. */
+ if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
+ &rvcpu->vcpu_flags) )
+ vcpu_wake(rvcpu);
+ }
+ else
+ {
+ evtchn_set_pending(rvcpu, rport);
+ }
break;
case ECS_IPI:
evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
@@ -638,6 +666,14 @@ long evtchn_bind_vcpu(unsigned int port,
}
chn = evtchn_from_port(d, port);
+
+ /* Guest cannot re-bind a Xen-attached event channel. */
+ if ( unlikely(chn->consumer_is_xen) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
switch ( chn->state )
{
case ECS_VIRQ:
@@ -804,6 +840,71 @@ long do_event_channel_op(int cmd, XEN_GU
}
+int alloc_unbound_xen_event_channel(
+ struct vcpu *local_vcpu, domid_t remote_domid)
+{
+ struct evtchn *chn;
+ struct domain *d = local_vcpu->domain;
+ int port;
+
+ spin_lock(&d->evtchn_lock);
+
+ if ( (port = get_free_port(d)) < 0 )
+ goto out;
+ chn = evtchn_from_port(d, port);
+
+ chn->state = ECS_UNBOUND;
+ chn->consumer_is_xen = 1;
+ chn->notify_vcpu_id = local_vcpu->vcpu_id;
+ chn->u.unbound.remote_domid = remote_domid;
+
+ out:
+ spin_unlock(&d->evtchn_lock);
+
+ return port;
+}
+
+
+void free_xen_event_channel(
+ struct vcpu *local_vcpu, int port)
+{
+ struct evtchn *chn;
+ struct domain *d = local_vcpu->domain;
+
+ spin_lock(&d->evtchn_lock);
+ chn = evtchn_from_port(d, port);
+ BUG_ON(!chn->consumer_is_xen);
+ chn->consumer_is_xen = 0;
+ spin_unlock(&d->evtchn_lock);
+
+ (void)__evtchn_close(d, port);
+}
+
+
+void notify_via_xen_event_channel(int lport)
+{
+ struct evtchn *lchn, *rchn;
+ struct domain *ld = current->domain, *rd;
+ int rport;
+
+ spin_lock(&ld->evtchn_lock);
+
+ ASSERT(port_is_valid(ld, lport));
+ lchn = evtchn_from_port(ld, lport);
+ ASSERT(lchn->consumer_is_xen);
+
+ if ( likely(lchn->state == ECS_INTERDOMAIN) )
+ {
+ rd = lchn->u.interdomain.remote_dom;
+ rport = lchn->u.interdomain.remote_port;
+ rchn = evtchn_from_port(rd, rport);
+ evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+ }
+
+ spin_unlock(&ld->evtchn_lock);
+}
+
+
int evtchn_init(struct domain *d)
{
spin_lock_init(&d->evtchn_lock);
@@ -819,7 +920,10 @@ void evtchn_destroy(struct domain *d)
int i;
for ( i = 0; port_is_valid(d, i); i++ )
- (void)__evtchn_close(d, i);
+ {
+ evtchn_from_port(d, i)->consumer_is_xen = 0;
+ (void)__evtchn_close(d, i);
+ }
for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
xfree(d->evtchn[i]);
diff -r 49dcd838b7df -r ffa5b2975dff xen/include/xen/event.h
--- a/xen/include/xen/event.h Fri Aug 04 20:30:12 2006 +0100
+++ b/xen/include/xen/event.h Fri Aug 04 20:34:44 2006 +0100
@@ -15,33 +15,58 @@
#include <asm/bitops.h>
#include <asm/event.h>
-extern void evtchn_set_pending(struct vcpu *v, int port);
+void evtchn_set_pending(struct vcpu *v, int port);
/*
* send_guest_vcpu_virq: Notify guest via a per-VCPU VIRQ.
* @v: VCPU to which virtual IRQ should be sent
* @virq: Virtual IRQ number (VIRQ_*)
*/
-extern void send_guest_vcpu_virq(struct vcpu *v, int virq);
+void send_guest_vcpu_virq(struct vcpu *v, int virq);
/*
* send_guest_global_virq: Notify guest via a global VIRQ.
* @d: Domain to which virtual IRQ should be sent
* @virq: Virtual IRQ number (VIRQ_*)
*/
-extern void send_guest_global_virq(struct domain *d, int virq);
+void send_guest_global_virq(struct domain *d, int virq);
/*
* send_guest_pirq:
* @d: Domain to which physical IRQ should be sent
* @pirq: Physical IRQ number
*/
-extern void send_guest_pirq(struct domain *d, int pirq);
+void send_guest_pirq(struct domain *d, int pirq);
/* Send a notification from a local event-channel port. */
-extern long evtchn_send(unsigned int lport);
+long evtchn_send(unsigned int lport);
/* Bind a local event-channel port to the specified VCPU. */
-extern long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+
+/* Allocate/free a Xen-attached event channel port. */
+int alloc_unbound_xen_event_channel(
+ struct vcpu *local_vcpu, domid_t remote_domid);
+void free_xen_event_channel(
+ struct vcpu *local_vcpu, int port);
+
+/* Notify remote end of a Xen-attached event channel. */
+void notify_via_xen_event_channel(int lport);
+
+/* Wait on a Xen-attached event channel. */
+#define wait_on_xen_event_channel(port, condition) \
+ do { \
+ if ( condition ) \
+ break; \
+ set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ mb(); /* set blocked status /then/ re-evaluate condition */ \
+ if ( condition ) \
+ { \
+ clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ break; \
+ } \
+ raise_softirq(SCHEDULE_SOFTIRQ); \
+ do_softirq(); \
+ } while ( 0 )
#endif /* __XEN_EVENT_H__ */
diff -r 49dcd838b7df -r ffa5b2975dff xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Fri Aug 04 20:30:12 2006 +0100
+++ b/xen/include/xen/sched.h Fri Aug 04 20:34:44 2006 +0100
@@ -36,7 +36,8 @@ struct evtchn
#define ECS_PIRQ 4 /* Channel is bound to a physical IRQ line. */
#define ECS_VIRQ 5 /* Channel is bound to a virtual IRQ line. */
#define ECS_IPI 6 /* Channel is bound to a virtual IPI line. */
- u16 state; /* ECS_* */
+ u8 state; /* ECS_* */
+ u8 consumer_is_xen; /* Consumed by Xen or by guest? */
u16 notify_vcpu_id; /* VCPU for local delivery notification */
union {
struct {
@@ -375,6 +376,9 @@ extern struct domain *domain_list;
/* VCPU is paused by the hypervisor? */
#define _VCPUF_paused 11
#define VCPUF_paused (1UL<<_VCPUF_paused)
+ /* VCPU is blocked awaiting an event to be consumed by Xen. */
+#define _VCPUF_blocked_in_xen 12
+#define VCPUF_blocked_in_xen (1UL<<_VCPUF_blocked_in_xen)
/*
* Per-domain flags (domain_flags).
@@ -404,7 +408,7 @@ static inline int vcpu_runnable(struct v
static inline int vcpu_runnable(struct vcpu *v)
{
return ( !(v->vcpu_flags &
- (VCPUF_blocked|VCPUF_down|VCPUF_paused)) &&
+ (VCPUF_blocked|VCPUF_down|VCPUF_paused|VCPUF_blocked_in_xen)) &&
!(v->domain->domain_flags &
(DOMF_shutdown|DOMF_ctrl_pause|DOMF_paused)) );
}
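
For context, a rough sketch of how the issuing side is expected to drive the
new interface; it is not part of this patch, and get_ioreq(), the
STATE_IOREQ_READY/STATE_IORESP_READY constants and the xen_port field are
assumed names, not definitions introduced here:

    /* Illustrative only: signal the device model, then block in Xen until
     * its evtchn_send() clears _VCPUF_blocked_in_xen and wakes this VCPU. */
    static void send_ioreq_and_wait(struct vcpu *v)
    {
        ioreq_t *p = get_ioreq(v);                /* assumed accessor */

        p->state = STATE_IOREQ_READY;             /* assumed state names */
        notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);

        /* wait_on_xen_event_channel() re-checks its condition after setting
         * _VCPUF_blocked_in_xen, so a wakeup from evtchn_send() cannot be
         * lost; it blocks at most once per invocation, hence the loop. */
        while ( p->state != STATE_IORESP_READY )
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      p->state == STATE_IORESP_READY);
    }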