|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 12/20] xen/domctl: Merge max_vcpus into createdomain
XEN_DOMCTL_max_vcpus is a mandatory hypercall, but nothing actually prevents a
toolstack from unpausing a domain with no vcpus.
Originally, d->vcpu[] was an embedded array in struct domain, but c/s
fb442e217 "x86_64: allow more vCPU-s per guest" in Xen 4.0 altered it to being
dynamically allocated. A side effect of this is that d->vcpu[] is NULL until
XEN_DOMCTL_max_vcpus has completed, but a lot of hypercalls blindly
dereference it.
Even today, the behaviour of XEN_DOMCTL_max_vcpus is a mandatory singleton
call which can't change the number of vcpus once a value has been chosen.
Therefore, delete XEN_DOMCTL_max_vcpus (including XSM hooks and toolstack
wrappers) and retain the functionality in XEN_DOMCTL_createdomain.
This will allow future cleanup to ensure that d->vcpu[] is always valid for a
locatable domain, and allow simplification of some creation logic which needs
to size domain-wide objects based on max_vcpus, which currently has to be
deferred until vcpu construction.
For the python stubs, extend the domain_create keyword list to take a
max_vcpus parameter, in lieu of deleting the pyxc_domain_max_vcpus function.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Christian Lindig <christian.lindig@xxxxxxxxxx>
CC: David Scott <dave@xxxxxxxxxx>
CC: Jon Ludlam <jonathan.ludlam@xxxxxxxxxxxxx>
CC: Rob Hoes <Rob.Hoes@xxxxxxxxxx>
CC: Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Hypervisor side cleanup is present in later patches
---
tools/flask/policy/modules/dom0.te | 2 +-
tools/flask/policy/modules/xen.if | 2 +-
tools/helpers/init-xenstore-domain.c | 7 +--
tools/libxc/include/xenctrl.h | 12 ----
tools/libxc/xc_domain.c | 9 ---
tools/libxl/libxl_create.c | 1 +
tools/libxl/libxl_dom.c | 5 --
tools/ocaml/libs/xc/xenctrl.ml | 4 +-
tools/ocaml/libs/xc/xenctrl.mli | 3 +-
tools/ocaml/libs/xc/xenctrl_stubs.c | 25 +++-----
tools/python/xen/lowlevel/xc/xc.c | 31 ++--------
xen/common/domctl.c | 110 ++++++++++++-----------------------
xen/include/public/domctl.h | 10 +---
xen/xsm/flask/hooks.c | 3 -
xen/xsm/flask/policy/access_vectors | 2 -
15 files changed, 57 insertions(+), 169 deletions(-)
diff --git a/tools/flask/policy/modules/dom0.te
b/tools/flask/policy/modules/dom0.te
index dfdcdcd..70a35fb 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -32,7 +32,7 @@ allow dom0_t xen_t:mmu memorymap;
# Allow dom0 to use these domctls on itself. For domctls acting on other
# domains, see the definitions of create_domain and manage_domain.
allow dom0_t dom0_t:domain {
- setvcpucontext max_vcpus setaffinity getaffinity getscheduler
+ setvcpucontext setaffinity getaffinity getscheduler
getdomaininfo getvcpuinfo getvcpucontext setdomainmaxmem setdomainhandle
setdebugging hypercall settime setaddrsize getaddrsize trigger
getpodtarget setpodtarget set_misc_info set_virq_handler
diff --git a/tools/flask/policy/modules/xen.if
b/tools/flask/policy/modules/xen.if
index 5af984c..350d5fb 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -46,7 +46,7 @@ define(`declare_build_label', `
')
define(`create_domain_common', `
- allow $1 $2:domain { create max_vcpus setdomainmaxmem setaddrsize
+ allow $1 $2:domain { create setdomainmaxmem setaddrsize
getdomaininfo hypercall setvcpucontext getscheduler
getvcpuinfo getaddrsize getaffinity setaffinity
settime setdomainhandle getvcpucontext set_misc_info };
diff --git a/tools/helpers/init-xenstore-domain.c
b/tools/helpers/init-xenstore-domain.c
index 4771750..0a09261 100644
--- a/tools/helpers/init-xenstore-domain.c
+++ b/tools/helpers/init-xenstore-domain.c
@@ -66,6 +66,7 @@ static int build(xc_interface *xch)
struct xen_domctl_createdomain config = {
.ssidref = SECINITSID_DOMU,
.flags = XEN_DOMCTL_CDF_xs_domain,
+ .max_vcpus = 1,
.max_evtchn_port = -1, /* No limit. */
/*
@@ -100,12 +101,6 @@ static int build(xc_interface *xch)
fprintf(stderr, "xc_domain_create failed\n");
goto err;
}
- rv = xc_domain_max_vcpus(xch, domid, 1);
- if ( rv )
- {
- fprintf(stderr, "xc_domain_max_vcpus failed\n");
- goto err;
- }
rv = xc_domain_setmaxmem(xch, domid, limit_kb);
if ( rv )
{
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 0c7c07c..b7ea16c 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -521,18 +521,6 @@ int xc_domain_dumpcore_via_callback(xc_interface *xch,
void *arg,
dumpcore_rtn_t dump_rtn);
-/*
- * This function sets the maximum number of vcpus that a domain may create.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id in which vcpus are to be created.
- * @parm max the maximum number of vcpus that the domain may create.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_max_vcpus(xc_interface *xch,
- uint32_t domid,
- unsigned int max);
-
/**
* This function pauses a domain. A paused domain still exists in memory
* however it does not receive any timeslices from the hypervisor.
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index e8d0734..3835d2e 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1292,15 +1292,6 @@ int xc_domain_get_pod_target(xc_interface *xch,
}
#endif
-int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max)
-{
- DECLARE_DOMCTL;
- domctl.cmd = XEN_DOMCTL_max_vcpus;
- domctl.domain = domid;
- domctl.u.max_vcpus.max = max;
- return do_domctl(xch, &domctl);
-}
-
int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
xen_domain_handle_t handle)
{
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index c8eb59c..de59d2a 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -565,6 +565,7 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config
*d_config,
if (!libxl_domid_valid_guest(*domid)) {
struct xen_domctl_createdomain create = {
.ssidref = info->ssidref,
+ .max_vcpus = b_info->max_vcpus,
.max_evtchn_port = b_info->event_channels,
.max_grant_frames = b_info->max_grant_frames,
.max_maptrack_frames = b_info->max_maptrack_frames,
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 9194109..762ee9a 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -353,11 +353,6 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
int rc;
uint64_t size;
- if (xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus) != 0) {
- LOG(ERROR, "Couldn't set max vcpu count");
- return ERROR_FAIL;
- }
-
/*
* Check if the domain has any CPU or node affinity already. If not, try
* to build up the latter via automatic NUMA placement. In fact, in case
diff --git a/tools/ocaml/libs/xc/xenctrl.ml b/tools/ocaml/libs/xc/xenctrl.ml
index 7c8d6ab..c465a5a 100644
--- a/tools/ocaml/libs/xc/xenctrl.ml
+++ b/tools/ocaml/libs/xc/xenctrl.ml
@@ -63,6 +63,7 @@ type domctl_create_config =
ssidref: int32;
handle: string;
flags: domain_create_flag list;
+ max_vcpus: int32;
max_evtchn_port: int32;
max_grant_frames: int32;
max_maptrack_frames: int32;
@@ -152,9 +153,6 @@ external domain_create: handle -> domctl_create_config ->
domid
external domain_sethandle: handle -> domid -> string -> unit
= "stub_xc_domain_sethandle"
-external domain_max_vcpus: handle -> domid -> int -> unit
- = "stub_xc_domain_max_vcpus"
-
external domain_pause: handle -> domid -> unit = "stub_xc_domain_pause"
external domain_unpause: handle -> domid -> unit = "stub_xc_domain_unpause"
external domain_resume_fast: handle -> domid -> unit =
"stub_xc_domain_resume_fast"
diff --git a/tools/ocaml/libs/xc/xenctrl.mli b/tools/ocaml/libs/xc/xenctrl.mli
index f150a5d..c06dd5a 100644
--- a/tools/ocaml/libs/xc/xenctrl.mli
+++ b/tools/ocaml/libs/xc/xenctrl.mli
@@ -55,6 +55,7 @@ type domctl_create_config = {
ssidref: int32;
handle: string;
flags: domain_create_flag list;
+ max_vcpus: int32;
max_evtchn_port: int32;
max_grant_frames: int32;
max_maptrack_frames: int32;
@@ -111,8 +112,6 @@ val with_intf : (handle -> 'a) -> 'a
external domain_create : handle -> domctl_create_config -> domid
= "stub_xc_domain_create"
external domain_sethandle : handle -> domid -> string -> unit =
"stub_xc_domain_sethandle"
-external domain_max_vcpus : handle -> domid -> int -> unit
- = "stub_xc_domain_max_vcpus"
external domain_pause : handle -> domid -> unit = "stub_xc_domain_pause"
external domain_unpause : handle -> domid -> unit = "stub_xc_domain_unpause"
external domain_resume_fast : handle -> domid -> unit
diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c
b/tools/ocaml/libs/xc/xenctrl_stubs.c
index 882828f..03b5956 100644
--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
+++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
@@ -127,16 +127,18 @@ CAMLprim value stub_xc_domain_create(value xch, value
config)
#define VAL_SSIDREF Field(config, 0)
#define VAL_HANDLE Field(config, 1)
#define VAL_FLAGS Field(config, 2)
-#define VAL_MAX_EVTCHN_PORT Field(config, 3)
-#define VAL_MAX_GRANT_FRAMES Field(config, 4)
-#define VAL_MAX_MAPTRACK_FRAMES Field(config, 5)
-#define VAL_ARCH Field(config, 6)
+#define VAL_MAX_VCPUS Field(config, 3)
+#define VAL_MAX_EVTCHN_PORT Field(config, 4)
+#define VAL_MAX_GRANT_FRAMES Field(config, 5)
+#define VAL_MAX_MAPTRACK_FRAMES Field(config, 6)
+#define VAL_ARCH Field(config, 7)
uint32_t domid = 0;
int result;
value l;
struct xen_domctl_createdomain cfg = {
.ssidref = Int32_val(VAL_SSIDREF),
+ .max_vcpus = Int32_val(VAL_MAX_VCPUS),
.max_evtchn_port = Int32_val(VAL_MAX_EVTCHN_PORT),
.max_grant_frames = Int32_val(VAL_MAX_GRANT_FRAMES),
.max_maptrack_frames = Int32_val(VAL_MAX_MAPTRACK_FRAMES),
@@ -171,6 +173,7 @@ CAMLprim value stub_xc_domain_create(value xch, value
config)
#undef VAL_MAX_MAPTRACK_FRAMES
#undef VAL_MAX_GRANT_FRAMES
#undef VAL_MAX_EVTCHN_PORT
+#undef VAL_MAX_VCPUS
#undef VAL_FLAGS
#undef VAL_HANDLE
#undef VAL_SSIDREF
@@ -185,20 +188,6 @@ CAMLprim value stub_xc_domain_create(value xch, value
config)
CAMLreturn(Val_int(domid));
}
-CAMLprim value stub_xc_domain_max_vcpus(value xch, value domid,
- value max_vcpus)
-{
- CAMLparam3(xch, domid, max_vcpus);
- int r;
-
- r = xc_domain_max_vcpus(_H(xch), _D(domid), Int_val(max_vcpus));
- if (r)
- failwith_xc(_H(xch));
-
- CAMLreturn(Val_unit);
-}
-
-
value stub_xc_domain_sethandle(value xch, value domid, value handle)
{
CAMLparam3(xch, domid, handle);
diff --git a/tools/python/xen/lowlevel/xc/xc.c
b/tools/python/xen/lowlevel/xc/xc.c
index db99a52..d647b68 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -125,16 +125,19 @@ static PyObject *pyxc_domain_create(XcObject *self,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
},
+ .max_vcpus = 1,
.max_evtchn_port = -1, /* No limit. */
.max_grant_frames = 32,
.max_maptrack_frames = 1024,
};
- static char *kwd_list[] = { "domid", "ssidref", "handle", "flags",
"target", NULL };
+ static char *kwd_list[] = { "domid", "ssidref", "handle", "flags",
+ "target", "max_vcpus", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list,
&dom, &config.ssidref, &pyhandle,
- &config.flags, &target))
+ &config.flags, &target,
+ &config.max_vcpus))
return NULL;
if ( pyhandle != NULL )
{
@@ -176,20 +179,6 @@ static PyObject *pyxc_domain_create(XcObject *self,
return NULL;
}
-static PyObject *pyxc_domain_max_vcpus(XcObject *self, PyObject *args)
-{
- uint32_t dom, max;
-
- if (!PyArg_ParseTuple(args, "ii", &dom, &max))
- return NULL;
-
- if (xc_domain_max_vcpus(self->xc_handle, dom, max) != 0)
- return pyxc_error_to_exception(self->xc_handle);
-
- Py_INCREF(zero);
- return zero;
-}
-
static PyObject *pyxc_domain_pause(XcObject *self, PyObject *args)
{
return dom_op(self, args, xc_domain_pause);
@@ -2049,14 +2038,6 @@ static PyMethodDef pyxc_methods[] = {
" dom [int, 0]: Domain identifier to use (allocated if
zero).\n"
"Returns: [int] new domain identifier; -1 on error.\n" },
- { "domain_max_vcpus",
- (PyCFunction)pyxc_domain_max_vcpus,
- METH_VARARGS, "\n"
- "Set the maximum number of VCPUs a domain may create.\n"
- " dom [int, 0]: Domain identifier to use.\n"
- " max [int, 0]: New maximum number of VCPUs in domain.\n"
- "Returns: [int] 0 on success; -1 on error.\n" },
-
{ "domain_dumpcore",
(PyCFunction)pyxc_domain_dumpcore,
METH_VARARGS, "\n"
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d8ba461..0326e43 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -496,6 +496,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
case XEN_DOMCTL_createdomain:
{
domid_t dom;
+ unsigned int vcpus = op->u.createdomain.max_vcpus;
+ unsigned int i, cpu;
+ cpumask_t *online;
static domid_t rover = 0;
ret = -EINVAL;
@@ -504,7 +507,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
| XEN_DOMCTL_CDF_hap
| XEN_DOMCTL_CDF_s3_integrity
| XEN_DOMCTL_CDF_oos_off
- | XEN_DOMCTL_CDF_xs_domain)) )
+ | XEN_DOMCTL_CDF_xs_domain)) ||
+ vcpus < 1 )
break;
dom = op->domain;
@@ -551,6 +555,37 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
if ( !ret )
goto createdomain_fail_late;
+ ret = -EINVAL;
+ if ( vcpus > domain_max_vcpus(d) )
+ goto createdomain_fail_late;
+
+ ret = -ENOMEM;
+ online = cpupool_domain_cpumask(d);
+
+ BUG_ON(d->vcpu);
+ BUG_ON(d->max_vcpus);
+
+ d->vcpu = xzalloc_array(struct vcpu *, vcpus);
+ /* Install vcpu array /then/ update max_vcpus. */
+ smp_wmb();
+ if ( !d->vcpu )
+ goto createdomain_fail_late;
+ d->max_vcpus = vcpus;
+
+ cpu = cpumask_any(online);
+ for ( i = 0; i < vcpus; ++i )
+ {
+ BUG_ON(d->vcpu[i]);
+
+ if ( alloc_vcpu(d, i, cpu) == NULL )
+ goto createdomain_fail_late;
+
+ BUG_ON(!d->vcpu[i]);
+
+ cpu = cpumask_cycle(d->vcpu[i]->processor, online);
+ }
+
+ ret = 0;
d = NULL;
break;
@@ -572,79 +607,6 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
break;
}
- case XEN_DOMCTL_max_vcpus:
- {
- unsigned int i, max = op->u.max_vcpus.max, cpu;
- cpumask_t *online;
-
- ret = -EINVAL;
- if ( (d == current->domain) || /* no domain_pause() */
- (max > domain_max_vcpus(d)) )
- break;
-
- /* Until Xenoprof can dynamically grow its vcpu-s array... */
- if ( d->xenoprof )
- {
- ret = -EAGAIN;
- break;
- }
-
- /* Needed, for example, to ensure writable p.t. state is synced. */
- domain_pause(d);
-
- /* We cannot reduce maximum VCPUs. */
- ret = -EINVAL;
- if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
- goto maxvcpu_out;
-
- /*
- * For now don't allow increasing the vcpu count from a non-zero
- * value: This code and all readers of d->vcpu would otherwise need
- * to be converted to use RCU, but at present there's no tools side
- * code path that would issue such a request.
- */
- ret = -EBUSY;
- if ( (d->max_vcpus > 0) && (max > d->max_vcpus) )
- goto maxvcpu_out;
-
- ret = -ENOMEM;
- online = cpupool_domain_cpumask(d);
- if ( max > d->max_vcpus )
- {
- struct vcpu **vcpus;
-
- BUG_ON(d->vcpu != NULL);
- BUG_ON(d->max_vcpus != 0);
-
- if ( (vcpus = xzalloc_array(struct vcpu *, max)) == NULL )
- goto maxvcpu_out;
-
- /* Install vcpu array /then/ update max_vcpus. */
- d->vcpu = vcpus;
- smp_wmb();
- d->max_vcpus = max;
- }
-
- for ( i = 0; i < max; i++ )
- {
- if ( d->vcpu[i] != NULL )
- continue;
-
- cpu = (i == 0) ?
- cpumask_any(online) :
- cpumask_cycle(d->vcpu[i-1]->processor, online);
-
- if ( alloc_vcpu(d, i, cpu) == NULL )
- goto maxvcpu_out;
- }
-
- ret = 0;
-
- maxvcpu_out:
- domain_unpause(d);
- break;
- }
-
case XEN_DOMCTL_soft_reset:
if ( d == current->domain ) /* no domain_pause() */
{
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 424f0a8..09ad277 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -70,6 +70,7 @@ struct xen_domctl_createdomain {
* Various domain limits, which impact the quantity of resources (global
* mapping space, xenheap, etc) a guest may consume.
*/
+ uint32_t max_vcpus;
uint32_t max_evtchn_port;
uint32_t max_grant_frames;
uint32_t max_maptrack_frames;
@@ -311,12 +312,6 @@ struct xen_domctl_vcpuaffinity {
};
-/* XEN_DOMCTL_max_vcpus */
-struct xen_domctl_max_vcpus {
- uint32_t max; /* maximum number of vcpus */
-};
-
-
/* XEN_DOMCTL_scheduler_op */
/* Scheduler types. */
/* #define XEN_SCHEDULER_SEDF 4 (Removed) */
@@ -1104,7 +1099,7 @@ struct xen_domctl {
#define XEN_DOMCTL_setvcpucontext 12
#define XEN_DOMCTL_getvcpucontext 13
#define XEN_DOMCTL_getvcpuinfo 14
-#define XEN_DOMCTL_max_vcpus 15
+/* #define XEN_DOMCTL_max_vcpus 15 - Moved into
XEN_DOMCTL_createdomain */
#define XEN_DOMCTL_scheduler_op 16
#define XEN_DOMCTL_setdomainhandle 17
#define XEN_DOMCTL_setdebugging 18
@@ -1183,7 +1178,6 @@ struct xen_domctl {
struct xen_domctl_max_mem max_mem;
struct xen_domctl_vcpucontext vcpucontext;
struct xen_domctl_getvcpuinfo getvcpuinfo;
- struct xen_domctl_max_vcpus max_vcpus;
struct xen_domctl_scheduler_op scheduler_op;
struct xen_domctl_setdomainhandle setdomainhandle;
struct xen_domctl_setdebugging setdebugging;
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index cccd1c7..806faa6 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -636,9 +636,6 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_resumedomain:
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__RESUME);
- case XEN_DOMCTL_max_vcpus:
- return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__MAX_VCPUS);
-
case XEN_DOMCTL_max_mem:
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SETDOMAINMAXMEM);
diff --git a/xen/xsm/flask/policy/access_vectors
b/xen/xsm/flask/policy/access_vectors
index 6f6e969..c76a180 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -132,8 +132,6 @@ class domain
# target = the new label of the domain
# see also the domain2 relabel{from,to,self} permissions
transition
-# XEN_DOMCTL_max_vcpus
- max_vcpus
# XEN_DOMCTL_destroydomain
destroy
# XEN_DOMCTL_setvcpuaffinity
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |