(I highly recommend the patchbomb extension ("hg email"); it makes
sending a series much simpler and threads the mails together in a
convenient way.)
On Fri, 2010-10-01 at 08:20 +0100, Juergen Gross wrote:
> Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
This should come after the description.
> To be able to support arbitrary numbers of physical cpus it was necessary to
> include the size of cpumaps in the xc-interfaces for cpu pools.
> These were:
> definition of xc_cpupoolinfo_t
> xc_cpupool_getinfo()
> xc_cpupool_freeinfo()
Please also mention the change in xc_cpupool_getinfo semantics from a
caller-allocated buffer to a callee-allocated-and-returned one.
> @@ -64,50 +78,61 @@ int xc_cpupool_destroy(xc_interface *xch
> return do_sysctl_save(xch, &sysctl);
> }
>
> -int xc_cpupool_getinfo(xc_interface *xch,
> - uint32_t first_poolid,
> - uint32_t n_max,
> - xc_cpupoolinfo_t *info)
> +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
> + uint32_t poolid)
[...]
> - memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
> + local_size = get_cpumap_size(xch);
> + local = alloca(local_size);
> + if (!local_size)
> + {
> + PERROR("Could not get number of cpus");
> + return NULL;
> + }
I imagine alloca(0) is most likely safe so long as you don't actually
use the result, but the man page doesn't specifically say. To be on the
safe side the !local_size check should probably come before the
alloca(local_size).
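i.e. something like (untested, just reordering what the patch already
has):

    local_size = get_cpumap_size(xch);
    if (!local_size)
    {
        PERROR("Could not get number of cpus");
        return NULL;
    }
    local = alloca(local_size);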
> +    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
xg_private.h defines a ROUNDUP macro; I wonder if that should be moved
somewhere more generic and used to clarify code like this?
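Just to illustrate the sort of thing I mean (DIV_ROUNDUP here is
hypothetical, not the existing xg_private.h macro):

    /* hypothetical round-up-division helper */
    #define DIV_ROUNDUP(_n, _d) (((_n) + (_d) - 1) / (_d))

    cpumap_size = DIV_ROUNDUP(local_size, sizeof(*info->cpumap));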
> diff -r 8b7d253f0e17 tools/libxc/xenctrl.h
> --- a/tools/libxc/xenctrl.h Fri Oct 01 08:39:49 2010 +0100
> +++ b/tools/libxc/xenctrl.h Fri Oct 01 09:13:36 2010 +0100
> @@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
> [...]
> + * @parm poolid lowest id for which info is returned
> + * return cpupool info ptr (obtained by malloc)
> */
> -int xc_cpupool_getinfo(xc_interface *xch,
> - uint32_t first_poolid,
> - uint32_t n_max,
> - xc_cpupoolinfo_t *info);
> +xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
> + uint32_t poolid);
>
> /**
> * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
> @@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
> *
> * @parm xc_handle a handle to an open hypervisor interface
> * @parm cpumap pointer where to store the cpumap
> + * @parm cpusize size of cpumap array in bytes
> * return 0 on success, -1 on failure
> */
> int xc_cpupool_freeinfo(xc_interface *xch,
> - uint64_t *cpumap);
> + uint64_t *cpumap,
> + int cpusize);
xc_cpupool_getinfo returns a callee-allocated buffer while
xc_cpupool_freeinfo expects to be given a caller-allocated buffer? I
think we should make this consistent one way or the other.
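If we went the callee-allocated route for both, the freeinfo prototype
might end up looking something like (sketch only, the out-parameter name
is made up):

    /* sketch: callee allocates, caller free()s, as for xc_cpupool_getinfo */
    uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
                                  int *cpusize);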
> diff -r 71f836615ea2 tools/libxl/libxl.c
> --- a/tools/libxl/libxl.c Fri Sep 24 15:54:39 2010 +0100
> +++ b/tools/libxl/libxl.c Fri Oct 01 09:03:17 2010 +0200
> @@ -610,26 +610,34 @@ libxl_poolinfo * libxl_list_pool(libxl_c
> libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
> {
> libxl_poolinfo *ptr;
> - int i, ret;
> - xc_cpupoolinfo_t info[256];
> - int size = 256;
> + int i;
> + xc_cpupoolinfo_t *info;
> + uint32_t poolid;
> + libxl_physinfo physinfo;
>
> - ptr = calloc(size, sizeof(libxl_poolinfo));
> - if (!ptr) {
> - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
> + if (libxl_get_physinfo(ctx, &physinfo) != 0) {
> + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting phys info");
> return NULL;
> }
Am I missing where the contents of physinfo are subsequently used in
this function?
>
> - ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
> - if (ret<0) {
> - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
> - return NULL;
> + ptr = NULL;
> +
> + poolid = 0;
> + for (i = 0;; i++) {
> + info = xc_cpupool_getinfo(ctx->xch, poolid);
> + if (info == NULL)
> + break;
> + ptr = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
> + if (!ptr) {
> +        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
> + return NULL;
> + }
This will leak the previous value of ptr if realloc() fails. You need to
do something like (with tmp declared as libxl_poolinfo *):

    tmp = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
    if (!tmp) {
        free(ptr);
        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
        return NULL;
    }
    ptr = tmp;
> diff -r 71f836615ea2 tools/python/xen/lowlevel/xc/xc.c
> --- a/tools/python/xen/lowlevel/xc/xc.c Fri Sep 24 15:54:39 2010 +0100
> +++ b/tools/python/xen/lowlevel/xc/xc.c Fri Oct 01 09:03:17 2010 +0200
> @@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
> if ( xc_physinfo(self->xc_handle, &info) != 0 )
> return pyxc_error_to_exception(self->xc_handle);
>
> - nr_cpus = info.nr_cpus;
> + nr_cpus = info.max_cpu_id + 1;
>
> size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
> cpumap = malloc(cpumap_size * size);
Is this (and the equivalent in getinfo) an independent bug fix for a
pre-existing issue, or does it somehow relate to the rest of the
changes? I don't see any corresponding change to xc_vcpu_setaffinity,
is all.
Given the repeated uses of physinfo.max_cpu_id (here, in get_cpumap_size
etc.) might an xc_get_nr_cpus() function be worthwhile?
Presumably, when you change these interfaces to use uint8_t instead of
uint64_t, this code becomes the same as the private get_cpumap_size you
defined earlier, so it might be worth exporting that functionality from
libxc?
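Something along these lines, say (the name and error handling are just
a suggestion, not an existing libxc function):

    int xc_get_nr_cpus(xc_interface *xch)
    {
        xc_physinfo_t physinfo;

        if ( xc_physinfo(xch, &physinfo) != 0 )
            return -1;

        return physinfo.max_cpu_id + 1;
    }

with callers like this one then using it instead of calling
xc_physinfo() directly, and similarly an exported wrapper around the
private get_cpumap_size.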
> @@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
> PyObject *list, *info_dict;
>
> uint32_t first_pool = 0;
> - int max_pools = 1024, nr_pools, i;
> + int max_pools = 1024, i;
[...]
> + for (i = 0; i < max_pools; i++)
I don't think there is any 1024 pool limit inherent in the new libxc
code, is there? You've removed the limit from libxl and I think the
right thing to do is remove it here as well.
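i.e. loop until xc_cpupool_getinfo() returns NULL, as the libxl version
now does, something like:

    first_pool = 0;
    for ( ; ; )
    {
        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
        if ( info == NULL )
            break;
        /* ... build info_dict and append to list as below ... */
        first_pool = info->cpupool_id + 1;
        free(info);
    }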
> {
> - free(info);
> - return pyxc_error_to_exception(self->xc_handle);
> - }
> -
> - list = PyList_New(nr_pools);
> - for ( i = 0 ; i < nr_pools; i++ )
> - {
> + info = xc_cpupool_getinfo(self->xc_handle, first_pool);
> + if (info == NULL)
> + break;
> info_dict = Py_BuildValue(
> "{s:i,s:i,s:i,s:N}",
> - "cpupool", (int)info[i].cpupool_id,
> - "sched", info[i].sched_id,
> - "n_dom", info[i].n_dom,
> - "cpulist", cpumap_to_cpulist(info[i].cpumap));
> + "cpupool", (int)info->cpupool_id,
> + "sched", info->sched_id,
> + "n_dom", info->n_dom,
> + "cpulist", cpumap_to_cpulist(info->cpumap,
> + info->cpumap_size));
> + first_pool = info->cpupool_id + 1;
> + free(info);
> +
> if ( info_dict == NULL )
> {
> Py_DECREF(list);
> - if ( info_dict != NULL ) { Py_DECREF(info_dict); }
> - free(info);
> return NULL;
> }
> - PyList_SetItem(list, i, info_dict);
> +
> + PyList_Append(list, info_dict);
> + Py_DECREF(info_dict);
> }
> -
> - free(info);
>
> return list;
> }
> @@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
>
> static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
> {
> - uint64_t cpumap;
> + uint64_t *cpumap;
> + xc_physinfo_t physinfo;
> + int ret;
> + PyObject *info = NULL;
>
> - if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
> + if (xc_physinfo(self->xc_handle, &physinfo))
> return pyxc_error_to_exception(self->xc_handle);
>
> - return cpumap_to_cpulist(cpumap);
> + cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
Making xc_cpupool_freeinfo allocate the buffer, as xc_cpupool_getinfo
does, would remove the need for this sort of arithmetic in the users of
libxc.
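e.g. this wrapper could then be reduced to something like (sketch only,
assuming the out-parameter form suggested earlier):

    /* sketch: xc_cpupool_freeinfo() allocates, the caller frees */
    cpumap = xc_cpupool_freeinfo(self->xc_handle, &cpusize);
    if ( cpumap == NULL )
        return pyxc_error_to_exception(self->xc_handle);

    info = cpumap_to_cpulist(cpumap, cpusize);
    free(cpumap);
    return info;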
Ian.