Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r 9f49667fec71 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c  Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxc/xc_cpupool.c  Tue Aug 03 08:10:55 2010 +0200
@@ -18,6 +18,20 @@ static int do_sysctl_save(xc_interface *
     } while ( (ret < 0) && (errno == EAGAIN) );
 
     return ret;
+}
+
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int max_phys_cpus = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_phys_cpus )
+        return max_phys_cpus;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_phys_cpus = physinfo.max_phys_cpus;
+
+    return max_phys_cpus;
 }
 
 int xc_cpupool_create(xc_interface *xch,
@@ -50,50 +64,58 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                                     uint32_t first_poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int n_cpu;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    n_cpu = get_cpumap_size(xch);
+    local_size = (n_cpu + 8) / 8;
+    cpumap_size = (n_cpu + 63) / 64;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = n_cpu;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = first_poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = n_cpu;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -136,30 +158,42 @@ int xc_cpupool_movedomain(xc_interface *
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
+    int loc_size;
     DECLARE_SYSCTL;
 
+    loc_size = (cpusize + 7) / 8;
+    local = malloc(loc_size);
+    if ( local == NULL )
+        return -1;
+
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, loc_size)) != 0 )
    {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
    }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, loc_size);
 
     if (err < 0)
+    {
+        free(local);
         return err;
+    }
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+    bitmap_byte_to_64(cpumap, local, cpusize);
+    free(local);
 
     return 0;
 }
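
The reworked xc_cpupool_getinfo() hands back one malloc()ed record per call
instead of filling a caller-supplied array, so callers iterate by pool id and
free each result.  A minimal sketch of the new convention (assuming an open
xc_interface handle; list_pools is an illustrative name, not part of the
patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Walk all cpupools: each call returns the next pool with an id at or
       above the one passed in, or NULL when none is left (or on error).
       The record and its embedded cpumap are a single allocation. */
    static void list_pools(xc_interface *xch)
    {
        xc_cpupoolinfo_t *info;
        uint32_t poolid = 0;

        while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
        {
            printf("pool %u: sched %u, %u domains, map sized for %u cpus\n",
                   info->cpupool_id, info->sched_id, info->n_dom,
                   info->cpumap_size);
            poolid = info->cpupool_id + 1;
            free(info);
        }
    }
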
diff -r 9f49667fec71 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxc/xc_domain.c   Tue Aug 03 08:10:55 2010 +0200
@@ -102,7 +102,10 @@ int xc_vcpu_setaffinity(xc_interface *xc
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t *local = malloc(cpusize);
+    uint8_t *local;
+    int loc_size = (cpusize + 7) / 8;
+
+    local = malloc(loc_size);
 
     if(local == NULL)
     {
@@ -113,13 +116,13 @@ int xc_vcpu_setaffinity(xc_interface *xc
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
+    bitmap_64_to_byte(local, cpumap, cpusize);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize;
 
-    if ( lock_pages(local, cpusize) != 0 )
+    if ( lock_pages(local, loc_size) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -127,7 +130,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(local, cpusize);
+    unlock_pages(local, loc_size);
 
  out:
     free(local);
@@ -142,8 +145,10 @@ int xc_vcpu_getaffinity(xc_interface *xc
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
+    uint8_t *local;
+    int loc_size = (cpusize + 7) / 8;
 
+    local = malloc(loc_size);
     if(local == NULL)
     {
         PERROR("Could not alloc memory for Xen hypercall");
@@ -156,9 +161,9 @@ int xc_vcpu_getaffinity(xc_interface *xc
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
+    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize;
 
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if ( lock_pages(local, loc_size) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -166,8 +171,8 @@ int xc_vcpu_getaffinity(xc_interface *xc
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    unlock_pages(local, loc_size);
+    bitmap_byte_to_64(cpumap, local, cpusize);
 out:
     free(local);
     return ret;
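
With this change the cpusize argument of the affinity calls counts cpus
(bits), not bytes: the byte buffer handed to Xen is (cpusize + 7) / 8 bytes
long and one uint64_t covers 64 cpus.  A caller sketch under that assumption
(pin_vcpu_all is an illustrative helper, using the
(xch, domid, vcpu, cpumap, cpusize) parameter order of this file):

    #include <stdlib.h>
    #include <string.h>
    #include <xenctrl.h>

    /* Allow a vcpu to run on every physical cpu: size the map from
       max_phys_cpus and pass the cpu count, not a byte count. */
    static int pin_vcpu_all(xc_interface *xch, uint32_t domid, int vcpu)
    {
        xc_physinfo_t physinfo = { 0 };
        uint64_t *cpumap;
        int n_cpu, words, ret;

        if ( xc_physinfo(xch, &physinfo) != 0 )
            return -1;

        n_cpu = physinfo.max_phys_cpus;
        words = (n_cpu + 63) / 64;
        cpumap = malloc(words * sizeof(*cpumap));
        if ( cpumap == NULL )
            return -1;
        memset(cpumap, -1, words * sizeof(*cpumap));  /* set every cpu bit */

        ret = xc_vcpu_setaffinity(xch, domid, vcpu, cpumap, n_cpu);
        free(cpumap);
        return ret;
    }
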
diff -r 9f49667fec71 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxc/xenctrl.h     Tue Aug 03 08:10:55 2010 +0200
@@ -547,7 +547,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -573,18 +574,14 @@ int xc_cpupool_destroy(xc_interface *xch
                        uint32_t poolid);
 
 /**
- * Get cpupool info. Returns info for up to the specified number of cpupools
+ * Get cpupool info. Returns info for the next cpupool
 * starting at the given id.
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * return cpupool info ptr (obtained by malloc, to be freed by the caller)
 */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                                     uint32_t first_poolid);
 
 /**
 * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -627,10 +624,12 @@ int xc_cpupool_movedomain(xc_interface *
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize number of cpus covered by the cpumap array
 * return 0 on success, -1 on failure
 */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r 9f49667fec71 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxl/libxl.c       Tue Aug 03 08:10:55 2010 +0200
@@ -526,9 +526,10 @@ libxl_poolinfo * libxl_list_pool(libxl_c
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
+    int i;
+    xc_cpupoolinfo_t *info;
     int size = 256;
+    uint32_t poolid;
 
     ptr = calloc(size, sizeof(libxl_poolinfo));
     if (!ptr) {
@@ -536,16 +537,17 @@ libxl_poolinfo * libxl_list_pool(libxl_c
         return NULL;
     }
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        XL_LOG_ERRNO(ctx, XL_LOG_ERROR, "getting cpupool info");
-        return NULL;
+    poolid = 0;
+    for (i = 0; i < size; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        ptr[i].poolid = info->cpupool_id;
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
 
@@ -2480,6 +2482,7 @@ int libxl_get_physinfo(libxl_ctx *ctx, l
     physinfo->max_cpu_id = xcphysinfo.max_cpu_id;
     physinfo->nr_cpus = xcphysinfo.nr_cpus;
     physinfo->cpu_khz = xcphysinfo.cpu_khz;
+    physinfo->max_phys_cpus = xcphysinfo.max_phys_cpus;
     physinfo->total_pages = xcphysinfo.total_pages;
     physinfo->free_pages = xcphysinfo.free_pages;
     physinfo->scrub_pages = xcphysinfo.scrub_pages;
@@ -2551,7 +2554,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
         XL_LOG_ERRNO(ctx, XL_LOG_ERROR, "getting physinfo");
         return NULL;
     }
-    *cpusize = physinfo.max_cpu_id + 1;
+    *cpusize = physinfo.max_phys_cpus;
     ptr = libxl_calloc(ctx, domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
         return NULL;
diff -r 9f49667fec71 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxl/libxl.h       Tue Aug 03 08:10:55 2010 +0200
@@ -582,6 +582,7 @@ typedef struct {
     uint32_t max_cpu_id;
     uint32_t nr_cpus;
     uint32_t cpu_khz;
+    uint32_t max_phys_cpus;
 
     uint64_t total_pages;
     uint64_t free_pages;
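
At the libxl level the same iteration is wrapped by libxl_list_pool(), which
still returns a single calloc()ed array of up to 256 entries.  A usage sketch
(assuming an initialised libxl_ctx; show_pools is an illustrative name):

    #include <stdio.h>
    #include <stdlib.h>
    #include "libxl.h"

    /* Print the ids of all cpupools; the result is one calloc()ed block
       holding *nb_pool valid entries and is released with free(). */
    static void show_pools(libxl_ctx *ctx)
    {
        int i, nb_pool = 0;
        libxl_poolinfo *pools = libxl_list_pool(ctx, &nb_pool);

        if ( pools == NULL )
            return;
        for ( i = 0; i < nb_pool; i++ )
            printf("pool %u\n", pools[i].poolid);
        free(pools);
    }
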
diff -r 9f49667fec71 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Tue Aug 03 08:10:55 2010 +0200
@@ -3341,7 +3341,7 @@ void vcpupin(char *d, const char *vcpu,
         goto vcpupin_out1;
     }
 
-    cpumap = calloc(physinfo.max_cpu_id + 1, sizeof (uint64_t));
+    cpumap = calloc(physinfo.max_phys_cpus + 1, sizeof (uint64_t));
     if (!cpumap) {
         goto vcpupin_out1;
     }
@@ -3369,12 +3369,12 @@ void vcpupin(char *d, const char *vcpu,
         }
     }
     else {
-        memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_cpu_id + 1));
+        memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_phys_cpus + 1));
     }
 
     if (vcpuid != -1) {
         if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
-                                   cpumap, physinfo.max_cpu_id + 1) == -1) {
+                                   cpumap, physinfo.max_phys_cpus + 1) == -1) {
             fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
         }
     }
@@ -3385,7 +3385,7 @@ void vcpupin(char *d, const char *vcpu,
     }
     for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
         if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
-                                   cpumap, physinfo.max_cpu_id + 1) == -1) {
+                                   cpumap, physinfo.max_phys_cpus + 1) == -1) {
             fprintf(stderr, "libxl_list_vcpu failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
         }
     }
diff -r 9f49667fec71 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Fri Jul 30 15:22:39 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Aug 03 08:10:55 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_phys_cpus;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,13 +400,13 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_phys_cpus;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
+    size = (nr_cpus + cpumap_size * 8 - 1) / (cpumap_size * 8);
     if((cpumap = malloc(cpumap_size * size)) == NULL)
         return pyxc_error_to_exception(self->xc_handle);
     memset(cpumap, 0, cpumap_size * size);
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1ULL << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                       &first_pool, &max_pools) )
         return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) {
-                Py_DECREF(info_dict);
-            }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_phys_cpus + 63) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = -ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              physinfo.max_phys_cpus);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_phys_cpus);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
@@ -2101,7 +2111,7 @@ static PyObject *pyflask_context_to_sid(
     buf = malloc(len);
     if (!buf) {
         errno = -ENOMEM;
-        PyErr_SetFromErrno(xc_error_obj);
+        return PyErr_SetFromErrno(xc_error_obj);
     }
 
     memcpy(buf, ctx, len);
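
cpumap_to_cpulist() now walks an array of 64-bit words instead of shifting a
single mask.  The same walk as a standalone sketch, for clarity (print_cpus
is an illustrative name; note the shift operand must be a 64-bit type such as
1ULL so the code also behaves on builds where long is 32 bits):

    #include <stdio.h>
    #include <stdint.h>

    /* Bit i % 64 of word i / 64 stands for cpu i; the word pointer
       advances after every 64 bits have been examined. */
    static void print_cpus(const uint64_t *cpumap, int cpusize)
    {
        int i;

        for ( i = 0; i < cpusize; i++ )
        {
            if ( *cpumap & (1ULL << (i % 64)) )
                printf("cpu %d\n", i);
            if ( (i % 64) == 63 )
                cpumap++;
        }
    }
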
diff -r 9f49667fec71 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Fri Jul 30 15:22:39 2010 +0100
+++ b/xen/arch/x86/sysctl.c     Tue Aug 03 08:10:55 2010 +0200
@@ -68,6 +68,7 @@ long arch_do_sysctl(
         pi->free_pages = avail_domheap_pages();
         pi->scrub_pages = 0;
         pi->cpu_khz = cpu_khz;
+        pi->max_phys_cpus = NR_CPUS;
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
         if ( hvm_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
diff -r 9f49667fec71 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h       Fri Jul 30 15:22:39 2010 +0100
+++ b/xen/include/public/sysctl.h       Tue Aug 03 08:10:55 2010 +0200
@@ -96,6 +96,7 @@ struct xen_sysctl_physinfo {
     uint32_t nr_cpus, max_cpu_id;
     uint32_t nr_nodes, max_node_id;
     uint32_t cpu_khz;
+    uint32_t max_phys_cpus;
     uint64_aligned_t total_pages;
     uint64_aligned_t free_pages;
     uint64_aligned_t scrub_pages;
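
All the nr_cpus/cpusize arguments above count bits, while the buffers crossing
the hypercall boundary are byte arrays; the conversion is done by libxc's
bitmap_byte_to_64()/bitmap_64_to_byte() helpers.  An illustrative sketch of
the byte-to-word direction under that convention (bytes_to_words is a
hypothetical name, not the verbatim libxc source):

    #include <stdint.h>

    /* Pack a little-endian byte bitmap into uint64_t words; nbits counts
       bits, so the final word may be filled only partially. */
    static void bytes_to_words(uint64_t *lp, const uint8_t *bp, int nbits)
    {
        int i, j, b;

        for ( i = 0, b = 0; nbits > 0; i++, b += 8 )
        {
            uint64_t l = 0;

            for ( j = 0; (j < 8) && (nbits > 0); j++, nbits -= 8 )
                l |= (uint64_t)bp[b + j] << (j * 8);
            lp[i] = l;
        }
    }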