# HG changeset patch
# User Juergen Gross
# Date 1288331667 -7200
# Node ID d60c1258c6b75f6de0bb9e09b27ac9e86c974908
# Parent  5611ec8238ec6cee24a1a1712301e7939e489007
change tools cpumaps to uint8_t

Cpumap types in the tools (libxc and libxl) are changed to be byte-based,
matching the interface to the hypervisor.

To make handling easier, the size of a cpumap is always derived from the
number of physical cpus supported by the hypervisor. This eliminates the
need to keep track of the cpumap size in external interfaces.

In libxl a macro for cycling through a cpumap is added
(libxl_for_each_cpu).

Interfaces changed:
libxl_set_vcpuaffinity()
libxl_cpumap_alloc()
xc_vcpu_setaffinity()
xc_vcpu_getaffinity()
xc_cpupool_freeinfo()

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
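To illustrate the new calling convention, here is a minimal usage sketch.
It is not part of the patch; the helper names pin_vcpu0 and print_free_cpus
are invented for this example, and context setup and error handling are
elided:

    /* Hypothetical usage sketch only -- not included in the patch below. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>
    #include "libxl.h"
    #include "libxl_utils.h"

    /* Pin vcpu 0 of a domain to a single physical cpu via libxc.
     * The cpumap is sized internally from xc_get_max_cpus(), so no
     * size parameter is passed any more. */
    static int pin_vcpu0(xc_interface *xch, uint32_t domid, int cpu)
    {
        int rc;
        xc_cpumap_t cpumap = xc_cpumap_alloc(xch);

        if (cpumap == NULL)
            return -1;
        cpumap[cpu / 8] |= 1 << (cpu % 8);    /* byte-based bitmap */
        rc = xc_vcpu_setaffinity(xch, domid, 0, cpumap);
        free(cpumap);
        return rc;
    }

    /* List the cpus not assigned to any cpupool via libxl, cycling
     * through the map with the new libxl_for_each_cpu macro. */
    static void print_free_cpus(libxl_ctx *ctx)
    {
        int i;
        libxl_cpumap cpumap;

        if (libxl_get_freecpus(ctx, &cpumap))
            return;
        libxl_for_each_cpu(i, cpumap)
            if (libxl_cpumap_test(&cpumap, i))
                printf("cpu %d is free\n", i);
        libxl_cpumap_destroy(&cpumap);
    }

The point is that callers no longer compute or pass a cpumap size: both
layers size the map from the hypervisor's max cpu count, and the bitmap is
addressed byte-wise on both sides of the interface.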
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxc/xc_cpupool.c	Fri Oct 29 07:54:27 2010 +0200
@@ -32,11 +32,6 @@
     } while ( (ret < 0) && (errno == EAGAIN) );
 
     return ret;
-}
-
-static int get_cpumap_size(xc_interface *xch)
-{
-    return (xc_get_max_cpus(xch) + 7) / 8;
 }
 
 int xc_cpupool_create(xc_interface *xch,
@@ -75,12 +70,10 @@
     int err = 0;
     xc_cpupoolinfo_t *info = NULL;
     int local_size;
-    int cpumap_size;
-    int size;
     DECLARE_SYSCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
 
-    local_size = get_cpumap_size(xch);
+    local_size = xc_get_cpumap_size(xch);
     if (!local_size)
     {
         PERROR("Could not get number of cpus");
@@ -93,9 +86,6 @@
         return NULL;
     }
 
-    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
-    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
-
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
     sysctl.u.cpupool_op.cpupool_id = poolid;
@@ -107,23 +97,31 @@
     if ( err < 0 )
         goto out;
 
-    info = malloc(size);
+    info = calloc(1, sizeof(xc_cpupoolinfo_t));
     if ( !info )
         goto out;
 
-    memset(info, 0, size);
-    info->cpumap_size = local_size * 8;
-    info->cpumap = (uint64_t *)(info + 1);
-
+    info->cpumap = xc_cpumap_alloc(xch);
+    if (!info->cpumap) {
+        free(info);
+        goto out;
+    }
     info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
     info->sched_id = sysctl.u.cpupool_op.sched_id;
     info->n_dom = sysctl.u.cpupool_op.n_dom;
-    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
+    memcpy(info->cpumap, local, local_size);
 
 out:
     xc_hypercall_buffer_free(xch, local);
 
     return info;
+}
+
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info)
+{
+    free(info->cpumap);
+    free(info);
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -165,19 +163,19 @@
     return do_sysctl_save(xch, &sysctl);
 }
 
-uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
-                        int *cpusize)
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
 {
     int err = -1;
-    uint64_t *cpumap = NULL;
+    xc_cpumap_t cpumap = NULL;
+    int mapsize;
     DECLARE_SYSCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
 
-    *cpusize = get_cpumap_size(xch);
-    if (*cpusize == 0)
+    mapsize = xc_get_cpumap_size(xch);
+    if (mapsize == 0)
         return NULL;
 
-    local = xc_hypercall_buffer_alloc(xch, local, *cpusize);
+    local = xc_hypercall_buffer_alloc(xch, local, mapsize);
     if ( local == NULL ) {
         PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
         return NULL;
@@ -186,18 +184,18 @@
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = mapsize * 8;
 
     err = do_sysctl_save(xch, &sysctl);
 
     if ( err < 0 )
         goto out;
 
-    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+    cpumap = xc_cpumap_alloc(xch);
     if (cpumap == NULL)
         goto out;
 
-    bitmap_byte_to_64(cpumap, local, *cpusize * 8);
+    memcpy(cpumap, local, mapsize);
 
 out:
     xc_hypercall_buffer_free(xch, local);
 
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxc/xc_domain.c	Fri Oct 29 07:54:27 2010 +0200
@@ -113,11 +113,19 @@
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (!cpusize)
+    {
+        PERROR("Could not get number of cpus");
+        goto out;
+    }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
     if ( local == NULL )
@@ -130,7 +138,7 @@
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
+    memcpy(local, cpumap, cpusize);
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
@@ -148,14 +156,22 @@
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (!cpusize)
+    {
+        PERROR("Could not get number of cpus");
+        goto out;
+    }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if(local == NULL)
+    if (local == NULL)
     {
         PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
         goto out;
     }
@@ -170,7 +186,7 @@
 
     ret = do_domctl(xch, &domctl);
 
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    memcpy(cpumap, local, cpusize);
 
     xc_hypercall_buffer_free(xch, local);
 out:
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxc/xc_misc.c	Fri Oct 29 07:54:27 2010 +0200
@@ -33,6 +33,21 @@
     max_cpus = physinfo.max_cpu_id + 1;
 
     return max_cpus;
+}
+
+int xc_get_cpumap_size(xc_interface *xch)
+{
+    return (xc_get_max_cpus(xch) + 7) / 8;
+}
+
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+{
+    int sz;
+
+    sz = xc_get_cpumap_size(xch);
+    if (sz == 0)
+        return NULL;
+    return calloc(1, sz);
 }
 
 int xc_readconsolering(xc_interface *xch,
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxc/xenctrl.h	Fri Oct 29 07:54:27 2010 +0200
@@ -281,6 +281,20 @@
 #define xc_hypercall_buffer_free_pages(_xch, _name, _nr) xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
 
 /*
+ * CPUMAP handling
+ */
+typedef uint8_t *xc_cpumap_t;
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
+/* return array size for cpumap */
+int xc_get_cpumap_size(xc_interface *xch);
+
+/* allocate a cpumap */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
+
+/*
  * DOMAIN DEBUGGING FUNCTIONS
  */
 
@@ -347,9 +361,6 @@
     start_info_t s;
 } start_info_any_t;
 
-
-/* return maximum number of cpus the hypervisor supports */
-int xc_get_max_cpus(xc_interface *xch);
 
 int xc_domain_create(xc_interface *xch,
                      uint32_t ssidref,
@@ -462,13 +473,11 @@
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        xc_cpumap_t cpumap);
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        xc_cpumap_t cpumap);
 
 /**
  * This function will return information about one or more domains. It is
@@ -670,8 +679,7 @@
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint32_t cpumap_size;    /* max number of cpus in map */
-    uint64_t *cpumap;
+    xc_cpumap_t cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -701,10 +709,18 @@
  * starting at the given id.
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm poolid lowest id for which info is returned
- * return cpupool info ptr (obtained by malloc)
+ * return cpupool info ptr (to be freed via xc_cpupool_infofree)
 */
 xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
                        uint32_t poolid);
+
+/**
+ * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm info area to free
+ */
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -746,11 +762,9 @@
  * Return map of cpus not in any cpupool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
- * @parm cpusize where to store array size in bytes
 * return cpumap array on success, NULL else
 */
-uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
-                        int *cpusize);
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
 
 
 /*
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxl/libxl.c	Fri Oct 29 07:54:27 2010 +0200
@@ -610,16 +610,11 @@
 libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
 {
     libxl_cpupoolinfo *ptr, *tmp;
-    int i, m, ncpu;
+    int i;
     xc_cpupoolinfo_t *info;
     uint32_t poolid;
 
     ptr = NULL;
-    ncpu = xc_get_max_cpus(ctx->xch);
-    if (!ncpu) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting max cpu number");
-        return NULL;
-    }
 
     poolid = 0;
     for (i = 0;; i++) {
@@ -630,19 +625,20 @@
         if (!tmp) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
             free(ptr);
+            xc_cpupool_infofree(ctx->xch, info);
             return NULL;
         }
         ptr = tmp;
         ptr[i].poolid = info->cpupool_id;
         ptr[i].sched_id = info->sched_id;
         ptr[i].n_dom = info->n_dom;
-        if (libxl_cpumap_alloc(&ptr[i].cpumap, ncpu))
-            break;
-        for (m = 0; m < ptr[i].cpumap.size / sizeof(*ptr[i].cpumap.map); m++)
-            ptr[i].cpumap.map[m] = (info->cpumap_size > (m * sizeof(*ptr[i].cpumap.map))) ?
-                info->cpumap[m] : 0;
+        if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
+            xc_cpupool_infofree(ctx->xch, info);
+            break;
+        }
+        memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
         poolid = info->cpupool_id + 1;
-        free(info);
+        xc_cpupool_infofree(ctx->xch, info);
     }
 
     *nb_pool = i;
@@ -3229,14 +3225,14 @@
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
         return NULL;
     }
-    *nrcpus = xc_get_max_cpus(ctx->xch);
+    *nrcpus = libxl_get_max_cpus(ctx);
     ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
         return NULL;
     }
 
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        if (libxl_cpumap_alloc(&ptr->cpumap, *nrcpus)) {
+        if (libxl_cpumap_alloc(ctx, &ptr->cpumap)) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
             return NULL;
         }
@@ -3244,8 +3240,7 @@
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
             return NULL;
         }
-        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
-                                ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
+        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -3260,9 +3255,9 @@
 }
 
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
-                           uint64_t *cpumap, int nrcpus)
-{
-    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap, (nrcpus + 7) / 8)) {
+                           libxl_cpumap *cpumap)
+{
+    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
         return ERROR_FAIL;
     }
@@ -3933,7 +3928,11 @@
 {
     int ncpus;
 
-    cpumap->map = xc_cpupool_freeinfo(ctx->xch, &ncpus);
+    ncpus = libxl_get_max_cpus(ctx);
+    if (ncpus == 0)
+        return ERROR_FAIL;
+
+    cpumap->map = xc_cpupool_freeinfo(ctx->xch);
     if (cpumap->map == NULL)
         return ERROR_FAIL;
 
@@ -3963,8 +3962,8 @@
         return ERROR_FAIL;
     }
 
-    for (i = 0; i < cpumap.size * 8; i++)
-        if (cpumap.map[i / 64] & (1L << (i % 64))) {
+    libxl_for_each_cpu(i, cpumap)
+        if (libxl_cpumap_test(&cpumap, i)) {
             rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
             if (rc) {
                 LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
@@ -3996,6 +3995,7 @@
     int rc, i;
     xc_cpupoolinfo_t *info;
     xs_transaction_t t;
+    libxl_cpumap cpumap;
 
     info = xc_cpupool_getinfo(ctx->xch, poolid);
     if (info == NULL)
@@ -4005,14 +4005,19 @@
     if ((info->cpupool_id != poolid) || (info->n_dom))
         goto out;
 
-    for (i = 0; i < info->cpumap_size; i++)
-        if (info->cpumap[i / 64] & (1L << (i % 64))) {
+    rc = ERROR_NOMEM;
+    if (libxl_cpumap_alloc(ctx, &cpumap))
+        goto out;
+
+    memcpy(cpumap.map, info->cpumap, cpumap.size);
+    libxl_for_each_cpu(i, cpumap)
+        if (libxl_cpumap_test(&cpumap, i)) {
             rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
             if (rc) {
                 LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
                     "Error removing cpu from cpupool");
                 rc = ERROR_FAIL;
-                goto out;
+                goto out1;
             }
         }
 
@@ -4020,7 +4025,7 @@
     if (rc) {
         LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
         rc = ERROR_FAIL;
-        goto out;
+        goto out1;
     }
 
     for (;;) {
@@ -4034,8 +4039,10 @@
 
     rc = 0;
 
-out:
-    free(info);
+out1:
+    libxl_cpumap_destroy(&cpumap);
+out:
+    xc_cpupool_infofree(ctx->xch, info);
 
     return rc;
 }
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxl/libxl.h	Fri Oct 29 07:54:27 2010 +0200
@@ -145,7 +145,7 @@
 
 typedef struct {
     uint32_t size;          /* number of bytes in map */
-    uint64_t *map;
+    uint8_t *map;
 } libxl_cpumap;
 void libxl_cpumap_destroy(libxl_cpumap *map);
 
@@ -464,7 +464,7 @@
 libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
                                 int *nb_vcpu, int *nrcpus);
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
-                           uint64_t *cpumap, int nrcpus);
+                           libxl_cpumap *cpumap);
 int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, uint32_t bitmask);
 int libxl_get_sched_id(libxl_ctx *ctx);
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxl/libxl_utils.c	Fri Oct 29 07:54:27 2010 +0200
@@ -708,15 +708,20 @@
     return rc;
 }
 
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus)
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap)
 {
-    int elems;
+    int max_cpus;
+    int sz;
 
-    elems = (max_cpus + 63) / 64;
-    cpumap->map = calloc(elems, sizeof(*cpumap->map));
+    max_cpus = libxl_get_max_cpus(ctx);
+    if (max_cpus == 0)
+        return ERROR_FAIL;
+
+    sz = (max_cpus + 7) / 8;
+    cpumap->map = calloc(sz, sizeof(*cpumap->map));
     if (!cpumap->map)
         return ERROR_NOMEM;
-    cpumap->size = elems * 8; /* size in bytes */
+    cpumap->size = sz;
     return 0;
 }
 
@@ -729,21 +734,21 @@
 {
     if (cpu >= cpumap->size * 8)
         return 0;
-    return (cpumap->map[cpu / 64] & (1L << (cpu & 63))) ? 1 : 0;
+    return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
 }
 
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return;
-    cpumap->map[cpu / 64] |= 1L << (cpu & 63);
+    cpumap->map[cpu / 8] |= 1 << (cpu & 7);
 }
 
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return;
-    cpumap->map[cpu / 64] &= ~(1L << (cpu & 63));
+    cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }
 
 int libxl_get_max_cpus(libxl_ctx *ctx)
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxl/libxl_utils.h	Fri Oct 29 07:54:27 2010 +0200
@@ -76,9 +76,11 @@
  * return -1 if there are an error */
 int libxl_check_device_model_version(libxl_ctx *ctx, char *path);
 
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus);
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
 int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
 
+#define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
+
 #endif
diff -r 5611ec8238ec -r d60c1258c6b7 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c	Fri Oct 29 07:54:27 2010 +0200
@@ -3534,8 +3534,8 @@
                               uint32_t nr_cpus)
 {
     int i, l;
-    uint64_t *cpumap;
-    uint64_t pcpumap;
+    uint8_t *cpumap;
+    uint8_t pcpumap;
     char *domname;
 
     /*      NAME  ID  VCPU */
@@ -3555,14 +3555,14 @@
     /* TIM */
     printf("%9.1f  ", ((float)vcpuinfo->vcpu_time / 1e9));
     /* CPU AFFINITY */
-    pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
+    pcpumap = nr_cpus > 8 ? (uint8_t)-1 : ((1 << nr_cpus) - 1);
     for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
         if (*cpumap < pcpumap) {
             break;
         }
-        if (nr_cpus > 64) {
+        if (nr_cpus > 8) {
             pcpumap = -1;
-            nr_cpus -= 64;
+            nr_cpus -= 8;
         } else {
             pcpumap = ((1 << nr_cpus) - 1);
             nr_cpus = 0;
@@ -3593,7 +3593,7 @@
             }
         }
         printf("\n");
-        nr_cpus = nr_cpus > 64 ? nr_cpus - 64 : 0;
+        nr_cpus = nr_cpus > 8 ? nr_cpus - 8 : 0;
         }
     }
 }
@@ -3678,11 +3678,11 @@
 static void vcpupin(char *d, const char *vcpu, char *cpu)
 {
     libxl_vcpuinfo *vcpuinfo;
-    uint64_t *cpumap = NULL;
+    libxl_cpumap cpumap;
 
     uint32_t vcpuid, cpuida, cpuidb;
     char *endptr, *toka, *tokb;
-    int i, nb_vcpu, cpusize, cpumapsize;
+    int i, nb_vcpu;
 
     vcpuid = strtoul(vcpu, &endptr, 10);
     if (vcpu == endptr) {
@@ -3695,63 +3695,54 @@
 
     find_domain(d);
 
-    if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
-        fprintf(stderr, "libxl_get_max_cpus failed.\n");
-        goto vcpupin_out1;
-    }
-    cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
-
-    cpumap = calloc(cpumapsize, sizeof (uint64_t));
-    if (!cpumap) {
-        goto vcpupin_out1;
+    if (libxl_cpumap_alloc(&ctx, &cpumap)) {
+        goto vcpupin_out;
     }
 
     if (strcmp(cpu, "all")) {
         for (toka = strtok(cpu, ","), i = 0; toka; toka = strtok(NULL, ","), ++i) {
             cpuida = strtoul(toka, &endptr, 10);
             if (toka == endptr) {
                 fprintf(stderr, "Error: Invalid argument.\n");
-                goto vcpupin_out;
+                goto vcpupin_out1;
             }
             if (*endptr == '-') {
                 tokb = endptr + 1;
                 cpuidb = strtoul(tokb, &endptr, 10);
                 if ((tokb == endptr) || (cpuida > cpuidb)) {
                     fprintf(stderr, "Error: Invalid argument.\n");
-                    goto vcpupin_out;
+                    goto vcpupin_out1;
                 }
                 while (cpuida <= cpuidb) {
-                    cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+                    libxl_cpumap_set(&cpumap, cpuida);
                     ++cpuida;
                 }
             } else {
-                cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+                libxl_cpumap_set(&cpumap, cpuida);
             }
         }
     }
     else {
-        memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
+        memset(cpumap.map, -1, cpumap.size);
     }
 
     if (vcpuid != -1) {
-        if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
-                                   cpumap, cpusize) == -1) {
+        if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid, &cpumap) == -1) {
             fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
         }
     }
     else {
         if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
             fprintf(stderr, "libxl_list_vcpu failed.\n");
-            goto vcpupin_out;
+            goto vcpupin_out1;
         }
         for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
-            if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
-                                       cpumap, cpusize) == -1) {
+            if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid, &cpumap) == -1) {
                 fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
             }
         }
     }
   vcpupin_out1:
-    free(cpumap);
+    libxl_cpumap_destroy(&cpumap);
   vcpupin_out:
     ;
 }
@@ -3903,7 +3894,7 @@
         printf("free_memory            : %"PRIu64"\n", info.free_pages / i);
     }
     if (!libxl_get_freecpus(&ctx, &cpumap)) {
-        for (i = 0; i < cpumap.size * 8; i++)
+        libxl_for_each_cpu(i, cpumap)
             if (libxl_cpumap_test(&cpumap, i))
                 n++;
         printf("free_cpus              : %d\n", n);
@@ -5455,7 +5446,7 @@
         fprintf(stderr, "libxl_get_freecpus failed\n");
         return -ERROR_FAIL;
     }
-    if (libxl_cpumap_alloc(&cpumap, freemap.size * 8)) {
+    if (libxl_cpumap_alloc(&ctx, &cpumap)) {
         fprintf(stderr, "Failed to allocate cpumap\n");
         return -ERROR_FAIL;
     }
@@ -5474,7 +5465,7 @@
     } else {
         n_cpus = 1;
         n = 0;
-        for (i = 0; i < freemap.size * 8; i++)
+        libxl_for_each_cpu(i, freemap)
             if (libxl_cpumap_test(&freemap, i)) {
                 n++;
                 libxl_cpumap_set(&cpumap, i);
@@ -5584,8 +5575,8 @@
         printf("%-19s", name);
         free(name);
         n = 0;
-        for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
-            if (poolinfo[p].cpumap.map[c / 64] & (1L << (c % 64))) {
+        libxl_for_each_cpu(c, poolinfo[p].cpumap)
+            if (libxl_cpumap_test(&poolinfo[p].cpumap, c)) {
                 if (n && opt_cpus) printf(",");
                 if (opt_cpus) printf("%d", c);
                 n++;
diff -r 5611ec8238ec -r d60c1258c6b7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Thu Oct 28 12:28:49 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Oct 29 07:54:27 2010 +0200
@@ -226,10 +226,8 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t *cpumap;
+    xc_cpumap_t cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
 
@@ -237,29 +235,20 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    nr_cpus = xc_get_max_cpus(self->xc_handle);
-    if ( nr_cpus == 0 )
-        return pyxc_error_to_exception(self->xc_handle);
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
+    cpumap = xc_cpumap_alloc(self->xc_handle);
     if(cpumap == NULL)
         return pyxc_error_to_exception(self->xc_handle);
 
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
         for ( i = 0; i < PyList_Size(cpulist); i++ )
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            cpumap[cpu / 8] |= 1 << (cpu % 8);
         }
     }
 
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
     {
         free(cpumap);
         return pyxc_error_to_exception(self->xc_handle);
@@ -385,9 +374,8 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    uint64_t cpumap_size = sizeof(*cpumap);
+    xc_cpumap_t cpumap;
+    int nr_cpus;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
 
@@ -403,12 +391,11 @@
     if ( rc < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(self->xc_handle);
-    memset(cpumap, 0, cpumap_size * size);
+    cpumap = xc_cpumap_alloc(self->xc_handle);
+    if(cpumap == NULL)
+        return pyxc_error_to_exception(self->xc_handle);
 
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
     if ( rc < 0 )
     {
         free(cpumap);
@@ -424,12 +411,12 @@
     cpulist = PyList_New(0);
     for ( i = 0; i < nr_cpus; i++ )
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
+        if (*(cpumap + i / 8) & 1 ) {
             PyObject *pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap[i / (cpumap_size * 8)] >>= 1;
+        cpumap[i / 8] >>= 1;
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
@@ -1931,22 +1918,27 @@
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
+static PyObject *cpumap_to_cpulist(XcObject *self, xc_cpumap_t cpumap)
 {
     PyObject *cpulist = NULL;
     int i;
+    int nr_cpus;
+
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
+        return pyxc_error_to_exception(self->xc_handle);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < cpusize; i++ )
+    for ( i = 0; i < nr_cpus; i++ )
     {
-        if ( *cpumap & (1L << (i % 64)) )
+        if ( *cpumap & (1 << (i % 8)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        if ( (i % 64) == 63 )
+        if ( (i % 8) == 7 )
             cpumap++;
     }
     return cpulist;
 }
@@ -2003,10 +1995,9 @@
                             "cpupool",         (int)info->cpupool_id,
                             "sched",           info->sched_id,
                             "n_dom",           info->n_dom,
-                            "cpulist",         cpumap_to_cpulist(info->cpumap,
-                                                                 info->cpumap_size));
+                            "cpulist",         cpumap_to_cpulist(self, info->cpumap));
         pool = info->cpupool_id + 1;
-        free(info);
+        xc_cpupool_infofree(self->xc_handle, info);
 
         if ( info_dict == NULL )
         {
@@ -2082,15 +2073,14 @@
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t *cpumap;
-    int mapsize;
+    xc_cpumap_t cpumap;
     PyObject *info = NULL;
 
-    cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+    cpumap = xc_cpupool_freeinfo(self->xc_handle);
     if (!cpumap)
         return pyxc_error_to_exception(self->xc_handle);
 
-    info = cpumap_to_cpulist(cpumap, mapsize * 8);
+    info = cpumap_to_cpulist(self, cpumap);
 
     free(cpumap);