diff -r b6b2e97f8db9 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Fri Nov 13 22:13:59 2009 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Nov 17 01:48:24 2009 -0500
@@ -1054,15 +1054,21 @@
 static PyObject *pyxc_physinfo(XcObject *self)
 {
 #define MAX_CPU_ID 255
-    xc_physinfo_t info;
+    xc_physinfo_t info = { 0 };
     char cpu_cap[128], virt_caps[128], *p;
     int i, j, max_cpu_id;
     uint64_t free_heap;
     PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
-    xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+    PyObject *pcpu_tuples_obj;
+    xc_cpu_to_node_t map_thread[MAX_CPU_ID + 1];
+    xc_cpu_to_node_t map_core[MAX_CPU_ID + 1];
+    xc_cpu_to_node_t map_socket[MAX_CPU_ID + 1];
+    xc_cpu_to_node_t map_node[MAX_CPU_ID + 1];
     const char *virtcap_names[] = { "hvm", "hvm_directio" };
 
-    set_xen_guest_handle(info.cpu_to_node, map);
+    set_xen_guest_handle(info.cpu_to_core, map_core);
+    set_xen_guest_handle(info.cpu_to_socket, map_socket);
+    set_xen_guest_handle(info.cpu_to_node, map_node);
     info.max_cpu_id = MAX_CPU_ID;
 
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
@@ -1099,6 +1105,43 @@
     if ( max_cpu_id > MAX_CPU_ID )
         max_cpu_id = MAX_CPU_ID;
 
+    /* Assumes cpu siblings are listed consecutively */
+    for ( i = 0; i <= max_cpu_id; i++ )
+    {
+        uint32_t cpu_thread_id;
+        if ( map_core[i] == INVALID_TOPOLOGY_ID )
+            continue;
+
+        cpu_thread_id = 0;
+        map_thread[i] = cpu_thread_id++;
+
+        for ( j = i+1; (j <= max_cpu_id) && (map_core[i] == map_core[j]); j++ )
+            map_thread[j] = cpu_thread_id++;
+
+        i = j - 1;
+    }
+
+    /* Construct cpu-to-* lists */
+    pcpu_tuples_obj = PyList_New(0);
+
+    for ( i = 0; i <= max_cpu_id; i++ )
+    {
+        PyObject *pcpu_tuple;
+        PyObject *pyint;
+        pcpu_tuple = PyList_New(0);
+        pyint = PyInt_FromLong(map_node[i]);
+        PyList_Append(pcpu_tuple, pyint);
+        pyint = PyInt_FromLong(map_socket[i]);
+        PyList_Append(pcpu_tuple, pyint);
+        pyint = PyInt_FromLong(map_core[i]);
+        PyList_Append(pcpu_tuple, pyint);
+        pyint = PyInt_FromLong(map_thread[i]);
+        PyList_Append(pcpu_tuple, pyint);
+        Py_DECREF(pyint);
+        PyList_Append(pcpu_tuples_obj, pcpu_tuple);
+        Py_DECREF(pcpu_tuple);
+    }
+
     /* Construct node-to-cpu lists. */
     node_to_cpu_obj = PyList_New(0);
 
@@ -1107,7 +1150,7 @@
     {
         PyObject *cpus = PyList_New(0);
         for ( j = 0; j <= max_cpu_id; j++ )
-            if ( i == map[j]) {
+            if ( i == map_node[j]) {
                 PyObject *pyint = PyInt_FromLong(j);
                 PyList_Append(cpus, pyint);
                 Py_DECREF(pyint);
@@ -1128,6 +1171,8 @@
         Py_DECREF(pyint);
     }
 
+    PyDict_SetItemString(ret_obj, "pcpu_tuples", pcpu_tuples_obj);
+    Py_DECREF(pcpu_tuples_obj);
     PyDict_SetItemString(ret_obj, "node_to_cpu", node_to_cpu_obj);
     Py_DECREF(node_to_cpu_obj);
     PyDict_SetItemString(ret_obj, "node_to_memory", node_to_memory_obj);
diff -r b6b2e97f8db9 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Fri Nov 13 22:13:59 2009 +0000
+++ b/tools/python/xen/xend/XendNode.py Tue Nov 17 01:48:24 2009 -0500
@@ -763,6 +763,10 @@
         else:
             return 'unknown'
 
+    def pcpu_tuples(self):
+        phys_info = self.xc.physinfo()
+        return phys_info["pcpu_tuples"]
+
     def get_cpu_configuration(self):
         phys_info = self.physinfo_dict()
 
@@ -872,6 +876,7 @@
         except:
             str='none\n'
         return str[:-1];
+
     def format_node_to_memory(self, pinfo):
         str=''
         whitespace=''
@@ -886,7 +891,6 @@
             str='none\n'
         return str[:-1];
 
-
     def physinfo(self):
         info = self.xc.physinfo()
 
diff -r b6b2e97f8db9 tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py Fri Nov 13 22:13:59 2009 +0000
+++ b/tools/python/xen/xend/server/XMLRPCServer.py Tue Nov 17 01:48:24 2009 -0500
@@ -199,7 +199,7 @@
 
         # Functions in XendNode and XendDmesg
        for type, lst, n in [(XendNode,
-                              ['info', 'pciinfo', 'send_debug_keys',
+                              ['info', 'pcpu_tuples', 'pciinfo', 'send_debug_keys',
                                'tmem_list', 'tmem_freeze', 'tmem_thaw',
                                'tmem_flush', 'tmem_destroy', 'tmem_set_weight',
                                'tmem_set_cap', 'tmem_set_compress',
diff -r b6b2e97f8db9 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Fri Nov 13 22:13:59 2009 +0000
+++ b/tools/python/xen/xm/main.py Tue Nov 17 01:48:24 2009 -0500
@@ -156,10 +156,11 @@
                      'Send a trigger to a domain.'),
     'vcpu-list'   : ('[Domain, ...]',
                      'List the VCPUs for all/some domains.'),
-    'vcpu-pin'    : ('<Domain> <VCPU|all> <CPUs|all>',
+    'vcpu-pin'    : ('<Domain> <VCPU|all> <CPUs|all|pcpu-tuple>\n'
+                     'pcpu-tuple : node.socket.core.thread',
                      'Set which CPUs a VCPU can use.'),
     'vcpu-set'    : ('<Domain> <vCPUs>',
-                     'Set the number of active VCPUs for allowed for the'
+                     'Set the number of active VCPUs allowed for the'
                      ' domain.'),
     #usb
     'usb-add'     : ('<Domain> <[host:bus.addr] [host:vendor_id:product_id]>','Add the usb device to FV VM.'),
@@ -1433,12 +1434,29 @@
 
 #############################################################
 
-
 def xm_vcpu_pin(args):
     arg_check(args, "vcpu-pin", 3)
 
+    def cpumap_from_tuple(cpulist):
+        pcpu_tuples = server.xend.node.pcpu_tuples()
+        cpus = []
+        (n,s,c,t) = cpulist.split('.')
+        for i in range(0, len(pcpu_tuples)):
+            if (n == '*') or (int(n) == pcpu_tuples[i][0]):
+                if (s == '*') or (int(s) == pcpu_tuples[i][1]):
+                    if (c == '*') or (int(c) == pcpu_tuples[i][2]):
+                        if (t == '*') or (int(t) == pcpu_tuples[i][3]):
+                            cpus.append(int(i))
+        cpus.sort()
+        return ",".join(map(str, cpus))
+
     def cpu_make_map(cpulist):
         cpus = []
+        # Dotted tuples (node.socket.core.thread)
+        if cpulist.find('.') != -1:
+            cpumap = cpumap_from_tuple(cpulist)
+            return cpumap
+
         for c in cpulist.split(','):
             if c.find('-') != -1:
                 (x,y) = c.split('-')
diff -r b6b2e97f8db9 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c Fri Nov 13 22:13:59 2009 +0000
+++ b/xen/arch/x86/sysctl.c Tue Nov 17 01:48:24 2009 -0500
@@ -46,6 +46,8 @@
     case XEN_SYSCTL_physinfo:
     {
         uint32_t i, max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_core_arr;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_socket_arr;
         XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
@@ -55,9 +57,14 @@
             break;
 
         max_array_ent = pi->max_cpu_id;
+        cpu_to_core_arr = pi->cpu_to_core;
+        cpu_to_socket_arr = pi->cpu_to_socket;
         cpu_to_node_arr = pi->cpu_to_node;
 
         memset(pi, 0, sizeof(*pi));
+
+        pi->cpu_to_core = cpu_to_core_arr;
+        pi->cpu_to_socket = cpu_to_socket_arr;
         pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core =
             cpus_weight(per_cpu(cpu_sibling_map, 0));
@@ -84,7 +91,8 @@
         {
             for ( i = 0; i <= max_array_ent; i++ )
             {
-                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                uint32_t node =
+                    cpu_online(i) ? cpu_to_node(i) : INVALID_TOPOLOGY_ID;
                 if ( copy_to_guest_offset(cpu_to_node_arr, i, &node, 1) )
                 {
                     ret = -EFAULT;
@@ -93,6 +101,34 @@
             }
         }
 
+        if ( !guest_handle_is_null(cpu_to_core_arr) )
+        {
+            for ( i = 0; i <= max_array_ent; i++ )
+            {
+                uint32_t core =
+                    cpu_online(i) ? cpu_to_core(i) : INVALID_TOPOLOGY_ID;
+                if ( copy_to_guest_offset(cpu_to_core_arr, i, &core, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+        }
+
+        if ( !guest_handle_is_null(cpu_to_socket_arr) )
+        {
+            for ( i = 0; i <= max_array_ent; i++ )
+            {
+                uint32_t socket =
+                    cpu_online(i) ? cpu_to_socket(i) : INVALID_TOPOLOGY_ID;
+                if ( copy_to_guest_offset(cpu_to_socket_arr, i, &socket, 1) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+            }
+        }
+
         if ( copy_to_guest(u_sysctl, sysctl, 1) )
             ret = -EFAULT;
     }
diff -r b6b2e97f8db9 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h Fri Nov 13 22:13:59 2009 +0000
+++ b/xen/include/public/sysctl.h Tue Nov 17 01:48:24 2009 -0500
@@ -115,8 +115,9 @@
      * If the actual @max_cpu_id is smaller than the array then the trailing
      * elements of the array will not be written by the sysctl.
      */
+    XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
+    XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
     XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
-
     /* XEN_SYSCTL_PHYSCAP_??? */
     uint32_t capabilities;
 };
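
For illustration only (not part of the patch): the sketch below shows how a
dotted pcpu tuple such as "0.0.*.*" would be expanded against the new
"pcpu_tuples" physinfo field. The helper name expand_pcpu_tuple() and the
sample topology are hypothetical stand-ins for the data returned by
server.xend.node.pcpu_tuples(); the patched xm code performs the equivalent
expansion in cpumap_from_tuple() above.

    # Hypothetical, self-contained sketch of the dotted-tuple expansion.
    # pcpu_tuples[i] is assumed to hold [node, socket, core, thread] for
    # pcpu i, matching the "pcpu_tuples" list built by pyxc_physinfo().
    def expand_pcpu_tuple(spec, pcpu_tuples):
        fields = spec.split('.')
        assert len(fields) == 4, "expected node.socket.core.thread"
        cpus = []
        for cpu, topo in enumerate(pcpu_tuples):
            # '*' acts as a wildcard in any of the four positions
            if all(f == '*' or int(f) == v for f, v in zip(fields, topo)):
                cpus.append(cpu)
        return cpus

    # Made-up topology: 2 nodes, 1 socket per node, 2 cores, 2 threads.
    sample = [[n, 0, c, t] for n in range(2) for c in range(2) for t in range(2)]
    print(expand_pcpu_tuple('0.0.*.*', sample))   # -> [0, 1, 2, 3]
    print(expand_pcpu_tuple('*.0.1.0', sample))   # -> [2, 6]

With the patch applied, the same selection could be written on the command
line as, for example, "xm vcpu-pin <domain> all 0.0.*.*" (every thread of
node 0, socket 0), assuming the dotted form reaches cpu_make_map() unmodified
(shell quoting of the '*' characters may be needed).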