# HG changeset patch
# User Juergen Gross
# Date 1288093915 -7200
# Node ID 55538c59d637fda3ec4a61d91270d9018b935167
# Parent  cd193fa265b88bf4ff891f03c9be0e12415e6778
avoid numa placement of cpus with active cpupools

When using cpupools don't pin vcpus to numa nodes as this might conflict
with the cpupool definition. numa placement should be handled by cpupool
configuration instead.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r cd193fa265b8 -r 55538c59d637 tools/python/xen/xend/XendCPUPool.py
--- a/tools/python/xen/xend/XendCPUPool.py	Tue Oct 26 12:22:52 2010 +0100
+++ b/tools/python/xen/xend/XendCPUPool.py	Tue Oct 26 13:51:55 2010 +0200
@@ -883,6 +883,11 @@
 
     lookup_pool = classmethod(lookup_pool)
 
+    def number_of_pools(cls):
+        return len(xc.cpupool_getinfo())
+
+    number_of_pools = classmethod(number_of_pools)
+
     def _cpu_number_to_ref(cls, number):
         node = XendNode.instance()
         for cpu_ref in node.get_host_cpu_refs():
diff -r cd193fa265b8 -r 55538c59d637 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py	Tue Oct 26 12:22:52 2010 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py	Tue Oct 26 13:51:55 2010 +0200
@@ -2748,7 +2748,7 @@
             return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
 
         info = xc.numainfo()
-        if info['max_node_index'] > 0:
+        if info['max_node_index'] > 0 and XendCPUPool.number_of_pools() < 2:
             node_memory_list  = info['node_memfree']
             node_to_cpu       = []
             for i in range(0, info['max_node_index'] + 1):