* Ryan Harper <ryanh@xxxxxxxxxx> [2006-07-31 14:14]:
> Once Xen calculates nr_nodes properly, all nr_cpu calculations based on
> nr_nodes * sockets_per_node * cores_per_socket * threads_per_core are
> broken. The easy fix is to replace those calculations with a new field,
> nr_cpus, in physinfo, which is calculated by num_online_cpus(). This
> patch does so and attempts to change all users over to the nr_cpus field
> in physinfo. This patch touches arch/ia64/xen/dom0_ops.c, but I've not
> done any IA64 testing with this patch applied.
- no changes
--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253 T/L: 678-9253
ryanh@xxxxxxxxxx
diffstat output:
 tools/python/xen/lowlevel/xc/xc.c      |    3 ++-
 tools/python/xen/xend/XendNode.py      |    4 ----
 tools/xenmon/xenbaked.c                |    5 +----
 tools/xenstat/libxenstat/src/xenstat.c |    4 +---
 tools/xentrace/xentrace.c              |    5 +----
 xen/arch/ia64/xen/dom0_ops.c           |    1 +
 xen/arch/x86/dom0_ops.c                |    2 +-
 xen/include/public/dom0_ops.h          |    1 +
 8 files changed, 8 insertions(+), 17 deletions(-)
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
Add nr_cpus field to physinfo; remove calculation
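
For reference (not part of the patch), here is a minimal sketch of how a
libxc consumer would pick up the new field once this change is applied,
following the same open/physinfo/close pattern as the xenbaked and xentrace
hunks below. The xc_interface_open/xc_physinfo/xc_interface_close prototypes
and the xc_physinfo_t typedef are assumed to match the current 3.0-era libxc
API; error handling is kept minimal.

#include <stdio.h>
#include <xenctrl.h>

/* Illustrative only: report the number of online physical CPUs using the
 * new physinfo.nr_cpus field instead of multiplying
 * nr_nodes * sockets_per_node * cores_per_socket * threads_per_core. */
static unsigned int get_nr_cpus(void)
{
    xc_physinfo_t physinfo;
    int xc_handle = xc_interface_open();

    if ( xc_handle < 0 )
        return 0;

    if ( xc_physinfo(xc_handle, &physinfo) != 0 )
    {
        xc_interface_close(xc_handle);
        return 0;
    }

    xc_interface_close(xc_handle);
    return physinfo.nr_cpus;
}

int main(void)
{
    printf("online cpus: %u\n", get_nr_cpus());
    return 0;
}
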
diff -r b4cc2fb77f4f tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Tue Aug 15 11:50:40 2006 -0500
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Aug 15 11:52:38 2006 -0500
@@ -577,10 +577,11 @@ static PyObject *pyxc_physinfo(XcObject
if(q>cpu_cap)
*(q-1)=0;
- ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+ ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
"threads_per_core", info.threads_per_core,
"cores_per_socket", info.cores_per_socket,
"sockets_per_node", info.sockets_per_node,
+ "nr_cpus" , info.nr_cpus,
"total_memory", pages_to_kib(info.total_pages),
"free_memory", pages_to_kib(info.free_pages),
"scrub_memory", pages_to_kib(info.scrub_pages),
diff -r b4cc2fb77f4f tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Tue Aug 15 11:50:40 2006 -0500
+++ b/tools/python/xen/xend/XendNode.py Tue Aug 15 11:52:38 2006 -0500
@@ -122,10 +122,6 @@ class XendNode:
def physinfo(self):
info = self.xc.physinfo()
- info['nr_cpus'] = (info['nr_nodes'] *
- info['sockets_per_node'] *
- info['cores_per_socket'] *
- info['threads_per_core'])
info['cpu_mhz'] = info['cpu_khz'] / 1000
# physinfo is in KiB
info['total_memory'] = info['total_memory'] / 1024
diff -r b4cc2fb77f4f tools/xenmon/xenbaked.c
--- a/tools/xenmon/xenbaked.c Tue Aug 15 11:50:40 2006 -0500
+++ b/tools/xenmon/xenbaked.c Tue Aug 15 11:52:58 2006 -0500
@@ -462,10 +462,7 @@ unsigned int get_num_cpus(void)
xc_interface_close(xc_handle);
opts.cpu_freq = (double)physinfo.cpu_khz/1000.0;
- return (physinfo.threads_per_core *
- physinfo.cores_per_socket *
- physinfo.sockets_per_node *
- physinfo.nr_nodes);
+ return physinfo.nr_cpus;
}
diff -r b4cc2fb77f4f tools/xenstat/libxenstat/src/xenstat.c
--- a/tools/xenstat/libxenstat/src/xenstat.c Tue Aug 15 11:50:40 2006 -0500
+++ b/tools/xenstat/libxenstat/src/xenstat.c Tue Aug 15 11:52:38 2006 -0500
@@ -233,9 +233,7 @@ xenstat_node *xenstat_get_node(xenstat_h
}
node->cpu_hz = ((unsigned long long)physinfo.cpu_khz) * 1000ULL;
- node->num_cpus =
- (physinfo.threads_per_core * physinfo.cores_per_socket *
- physinfo.sockets_per_node * physinfo.nr_nodes);
+ node->num_cpus = physinfo.nr_cpus;
node->tot_mem = ((unsigned long long)physinfo.total_pages)
* handle->page_size;
node->free_mem = ((unsigned long long)physinfo.free_pages)
diff -r b4cc2fb77f4f tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Tue Aug 15 11:50:40 2006 -0500
+++ b/tools/xentrace/xentrace.c Tue Aug 15 11:52:38 2006 -0500
@@ -268,10 +268,7 @@ unsigned int get_num_cpus(void)
xc_interface_close(xc_handle);
- return (physinfo.threads_per_core *
- physinfo.cores_per_socket *
- physinfo.sockets_per_node *
- physinfo.nr_nodes);
+ return physinfo.nr_cpus;
}
diff -r b4cc2fb77f4f xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Aug 15 11:50:40 2006 -0500
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue Aug 15 11:52:38 2006 -0500
@@ -80,6 +80,7 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
pi->sockets_per_node =
num_online_cpus() / cpus_weight(cpu_core_map[0]);
+ pi->nr_cpus = (u32)num_online_cpus();
pi->nr_nodes = 1;
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
diff -r b4cc2fb77f4f xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c Tue Aug 15 11:50:40 2006 -0500
+++ b/xen/arch/x86/dom0_ops.c Tue Aug 15 11:52:38 2006 -0500
@@ -196,7 +196,7 @@ long arch_do_dom0_op(struct dom0_op *op,
cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
pi->sockets_per_node =
num_online_cpus() / cpus_weight(cpu_core_map[0]);
-
+ pi->nr_cpus = (u32)num_online_cpus();
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
pi->scrub_pages = avail_scrub_pages();
diff -r b4cc2fb77f4f xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h Tue Aug 15 11:50:40 2006 -0500
+++ b/xen/include/public/dom0_ops.h Tue Aug 15 11:52:38 2006 -0500
@@ -229,6 +229,7 @@ struct dom0_physinfo {
uint32_t cores_per_socket;
uint32_t sockets_per_node;
uint32_t nr_nodes;
+ uint32_t nr_cpus;
uint32_t cpu_khz;
uint64_t total_pages;
uint64_t free_pages;