This patch adds a "guestnodes=" config file option to allow explicitly
specifying the number of NUMA nodes for the guest. A value of "0" disables
SRAT generation and should exactly mimic the current behaviour.
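For illustration, a guest config file using the new option could look like
this (a minimal sketch with made-up values; only the guestnodes line is new):

    # example HVM guest config fragment (hypothetical values)
    builder    = 'hvm'
    memory     = 4096
    vcpus      = 4
    guestnodes = 2    # expose two NUMA nodes; 0 keeps the current no-SRAT behaviour
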
I am not sure whether I got this right, as the config option passing
seems a bit "obscure" to me.
In the future it would be nice to allow more values here (such as
"automatic", to let Xen decide how many nodes the guest should have).
Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
commit 50abdfdafbd3e8eca2bc40e5f44d51419e4444a6
Author: Andre Przywara <andre.przywara@xxxxxxx>
Date: Mon Feb 1 13:27:47 2010 +0100
add config file options for number of nodes and mem per node
diff --git a/tools/python/xen/xend/XendConfig.py b/tools/python/xen/xend/XendConfig.py
index 216665a..eb17968 100644
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
@@ -174,6 +174,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
     'xauthority': str,
     'pci': str,
     'vhpt': int,
+    'guestnodes': int,
     'guest_os_type': str,
     'hap': int,
     'xen_extended_power_mgmt': int,
@@ -670,7 +671,10 @@ class XendConfig(dict):
             cfg["memory"] = int(sxp.child_value(sxp_cfg, "memory"))
         if sxp.child_value(sxp_cfg, "maxmem") != None:
             cfg["maxmem"] = int(sxp.child_value(sxp_cfg, "maxmem"))
-
+
+        if sxp.child_value(sxp_cfg, "guestnodes") != None:
+            cfg["guestnodes"] = int(sxp.child_value(sxp_cfg, "guestnodes"))
+
         # Convert scheduling parameters to vcpus_params
         if 'vcpus_params' not in cfg:
             cfg['vcpus_params'] = {}
diff --git a/tools/python/xen/xend/image.py b/tools/python/xen/xend/image.py
index f06d6e2..4185f43 100644
--- a/tools/python/xen/xend/image.py
+++ b/tools/python/xen/xend/image.py
@@ -144,6 +144,8 @@ class ImageHandler:
         if 'cpuid_check' in vmConfig:
             self.cpuid_check = vmConfig['cpuid_check']
 
+        self.guestnodes = int(vmConfig['platform'].get('guestnodes',0))
+
     def cleanupTmpImages(self):
         if self.use_tmp_kernel:
             self.unlink(self.kernel)
@@ -953,6 +955,7 @@ class HVMImageHandler(ImageHandler):
         log.debug("vcpu_avail = %li", self.vm.getVCpuAvail())
         log.debug("acpi = %d", self.acpi)
         log.debug("apic = %d", self.apic)
+        log.debug("nodes = %d", self.guestnodes)
 
         rc = xc.hvm_build(domid = self.vm.getDomid(),
                           image = self.loader,
@@ -962,7 +965,7 @@
                           vcpu_avail = self.vm.getVCpuAvail(),
                           acpi = self.acpi,
                           apic = self.apic,
-                          nodes = 0)
+                          nodes = self.guestnodes)
 
         rc['notes'] = { 'SUSPEND_CANCEL': 1 }
         rc['store_mfn'] = xc.hvm_get_param(self.vm.getDomid(),
diff --git a/tools/python/xen/xm/create.py b/tools/python/xen/xm/create.py
index 73a6121..b58af7f 100644
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -676,6 +676,10 @@ gopts.var('superpages', val='0|1',
           fn=set_int, default=0,
           use="Create domain with superpages")
 
+gopts.var('guestnodes', val="GUESTNODES",
+          fn=set_int, default=0,
+          use="""Number of NUMA nodes to appear in the guest.""")
+
 def err(msg):
     """Print an error to stderr and exit.
     """
@@ -1079,7 +1083,7 @@ def configure_hvm(config_image, vals):
             'vcpus', 'vnc', 'vncconsole', 'vncdisplay', 'vnclisten',
             'vncunused', 'viridian', 'vpt_align',
             'xauthority', 'xen_extended_power_mgmt', 'xen_platform_pci',
-             'memory_sharing' ]
+             'memory_sharing', 'guestnodes' ]
 
     for a in args:
         if a in vals.__dict__ and vals.__dict__[a] is not None:
diff --git a/tools/python/xen/xm/xenapi_create.py b/tools/python/xen/xm/xenapi_create.py
index 4c0177b..bb593ec 100644
--- a/tools/python/xen/xm/xenapi_create.py
+++ b/tools/python/xen/xm/xenapi_create.py
@@ -1110,7 +1110,8 @@ class sxp2xml:
             'xen_platform_pci',
             'tsc_mode'
             'description',
-            'nomigrate'
+            'nomigrate',
+            'guestnodes'
             ]
 
         platform_configs = []