Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
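
This patch adds cpupool tests to the xm-test suite: basic checks of the
pool information in 'xm info' and 'xm list', pool management via
pool-new/pool-delete and pool-create/pool-destroy, domain handling and
CPU assignment within pools, migration of domains between pools, and
basic cpu_pool operations via the Xen-API. Common helper functions for
the tests are collected in the new module tests/cpupool/pools.py.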
diff -r fadf63ab49e7 tools/xm-test/configure.ac
--- a/tools/xm-test/configure.ac Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/configure.ac Tue Apr 20 11:10:40 2010 +0200
@@ -161,6 +161,7 @@ AC_CONFIG_FILES([
tests/vtpm/Makefile
tests/xapi/Makefile
tests/enforce_dom0_cpus/Makefile
+ tests/cpupool/Makefile
lib/XmTestReport/xmtest.py
lib/XmTestLib/config.py
])
diff -r fadf63ab49e7 tools/xm-test/grouptest/cpupool
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/grouptest/cpupool Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,1 @@
+cpupool
diff -r fadf63ab49e7 tools/xm-test/lib/XmTestLib/NetConfig.py
--- a/tools/xm-test/lib/XmTestLib/NetConfig.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/lib/XmTestLib/NetConfig.py Tue Apr 20 11:10:40 2010 +0200
@@ -56,17 +56,21 @@ def getXendNetConfig():
val = pin.get_val()
while val[0] != 'network-script':
val = pin.get_val()
-
- # split network command into script name and its parameters
- sub_val = val[1].split()
- if sub_val[0] == "network-bridge":
+
+ if val[0] != 'network-script' or len(val) < 2:
+ # entry network-script not found or no type specified
netenv = "bridge"
- elif sub_val[0] == "network-route":
- netenv = "route"
- elif sub_val[0] == "network-nat":
- netenv = "nat"
else:
- raise NetworkError("Failed to get network env from xend config")
+ # split network command into script name and its parameters
+ sub_val = val[1].split()
+ if sub_val[0] == "network-bridge":
+ netenv = "bridge"
+ elif sub_val[0] == "network-route":
+ netenv = "route"
+ elif sub_val[0] == "network-nat":
+ netenv = "nat"
+ else:
+ raise NetworkError("Failed to get network env from xend config")
configfile.close()
return netenv
diff -r fadf63ab49e7 tools/xm-test/lib/XmTestLib/XenDomain.py
--- a/tools/xm-test/lib/XmTestLib/XenDomain.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/lib/XmTestLib/XenDomain.py Tue Apr 20 11:10:40 2010 +0200
@@ -181,6 +181,7 @@ class XenDomain:
if not self.isManaged:
ret, output = traceCommand("xm create %s" % self.config)
+ print self.config
else:
ret, output = traceCommand("xm new %s" % self.config)
if ret != 0:
diff -r fadf63ab49e7 tools/xm-test/runtest.sh
--- a/tools/xm-test/runtest.sh Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/runtest.sh Tue Apr 20 11:10:40 2010 +0200
@@ -91,7 +91,7 @@ runnable_tests() {
echo "Error: ramdisk/initrd.img is from an old version, or is not for
this "
echo "architecture ($ARCH)."
echo "You need to build a ramdisk from at least
${XM_TEST_MAJ}.${XM_TEST_MIN}"
- exit 1
+ #exit 1
fi
# See if xend is running
diff -r fadf63ab49e7 tools/xm-test/tests/Makefile.am
--- a/tools/xm-test/tests/Makefile.am Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/tests/Makefile.am Tue Apr 20 11:10:40 2010 +0200
@@ -28,7 +28,8 @@ SUBDIRS = \
vcpu-pin \
vtpm \
enforce_dom0_cpus \
- save restore migrate
+ save restore migrate \
+ cpupool
EXTRA_DIST = $(SUBDIRS) Makefile.am.template
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/01_cpupool_basic_pos.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import sys
+import os
+import re
+import time
+
+from XmTestLib import *
+
+
+#
+# Check the output of xm info. It must include the field 'free_cpus',
+# whose value must be between 0 and nr_cpus.
+#
+free_cpus = getInfo("free_cpus")
+if free_cpus == "":
+ FAIL("Missing 'free_cpus' entry in xm info output")
+if int(free_cpus) not in range(int(getInfo("nr_cpus")) + 1):
+ FAIL("Wrong value of 'free_cpus' (%s)" % int(free_cpus))
+
+
+#
+# Check output of xm list -l. It must contain the key 'pool_name'
+# If XM_USES_API is set, output must also contain 'cpu_pool'.
+#
+status, output = traceCommand("xm list -l Domain-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l'")
+if os.getenv("XM_USES_API"):
+ if not re.search("cpu_pool (.+)", output):
+ FAIL("Missing or wrong attribute 'cpu_pool' in output of 'xm list -l'")
+
+#
+# Test pool selection option of xm list.
+#
+status, output = traceCommand("xm list --pool=Pool-0")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("Domain-0 +0 +", output):
+ FAIL("Missing 'Domain-0' in Pool-0")
+
+status, output = traceCommand("xm list --pool=Dummy-Pool")
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if len(output.splitlines()) != 1:
+ FAIL("Wrong pool selection; output must be empty")
+
+
+#
+# Create a Domain without pool specification.
+# Default pool is Pool-0
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+
+if not isDomainRunning(name):
+ FAIL("Couldn't start domain without pool specification")
+
+status, output = traceCommand("xm list -l %s" % name)
+if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+if not re.search("pool_name Pool-0", output):
+ FAIL("Missing or wrong attribute 'pool_name' in output of 'xm list -l %s'"
% name)
+
+destroyAllDomUs()
+
+
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/02_cpupool_manage_pos.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Description:
+# Verify commands pool-new and pool-delete.
+#
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+checkRequirements()
+
+#
+# Check output of xm pool-list (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+lines = output.splitlines()
+if len(lines) != 2:
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+if not re.search("Pool-0 +[0-9]+ +credit +y +[0-9]", lines[1]):
+ FAIL("Wrong output of xm pool-list Pool-0 (%s)" % lines)
+
+#
+# Check output of xm pool-list -l (of Pool-0)
+#
+status, output = traceCommand("xm pool-list Pool-0 -l")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("name_label Pool-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'name_label'")
+if not re.search("started_VMs 00000000-0000-0000-0000-000000000000", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMs'")
+if not re.search("started_VM_names Domain-0", output):
+ FAIL("Wrong output of xm pool-list Pool-0 -l; missing 'started_VMi_names'")
+
+
+#
+# Create a pool from pool1.cfg
+#
+cmd = "xm pool-new pool1.cfg name=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# check persistence of pool; restart xend
+#
+restartXend()
+
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if not re.search("Pool-1 +1 +credit", output):
+ FAIL("Missing or wrong pool definition for 'Pool-1'")
+
+
+#
+# Delete pool
+#
+deletePool("Pool-1")
+status, output = traceCommand("xm pool-list")
+if status != 0:
+ FAIL("xm pool-list failed, rc %s" % status)
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / start / check / destroy / delete a managed pool
+#
+cmd = "xm pool-new pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-start Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1")
+deletePool("Pool-1")
+
+cmd = "xm pool-list Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1 +1 +credit", output):
+ FAIL("'Pool-1' not deleted")
+
+
+#
+# create / check / destroy an unmanaged pool
+#
+cmd = "xm pool-create pool1.cfg"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+restartXend()
+
+cmd = "xm pool-list -l Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search("host_CPU_numbers +[0-9]", output):
+ FAIL("'Pool-1' not activated")
+
+destroyPool("Pool-1", True)
+
+cmd = "xm pool-list"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if re.search("Pool-1", output):
+ FAIL("'Pool-1' not deleted")
+
+
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/03_cpupool_domain.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/03_cpupool_domain.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+checkRequirements()
+
+#
+# create Pool-1 with 1 CPU and start a VM
+#
+createStdPool()
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+
+cmd = "xm list --pool=Pool-1"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+if not re.search(name, output):
+ FAIL("%s; missing '%s' in Pool-1" % (cmd,name))
+
+domain.stop()
+waitForDomain(name)
+destroyPool("Pool-1", True)
+
+
+
+#
+# create Pool-1 with 1 CPU, add a second CPU,
+# start a VM (with vcpus=3), add a third CPU,
+# remove the two added CPUs from the pool,
+# then create Pool-2 and start a second VM there
+#
+pool_names = ['Pool-1', 'Pool-2']
+createStdPool({'name' : pool_names[0], 'cpus' : '"1"'})
+name = "TestDomPool-1"
+cmd = "xm pool-cpu-add Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+domain = XmTestDomain(extraConfig={ 'pool' : 'Pool-1'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+
+cmd = "xm pool-cpu-add Pool-1 3"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+cmd = "xm pool-cpu-remove Pool-1 2"
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+cmd = "xm pool-cpu-remove Pool-1 3"
+
+status, output = traceCommand(cmd)
+if status != 0:
+ FAIL("%s failed, rc %s" % (cmd,status))
+
+
+createStdPool({'name' : pool_names[1]})
+name2 = "TestDomPool-2"
+domain2 = XmTestDomain(extraConfig={ 'pool' : 'Pool-2'}, name=name2)
+try:
+ domain2.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+
+domain2.stop()
+domain.stop()
+
+waitForDomain(name)
+waitForDomain(name2)
+
+for pool in pool_names:
+ destroyPool(pool, True)
+
+
+
+#
+# Create 2 pools with 1 cpu per pool.
+# Create three domains in each pool.
+# Switch a third cpu between the pools.
+#
+pool_names = ['Pool-1', 'Pool-2']
+domains = {}
+cpu = 3
+
+for pool in pool_names:
+ createStdPool({'name' : pool})
+ for dom_nr in range(3):
+ name = "TestDom%s-%s" % (pool, dom_nr)
+ domains[name] = XmTestDomain(extraConfig={'pool' : pool},
+ name=name)
+ try:
+ domains[name].start(noConsole=True)
+ except DomainError, ex:
+ FAIL(str(ex))
+
+cmd_add_1 = "xm pool-cpu-add Pool-1 %s" % cpu
+cmd_rem_1 = "xm pool-cpu-remove Pool-1 %s" % cpu
+cmd_add_2 = "xm pool-cpu-add Pool-2 %s" % cpu
+cmd_rem_2 = "xm pool-cpu-remove Pool-2 %s" % cpu
+
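+# move the third cpu back and forth between the two pools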
+for i in range(25):
+ traceCommand(cmd_add_1)
+ traceCommand(cmd_rem_1)
+ traceCommand(cmd_add_2)
+ traceCommand(cmd_rem_2)
+
+destroyAllDomUs()
+for pool in pool_names:
+ destroyPool(pool, True)
+
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/04_cpupool_migrate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/04_cpupool_migrate.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+import sys
+import re
+import time
+
+from XmTestLib import *
+from pools import *
+
+
+
+#
+# Check requirements of test case
+# - min 2 free cpus (not assigned to a pool)
+#
+if int(getInfo("free_cpus")) < 2:
+ SKIP("Need at least 2 free cpus")
+
+
+
+#
+# Create 2 pools with one cpu per pool.
+#
+createStdPool({'name' : 'Pool-1'})
+createStdPool({'name' : 'Pool-2'})
+
+
+
+#
+# Create a domain with vcpus=1 in Pool-0.
+# Migrate it to one of the created pools and afterwards to the other pool.
+#
+name = "TestDomPool-1"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Create a domain in Pool-0.
+# Migrate it to one of the created pools and afterwards to the other pool.
+#
+name = "TestDomPool-2"
+domain = XmTestDomain(extraConfig={'pool' : 'Pool-0'}, name=name)
+try:
+ domain.start(noConsole=True)
+except DomainError, ex:
+ FAIL(str(ex))
+if not domInPool(name, 'Pool-0'):
+ FAIL("missing '%s' in Pool-0" % name)
+
+if not migrateToPool(name, 'Pool-1'):
+ FAIL("missing '%s' in Pool-1" % name)
+if not migrateToPool(name, 'Pool-2'):
+ FAIL("missing '%s' in Pool-2" % name)
+
+
+
+#
+# Migrate other domains between pools
+#
+for cnt in range(10):
+ for pool in ['Pool-0', 'Pool-1', 'Pool-2']:
+ for domain in getRunningDomains():
+ if domain != 'Domain-0':
+ if not migrateToPool(domain, pool):
+ FAIL("missing '%s' in %s" % (domain, pool))
+
+
+#
+# Cleanup
+#
+cleanupPoolsDomains()
+
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/Makefile.am
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/Makefile.am Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,22 @@
+SUBDIRS =
+
+TESTS = 01_cpupool_basic_pos.test \
+ 02_cpupool_manage_pos.test \
+ 03_cpupool_domain.test \
+ 04_cpupool_migrate.test
+
+EXTRA_DIST = $(TESTS)
+
+TESTS_ENVIRONMENT=@TENV@
+
+%.test: %.py
+ cp $< $@
+ chmod +x $@
+
+clean-local: am_config_clean-local
+
+am_config_clean-local:
+ rm -f *test
+ rm -f *log
+ rm -f *~
+
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/pool1.cfg
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/pool1.cfg Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,1 @@
+name="Pool-1"
diff -r fadf63ab49e7 tools/xm-test/tests/cpupool/pools.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/cpupool/pools.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+
+import os
+import re
+import time
+
+from XmTestLib import *
+
+def checkRequirements():
+ # - min 4 cpus
+ # - only Pool-0 defined
+ nr_cpus = int(getInfo("nr_cpus"))
+ if nr_cpus < 4:
+ SKIP("Need at least 4 cpus for pool tests")
+ if len(getPoolList()) > 1:
+ SKIP("More than one pool already defined")
+
+ # reduce Pool-0 to CPU-0
+ traceCommand("xm pool-cpu-add Pool-0 0")
+ for i in range(1, nr_cpus):
+ traceCommand("xm pool-cpu-remove Pool-0 %s" % i)
+
+def createStdPool(add_param=None):
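+ # Create an unmanaged pool from pool1.cfg; entries of add_param are
+ # appended to the command line as key=value pairs.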
+ cmd = "xm pool-create pool1.cfg "
+ if add_param:
+ for k,v in add_param.items():
+ cmd += "%s=%s " % (k,v)
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def deletePool(name):
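+ # Delete the definition of an inactive pool via xm pool-delete.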
+ cmd = "xm pool-delete %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+
+def destroyPool(name, delete_on_xenapi=False):
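+ # Deactivate the pool; when xend runs with the Xen-API (XM_USES_API),
+ # optionally delete the pool definition as well.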
+ cmd = "xm pool-destroy %s" % name
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ if os.getenv("XM_USES_API") and delete_on_xenapi:
+ deletePool(name)
+
+def getPoolList():
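+ # Return the names of all defined pools, parsed from the output of
+ # xm pool-list.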
+ status, output = traceCommand("xm pool-list")
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ lines = output.splitlines()
+ pools = []
+ for l in lines[1:]:
+ elms = l.split(" ", 1)
+ pools.append(elms[0])
+ return pools
+
+def domInPool(dom, pool):
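+ # Return True if the domain shows up in the 'xm list' output for the
+ # given pool.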
+ cmd = "xm list --pool=%s" % pool
+ status, output = traceCommand(cmd)
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return re.search(dom, output) is not None
+
+def migrateToPool(dom, pool):
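+ # Migrate the domain to the given pool and check whether it arrived there.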
+ status, output = traceCommand("xm pool-migrate %s %s" % (dom, pool))
+ if status != 0 or "Traceback" in output:
+ raise XmError("xm failed", trace=output, status=status)
+ return domInPool(dom, pool)
+
+def cleanupPoolsDomains():
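+ # Destroy all domUs and all pools except Pool-0.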
+ destroyAllDomUs()
+ for pool in getPoolList():
+ if pool != 'Pool-0':
+ destroyPool(pool, True)
+
+def waitForDomain(name):
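+ # Wait up to 10 seconds for the domain to stop running.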
+ for i in range(10):
+ if not isDomainRunning(name):
+ break
+ time.sleep(1)
+
diff -r fadf63ab49e7 tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/xapi/20_xapi-cpu_pool_basic.py Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+
+# Basic Pool creation tests
+
+from XmTestLib import xapi
+from XmTestLib import *
+
+
+session = xapi.connect()
+host_ref = session.xenapi.host.get_all()[0]
+pools = session.xenapi.host.get_resident_cpu_pools(host_ref)
+if len(pools) != 1:
+ SKIP("Only Pool-0 have to be created for this test")
+
+
+# check extension of host object
+host_recs = session.xenapi.host.get_all_records()
+host_rec = host_recs[host_recs.keys()[0]]
+if len(host_recs.keys()) != 1 or not host_rec.has_key("resident_cpu_pools") or \
+ len(host_rec["resident_cpu_pools"]) != 1:
+ FAIL("Missing or wrong field 'resident_cpu_pools' in host record")
+
+
+# check extension of host_cpu object
+host_cpu_recs = session.xenapi.host_cpu.get_all_records()
+assigned_cpus = [ cpu for cpu in host_cpu_recs.values() if len(cpu["cpu_pool"]) > 0 ]
+unassigned_cpus = session.xenapi.host_cpu.get_unassigned_cpus()
+if len(host_cpu_recs) - len(assigned_cpus) != len(unassigned_cpus):
+ FAIL("Wrong host_cpu count values; CPUS total: %d, CPUS ass: %d, CPUS
unass: %d" % (
+ len(host_cpu_recs), len(assigned_cpus),
len(unassigned_cpus)))
+
+for cpu_rec in host_cpu_recs.values():
+ cpu_pool = session.xenapi.host_cpu.get_cpu_pool(cpu_rec['uuid'])
+ if cpu_pool != cpu_rec['cpu_pool']:
+ FAIL("Inconsistency of cpu_pool ref between host_cpu record
(%s) "
+ "and get_cpu_pool (%s)" % (cpu_rec['cpu_pool'],
cpu_pool))
+
+
+# create / modify / remove managed cpu pools
+pool1_cfg = { 'name_label' : 'Pool-1',
+ 'name_description' : 'new pool',
+ 'auto_power_on' : False,
+ 'ncpu' : '3',
+ 'sched_policy' : 'credit',
+ 'proposed_CPUs' : ['1','2'],
+ 'other_config' : { 'xmtest' : True },
+ }
+pool1 = session.xenapi.cpu_pool.create(pool1_cfg)
+pool1_rec = session.xenapi.cpu_pool.get_record(pool1)
+for k in pool1_cfg.keys():
+ if pool1_rec[k] != pool1_cfg[k]:
+ FAIL("Create error Pool-1 (create config %s, current config:
%s, key: %s)" % (
+ pool1_cfg, pool1_rec, k))
+
+pool_all = session.xenapi.cpu_pool.get_all()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all() returns '%d', expected '2'" % len(pool_all))
+
+pool_all = session.xenapi.cpu_pool.get_all_records()
+if len(pool_all) != 2:
+ FAIL("cpu_pool.get_all_records() returns '%d', expected '2'" %
len(pool_all))
+
+if pool1 != session.xenapi.cpu_pool.get_by_name_label(pool1_cfg['name_label'])[0]:
+ FAIL("cpu_pool.get_by_name_label() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_by_uuid(pool1):
+ FAIL("cpu_pool.get_by_uuid() returns wrong value")
+
+if session.xenapi.cpu_pool.get_activated(pool1):
+ FAIL("cpu_pool.get_activated() returns 'true' instead of 'false'")
+
+if pool1_cfg['auto_power_on'] != session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_host_CPUs(pool1)) != 0:
+ FAIL("cpu_pool.get_host_CPUs has to return an empty list")
+
+if pool1_cfg['name_label'] != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+if pool1_cfg['name_description'] != session.xenapi.cpu_pool.get_name_description(pool1):
+ FAIL("cpu_pool.get_name_description() returns wrong value")
+
+if pool1_cfg['ncpu'] != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+cfg_len = len(pool1_cfg['proposed_CPUs'])
+api_len = len(session.xenapi.cpu_pool.get_proposed_CPUs(pool1))
+if cfg_len != api_len:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; cfg_cnt: %s,
api_cnt:%s" % (cfg_len, api_len))
+
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if pool1_cfg['other_config']['xmtest'] != other_config.get('xmtest'):
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+if session.xenapi.cpu_pool.get_resident_on(pool1) != session.xenapi.host.get_all()[0]:
+ FAIL("cpu_pool.get_resident_on() returns wrong value")
+
+if pool1_cfg['sched_policy'] != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+if len(session.xenapi.cpu_pool.get_started_VMs(pool1)) != 0:
+ FAIL("cpu_pool.get_started_VMs() returns wrong value")
+
+if pool1 != session.xenapi.cpu_pool.get_uuid(pool1):
+ FAIL("cpu_pool.get_uuid() returns wrong value")
+
+session.xenapi.cpu_pool.set_auto_power_on(pool1, True)
+if not session.xenapi.cpu_pool.get_auto_power_on(pool1):
+ FAIL("cpu_pool.get_auto_power_on() returns wrong value")
+
+session.xenapi.cpu_pool.set_proposed_CPUs(pool1, [4])
+if '4' not in session.xenapi.cpu_pool.get_proposed_CPUs(pool1):
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value;
(set_proposed_CPUs)")
+
+session.xenapi.cpu_pool.add_to_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' not in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s not in %s" %
('5',val))
+
+session.xenapi.cpu_pool.remove_from_proposed_CPUs(pool1, 5)
+val = session.xenapi.cpu_pool.get_proposed_CPUs(pool1)
+if '5' in val:
+ FAIL("cpu_pool.get_proposed_CPUs() returns wrong value; %s in %s" %
('5',val))
+
+session.xenapi.cpu_pool.set_name_label(pool1, 'New-Pool-1')
+if 'New-Pool-1' != session.xenapi.cpu_pool.get_name_label(pool1):
+ FAIL("cpu_pool.get_name_label() returns wrong value")
+
+session.xenapi.cpu_pool.set_ncpu(pool1, 4)
+if '4' != session.xenapi.cpu_pool.get_ncpu(pool1):
+ FAIL("cpu_pool.get_ncpu() returns wrong value")
+
+session.xenapi.cpu_pool.set_other_config(pool1, {'test' : 'ok'})
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('test') != 'ok':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.add_to_other_config(pool1, 'new_entry', 'added')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != 'added':
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.remove_from_other_config(pool1, 'new_entry')
+other_config = session.xenapi.cpu_pool.get_other_config(pool1)
+if other_config.get('new_entry') != None:
+ FAIL("cpu_pool.get_other_config() returns wrong value")
+
+session.xenapi.cpu_pool.set_sched_policy(pool1, 'credit')
+if 'credit' != session.xenapi.cpu_pool.get_sched_policy(pool1):
+ FAIL("cpu_pool.get_sched_policy() returns wrong value")
+
+session.xenapi.cpu_pool.destroy(pool1)
+if pool1 in session.xenapi.cpu_pool.get_all():
+ FAIL("cpu_pool.destroy() has not removed pool")
+
diff -r fadf63ab49e7 tools/xm-test/tests/xapi/Makefile.am
--- a/tools/xm-test/tests/xapi/Makefile.am Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/xm-test/tests/xapi/Makefile.am Tue Apr 20 11:10:40 2010 +0200
@@ -3,7 +3,8 @@ TESTS = 01_xapi-vm_basic.test \
TESTS = 01_xapi-vm_basic.test \
02_xapi-vbd_basic.test \
03_xapi-network_pos.test \
- 04_xapi-data_uri_handling.test
+ 04_xapi-data_uri_handling.test \
+ 20_xapi-cpu_pool_basic.test
XFAIL_TESTS =